author    kyle <kyle@ihatethathostname.lab.bos.redhat.com>  2010-11-16 00:19:59 -0500
committer kyle <kyle@ihatethathostname.lab.bos.redhat.com>  2010-11-16 00:19:59 -0500
commit    2c69ace4647b7b554d3a9ece98399cb72fe06415 (patch)
tree      75d386b798ba366b8c0e736ff08d324c74306fd7
parent    a568a0d41b42e78b2d8c05f9300de0e31fcee780 (diff)
drm-intel rebase, enable kernel-debug
-rw-r--r--  config-generic                  |     8
-rw-r--r--  config-nodebug                  |    96
-rw-r--r--  config-x86_64-generic           |     2
-rw-r--r--  drm-intel-2.6.37-rc2.patch      | 24423
-rw-r--r--  drm-intel-big-hammer.patch      |    16
-rw-r--r--  drm-intel-make-lvds-work.patch  |    19
-rw-r--r--  kernel.spec                     |    18
7 files changed, 24504 insertions(+), 78 deletions(-)
diff --git a/config-generic b/config-generic
index f834847f..67c580f6 100644
--- a/config-generic
+++ b/config-generic
@@ -1415,11 +1415,11 @@ CONFIG_ATMEL=m
CONFIG_B43=m
CONFIG_B43_PCMCIA=y
CONFIG_B43_SDIO=y
-CONFIG_B43_DEBUG=y
+# CONFIG_B43_DEBUG is not set
CONFIG_B43_PHY_LP=y
# CONFIG_B43_FORCE_PIO is not set
CONFIG_B43LEGACY=m
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA=y
CONFIG_B43LEGACY_PIO=y
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
@@ -3768,7 +3768,7 @@ CONFIG_IBMASR=m
CONFIG_PM_DEBUG=y
CONFIG_PM_TRACE=y
# CONFIG_PM_VERBOSE is not set
-CONFIG_PM_TEST_SUSPEND=y
+# CONFIG_PM_TEST_SUSPEND is not set
CONFIG_PM_RUNTIME=y
## BEGIN ISA Junk.
@@ -4206,7 +4206,7 @@ CONFIG_USB_ATMEL=m
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FUNCTION_GRAPH_TRACER is not set
-CONFIG_BOOT_TRACER=y
+# CONFIG_BOOT_TRACER is not set
CONFIG_EARLY_PRINTK_DBGP=y
CONFIG_SECURITYFS=y
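
For context on what these flips buy: a "# CONFIG_FOO is not set" line leaves the macro undefined at build time, and drivers typically compile their debug paths out entirely as a result. A minimal sketch of the usual consuming pattern (hypothetical names, not code from this commit):

/* Hypothetical sketch: how a CONFIG_*_DEBUG switch is consumed. */
#ifdef CONFIG_EXAMPLE_DEBUG
# define EXAMPLE_DEBUG 1
#else
# define EXAMPLE_DEBUG 0	/* "# CONFIG_... is not set" lands here */
#endif

static inline void example_dbg(const char *msg)
{
	if (EXAMPLE_DEBUG)	/* constant-folded away when disabled */
		printk(KERN_DEBUG "example: %s\n", msg);
}

With the option off, the compiler drops the printk and its format string, which is why debug and release kernels differ in both size and overhead.
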
diff --git a/config-nodebug b/config-nodebug
index d2288c5e..b4472f9a 100644
--- a/config-nodebug
+++ b/config-nodebug
@@ -2,92 +2,92 @@ CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
CONFIG_SND_PCM_XRUN_DEBUG=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_LOCK_ALLOC=y
-CONFIG_PROVE_LOCKING=y
-CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_PROVE_RCU=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_PROVE_RCU is not set
# CONFIG_PROVE_RCU_REPEATEDLY is not set
-CONFIG_DEBUG_PER_CPU_MAPS=y
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
CONFIG_CPUMASK_OFFSTACK=y
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set
-CONFIG_FAULT_INJECTION=y
-CONFIG_FAILSLAB=y
-CONFIG_FAIL_PAGE_ALLOC=y
-CONFIG_FAIL_MAKE_REQUEST=y
-CONFIG_FAULT_INJECTION_DEBUG_FS=y
-CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
-CONFIG_FAIL_IO_TIMEOUT=y
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_FAILSLAB is not set
+# CONFIG_FAIL_PAGE_ALLOC is not set
+# CONFIG_FAIL_MAKE_REQUEST is not set
+# CONFIG_FAULT_INJECTION_DEBUG_FS is not set
+# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set
+# CONFIG_FAIL_IO_TIMEOUT is not set
-CONFIG_SLUB_DEBUG_ON=y
+# CONFIG_SLUB_DEBUG_ON is not set
-CONFIG_LOCK_STAT=y
+# CONFIG_LOCK_STAT is not set
-CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_ACPI_DEBUG=y
+# CONFIG_ACPI_DEBUG is not set
# CONFIG_ACPI_DEBUG_FUNC_TRACE is not set
-CONFIG_DEBUG_SG=y
+# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_DEBUG_WRITECOUNT=y
-CONFIG_DEBUG_OBJECTS=y
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_OBJECTS_SELFTEST is not set
-CONFIG_DEBUG_OBJECTS_FREE=y
-CONFIG_DEBUG_OBJECTS_TIMERS=y
+# CONFIG_DEBUG_OBJECTS_FREE is not set
+# CONFIG_DEBUG_OBJECTS_TIMERS is not set
CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
-CONFIG_X86_PTDUMP=y
+# CONFIG_X86_PTDUMP is not set
-CONFIG_CAN_DEBUG_DEVICES=y
+# CONFIG_CAN_DEBUG_DEVICES is not set
-CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-CONFIG_DEBUG_NOTIFIERS=y
+# CONFIG_DEBUG_NOTIFIERS is not set
-CONFIG_DMA_API_DEBUG=y
+# CONFIG_DMA_API_DEBUG is not set
-CONFIG_MMIOTRACE=y
+# CONFIG_MMIOTRACE is not set
-CONFIG_DEBUG_CREDENTIALS=y
+# CONFIG_DEBUG_CREDENTIALS is not set
# off in both production debug and nodebug builds,
# on in rawhide nodebug builds
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
-CONFIG_EXT4_DEBUG=y
+# CONFIG_EXT4_DEBUG is not set
-CONFIG_DEBUG_PERF_USE_VMALLOC=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
-CONFIG_JBD2_DEBUG=y
+# CONFIG_JBD2_DEBUG is not set
-CONFIG_DEBUG_CFQ_IOSCHED=y
+# CONFIG_DEBUG_CFQ_IOSCHED is not set
-CONFIG_DRBD_FAULT_INJECTION=y
+# CONFIG_DRBD_FAULT_INJECTION is not set
-CONFIG_ATH_DEBUG=y
-CONFIG_IWLWIFI_DEVICE_TRACING=y
+# CONFIG_ATH_DEBUG is not set
+# CONFIG_IWLWIFI_DEVICE_TRACING is not set
-CONFIG_DEBUG_OBJECTS_WORK=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_OBJECTS_WORK is not set
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
-CONFIG_DMADEVICES_DEBUG=y
-CONFIG_DMADEVICES_VDEBUG=y
+# CONFIG_DMADEVICES_DEBUG is not set
+# CONFIG_DMADEVICES_VDEBUG is not set
CONFIG_PM_ADVANCED_DEBUG=y
-CONFIG_CEPH_FS_PRETTYDEBUG=y
-CONFIG_QUOTA_DEBUG=y
+# CONFIG_CEPH_FS_PRETTYDEBUG is not set
+# CONFIG_QUOTA_DEBUG is not set
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
CONFIG_PCI_DEFAULT_USE_CRS=y
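
Most of these switches compile whole checking frameworks in or out rather than merely toggling log noise. CONFIG_DEBUG_VM, for example, gates the VM_BUG_ON() assertions sprinkled through mm/; with the option unset they collapse to no-ops, which is exactly what the nodebug variant wants. The shape of that gate, paraphrased from include/linux/mmdebug.h of this era:

#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
#define VM_BUG_ON(cond) do { } while (0)	/* nodebug build: zero cost */
#endif

Note the one inversion in the hunk above: CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y disables the crypto self-tests, so for that option "debug" means unset and "nodebug" means set.
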
diff --git a/config-x86_64-generic b/config-x86_64-generic
index 713c7ef1..3d8d67b0 100644
--- a/config-x86_64-generic
+++ b/config-x86_64-generic
@@ -15,7 +15,7 @@ CONFIG_NUMA=y
CONFIG_K8_NUMA=y
CONFIG_X86_64_ACPI_NUMA=y
# CONFIG_NUMA_EMU is not set
-CONFIG_NR_CPUS=512
+CONFIG_NR_CPUS=256
CONFIG_X86_POWERNOW_K8=m
CONFIG_X86_P4_CLOCKMOD=m
CONFIG_IA32_EMULATION=y
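
Halving NR_CPUS is a straight memory trade: NR_CPUS fixes the compile-time width of every cpumask bitmap, and such masks are embedded in many per-task and per-device structures. A self-contained sketch of the arithmetic, simplified from the kernel's DECLARE_BITMAP and buildable as ordinary userspace C:

#include <stdio.h>

#define BITS_PER_LONG    (8 * sizeof(long))
#define CPUMASK_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct cpumask256 { unsigned long bits[CPUMASK_LONGS(256)]; };
struct cpumask512 { unsigned long bits[CPUMASK_LONGS(512)]; };

int main(void)
{
	/* On x86_64: 32 bytes vs 64 bytes for every embedded mask. */
	printf("NR_CPUS=256: %zu bytes, NR_CPUS=512: %zu bytes\n",
	       sizeof(struct cpumask256), sizeof(struct cpumask512));
	return 0;
}

CONFIG_CPUMASK_OFFSTACK (left enabled in config-nodebug above) addresses the same cost for on-stack masks by allocating cpumask_var_t dynamically.
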
diff --git a/drm-intel-2.6.37-rc2.patch b/drm-intel-2.6.37-rc2.patch
new file mode 100644
index 00000000..e2a9d28b
--- /dev/null
+++ b/drm-intel-2.6.37-rc2.patch
@@ -0,0 +1,24423 @@
+ drivers/char/agp/Makefile | 1 +
+ drivers/char/agp/intel-agp.c | 201 +---
+ drivers/char/agp/intel-agp.h | 43 +-
+ drivers/char/agp/intel-gtt.c | 1614 ++++++++++-----------
+ drivers/gpu/drm/drm_edid.c | 92 +-
+ drivers/gpu/drm/i915/Makefile | 4 +-
+ drivers/gpu/drm/i915/dvo_ch7017.c | 66 +-
+ drivers/gpu/drm/i915/dvo_ch7xxx.c | 10 +-
+ drivers/gpu/drm/i915/dvo_ivch.c | 10 +-
+ drivers/gpu/drm/i915/dvo_sil164.c | 10 +-
+ drivers/gpu/drm/i915/dvo_tfp410.c | 10 +-
+ drivers/gpu/drm/i915/i915_debugfs.c | 337 ++++-
+ drivers/gpu/drm/i915/i915_dma.c | 360 ++----
+ drivers/gpu/drm/i915/i915_drv.c | 219 ++-
+ drivers/gpu/drm/i915/i915_drv.h | 272 +++--
+ drivers/gpu/drm/i915/i915_gem.c | 2292 +++++++++++++++---------------
+ drivers/gpu/drm/i915/i915_gem_debug.c | 148 ++-
+ drivers/gpu/drm/i915/i915_gem_evict.c | 72 +-
+ drivers/gpu/drm/i915/i915_gem_tiling.c | 54 +-
+ drivers/gpu/drm/i915/i915_irq.c | 259 ++--
+ drivers/gpu/drm/i915/i915_reg.h | 335 +++--
+ drivers/gpu/drm/i915/i915_suspend.c | 32 +-
+ drivers/gpu/drm/i915/intel_acpi.c | 286 ++++
+ drivers/gpu/drm/i915/intel_bios.c | 234 +++-
+ drivers/gpu/drm/i915/intel_bios.h | 6 +-
+ drivers/gpu/drm/i915/intel_crt.c | 127 +-
+ drivers/gpu/drm/i915/intel_display.c | 2374 ++++++++++++++++---------------
+ drivers/gpu/drm/i915/intel_dp.c | 658 ++++++---
+ drivers/gpu/drm/i915/intel_drv.h | 161 ++-
+ drivers/gpu/drm/i915/intel_dvo.c | 69 +-
+ drivers/gpu/drm/i915/intel_fb.c | 29 +-
+ drivers/gpu/drm/i915/intel_hdmi.c | 193 ++-
+ drivers/gpu/drm/i915/intel_i2c.c | 484 +++++--
+ drivers/gpu/drm/i915/intel_lvds.c | 445 +++---
+ drivers/gpu/drm/i915/intel_modes.c | 16 +-
+ drivers/gpu/drm/i915/intel_opregion.c | 517 +++++++
+ drivers/gpu/drm/i915/intel_overlay.c | 1004 +++++++------
+ drivers/gpu/drm/i915/intel_panel.c | 109 ++
+ drivers/gpu/drm/i915/intel_ringbuffer.c | 580 +++++---
+ drivers/gpu/drm/i915/intel_ringbuffer.h | 84 +-
+ drivers/gpu/drm/i915/intel_sdvo.c | 1076 +++++++--------
+ drivers/gpu/drm/i915/intel_tv.c | 165 +--
+ include/drm/drm_crtc.h | 1 +
+ include/drm/drm_dp_helper.h | 3 +
+ include/drm/i915_drm.h | 6 +-
+ include/drm/intel-gtt.h | 18 +
+ 46 files changed, 8590 insertions(+), 6496 deletions(-)
+
+diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
+index 627f542..8eb56e2 100644
+--- a/drivers/char/agp/Makefile
++++ b/drivers/char/agp/Makefile
+@@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
+ obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
+ obj-$(CONFIG_AGP_I460) += i460-agp.o
+ obj-$(CONFIG_AGP_INTEL) += intel-agp.o
++obj-$(CONFIG_AGP_INTEL) += intel-gtt.o
+ obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
+ obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o
+ obj-$(CONFIG_AGP_SIS) += sis-agp.o
+diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
+index cd18493..e72f49d 100644
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -12,9 +12,6 @@
+ #include <asm/smp.h>
+ #include "agp.h"
+ #include "intel-agp.h"
+-#include <linux/intel-gtt.h>
+-
+-#include "intel-gtt.c"
+
+ int intel_agp_enabled;
+ EXPORT_SYMBOL(intel_agp_enabled);
+@@ -703,179 +700,37 @@ static const struct agp_bridge_driver intel_7505_driver = {
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ };
+
+-static int find_gmch(u16 device)
+-{
+- struct pci_dev *gmch_device;
+-
+- gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+- if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
+- gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
+- device, gmch_device);
+- }
+-
+- if (!gmch_device)
+- return 0;
+-
+- intel_private.pcidev = gmch_device;
+- return 1;
+-}
+-
+ /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
+ * driver and gmch_driver must be non-null, and find_gmch will determine
+ * which one should be used if a gmch_chip_id is present.
+ */
+-static const struct intel_driver_description {
++static const struct intel_agp_driver_description {
+ unsigned int chip_id;
+- unsigned int gmch_chip_id;
+ char *name;
+ const struct agp_bridge_driver *driver;
+- const struct agp_bridge_driver *gmch_driver;
+ } intel_agp_chipsets[] = {
+- { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
+- NULL, &intel_810_driver },
+- { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
+- NULL, &intel_810_driver },
+- { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
+- NULL, &intel_810_driver },
+- { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
+- &intel_815_driver, &intel_810_driver },
+- { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
+- &intel_830mp_driver, &intel_830_driver },
+- { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
+- &intel_845_driver, &intel_830_driver },
+- { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
+- &intel_845_driver, &intel_830_driver },
+- { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
+- &intel_845_driver, &intel_830_driver },
+- { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
+- &intel_845_driver, &intel_830_driver },
+- { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
+- NULL, &intel_915_driver },
+- { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
+- NULL, &intel_915_driver },
+- { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
+- NULL, &intel_915_driver },
+- { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
+- NULL, &intel_915_driver },
+- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
+- NULL, &intel_915_driver },
+- { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
+- NULL, &intel_915_driver },
+- { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
+- NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
+- NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
+- NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
+- NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
+- NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
+- NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
+- { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
+- NULL, &intel_g33_driver },
+- { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
+- NULL, &intel_g33_driver },
+- { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
+- NULL, &intel_g33_driver },
+- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
+- NULL, &intel_g33_driver },
+- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
+- NULL, &intel_g33_driver },
+- { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
+- "GM45", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
+- "Eaglelake", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
+- "Q45/Q43", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
+- "G45/G43", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
+- "B43", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
+- "B43", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
+- "G41", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
+- "HD Graphics", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+- "HD Graphics", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+- "HD Graphics", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+- "HD Graphics", NULL, &intel_i965_driver },
+- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
+- "Sandybridge", NULL, &intel_gen6_driver },
+- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
+- "Sandybridge", NULL, &intel_gen6_driver },
+- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+- "Sandybridge", NULL, &intel_gen6_driver },
+- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+- "Sandybridge", NULL, &intel_gen6_driver },
+- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+- "Sandybridge", NULL, &intel_gen6_driver },
+- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+- "Sandybridge", NULL, &intel_gen6_driver },
+- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
+- "Sandybridge", NULL, &intel_gen6_driver },
+- { 0, 0, NULL, NULL, NULL }
++ { PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
++ { PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
++ { PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
++ { PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
++ { PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
++ { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
++ { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
++ { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
++ { PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
++ { PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
++ { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
++ { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
++ { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
++ { PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
++ { PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
++ { PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
++ { PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
++ { PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
++ { PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
++ { 0, NULL, NULL }
+ };
+
+-static int __devinit intel_gmch_probe(struct pci_dev *pdev,
+- struct agp_bridge_data *bridge)
+-{
+- int i, mask;
+-
+- bridge->driver = NULL;
+-
+- for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
+- if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
+- find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
+- bridge->driver =
+- intel_agp_chipsets[i].gmch_driver;
+- break;
+- }
+- }
+-
+- if (!bridge->driver)
+- return 0;
+-
+- bridge->dev_private_data = &intel_private;
+- bridge->dev = pdev;
+-
+- dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
+-
+- if (bridge->driver->mask_memory == intel_gen6_mask_memory)
+- mask = 40;
+- else if (bridge->driver->mask_memory == intel_i965_mask_memory)
+- mask = 36;
+- else
+- mask = 32;
+-
+- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+- dev_err(&intel_private.pcidev->dev,
+- "set gfx device dma mask %d-bit failed!\n", mask);
+- else
+- pci_set_consistent_dma_mask(intel_private.pcidev,
+- DMA_BIT_MASK(mask));
+-
+- return 1;
+-}
+-
+ static int __devinit agp_intel_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+ {
+@@ -905,7 +760,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
+ }
+ }
+
+- if (intel_agp_chipsets[i].name == NULL) {
++ if (!bridge->driver) {
+ if (cap_ptr)
+ dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
+ pdev->vendor, pdev->device);
+@@ -913,14 +768,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
+ return -ENODEV;
+ }
+
+- if (!bridge->driver) {
+- if (cap_ptr)
+- dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
+- intel_agp_chipsets[i].gmch_chip_id);
+- agp_put_bridge(bridge);
+- return -ENODEV;
+- }
+-
+ bridge->dev = pdev;
+ bridge->dev_private_data = NULL;
+
+@@ -972,8 +819,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
+
+ agp_remove_bridge(bridge);
+
+- if (intel_private.pcidev)
+- pci_dev_put(intel_private.pcidev);
++ intel_gmch_remove(pdev);
+
+ agp_put_bridge(bridge);
+ }
+@@ -1049,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
+ ID(PCI_DEVICE_ID_INTEL_G45_HB),
+ ID(PCI_DEVICE_ID_INTEL_G41_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_HB),
++ ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
+index d09b1ab..90539df 100644
+--- a/drivers/char/agp/intel-agp.h
++++ b/drivers/char/agp/intel-agp.h
+@@ -215,44 +215,7 @@
+ #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
+ #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
+
+-/* cover 915 and 945 variants */
+-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
+-
+-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
+-
+-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+-
+-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+-
+-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
+-
+-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+- IS_SNB)
+-
++int intel_gmch_probe(struct pci_dev *pdev,
++ struct agp_bridge_data *bridge);
++void intel_gmch_remove(struct pci_dev *pdev);
+ #endif
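
This header diff is the visible edge of the refactor: the IS_I915/IS_G33/IS_SNB device-ID macro soup goes away, and intel-agp.h instead exports two entry points implemented in intel-gtt.c. A hedged sketch of how a caller is expected to drive them (simplified, not the literal patch code):

static int example_agp_probe(struct pci_dev *pdev,
			     struct agp_bridge_data *bridge)
{
	/* Let the GMCH/GTT core claim integrated graphics first... */
	if (intel_gmch_probe(pdev, bridge))
		return 0;

	/* ...then fall back to the classic AGP bridge table. */
	return -ENODEV;
}

static void example_agp_remove(struct pci_dev *pdev)
{
	intel_gmch_remove(pdev);	/* releases the probe's claim */
}

Per the removed intel_gmch_probe body earlier in the diff, the probe returns nonzero on success, hence the truth test rather than a 0-on-success errno convention.
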
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index 75e0a34..9272c38 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -15,6 +15,18 @@
+ * /fairy-tale-mode off
+ */
+
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/pagemap.h>
++#include <linux/agp_backend.h>
++#include <asm/smp.h>
++#include "agp.h"
++#include "intel-agp.h"
++#include <linux/intel-gtt.h>
++#include <drm/intel-gtt.h>
++
+ /*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+@@ -23,11 +35,12 @@
+ */
+ #ifdef CONFIG_DMAR
+ #define USE_PCI_DMA_API 1
++#else
++#define USE_PCI_DMA_API 0
+ #endif
+
+ /* Max amount of stolen space, anything above will be returned to Linux */
+ int intel_max_stolen = 32 * 1024 * 1024;
+-EXPORT_SYMBOL(intel_max_stolen);
+
+ static const struct aper_size_info_fixed intel_i810_sizes[] =
+ {
+@@ -55,32 +68,36 @@ static struct gatt_mask intel_i810_masks[] =
+ #define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
+ #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
+
+-static struct gatt_mask intel_gen6_masks[] =
+-{
+- {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
+- .type = INTEL_AGP_UNCACHED_MEMORY },
+- {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
+- .type = INTEL_AGP_CACHED_MEMORY_LLC },
+- {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
+- .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
+- {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
+- .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
+- {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
+- .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
++struct intel_gtt_driver {
++ unsigned int gen : 8;
++ unsigned int is_g33 : 1;
++ unsigned int is_pineview : 1;
++ unsigned int is_ironlake : 1;
++ unsigned int dma_mask_size : 8;
++ /* Chipset specific GTT setup */
++ int (*setup)(void);
++ /* This should undo anything done in ->setup() save the unmapping
++ * of the mmio register file, that's done in the generic code. */
++ void (*cleanup)(void);
++ void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
++ /* Flags is a more or less chipset specific opaque value.
++ * For chipsets that need to support old ums (non-gem) code, this
++ * needs to be identical to the various supported agp memory types! */
++ bool (*check_flags)(unsigned int flags);
++ void (*chipset_flush)(void);
+ };
+
+ static struct _intel_private {
++ struct intel_gtt base;
++ const struct intel_gtt_driver *driver;
+ struct pci_dev *pcidev; /* device one */
++ struct pci_dev *bridge_dev;
+ u8 __iomem *registers;
++ phys_addr_t gtt_bus_addr;
++ phys_addr_t gma_bus_addr;
++ phys_addr_t pte_bus_addr;
+ u32 __iomem *gtt; /* I915G */
+ int num_dcache_entries;
+- /* gtt_entries is the number of gtt entries that are already mapped
+- * to stolen memory. Stolen memory is larger than the memory mapped
+- * through gtt_entries, as it includes some reserved space for the BIOS
+- * popup and for the GTT.
+- */
+- int gtt_entries; /* i830+ */
+- int gtt_total_size;
+ union {
+ void __iomem *i9xx_flush_page;
+ void *i8xx_flush_page;
+@@ -88,23 +105,14 @@ static struct _intel_private {
+ struct page *i8xx_page;
+ struct resource ifp_resource;
+ int resource_valid;
++ struct page *scratch_page;
++ dma_addr_t scratch_page_dma;
+ } intel_private;
+
+-#ifdef USE_PCI_DMA_API
+-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+-{
+- *ret = pci_map_page(intel_private.pcidev, page, 0,
+- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+- if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+- return -EINVAL;
+- return 0;
+-}
+-
+-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+-{
+- pci_unmap_page(intel_private.pcidev, dma,
+- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+-}
++#define INTEL_GTT_GEN intel_private.driver->gen
++#define IS_G33 intel_private.driver->is_g33
++#define IS_PINEVIEW intel_private.driver->is_pineview
++#define IS_IRONLAKE intel_private.driver->is_ironlake
+
+ static void intel_agp_free_sglist(struct agp_memory *mem)
+ {
+@@ -125,6 +133,9 @@ static int intel_agp_map_memory(struct agp_memory *mem)
+ struct scatterlist *sg;
+ int i;
+
++ if (mem->sg_list)
++		return 0; /* already mapped (for e.g. resume) */
++
+ DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+
+ if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+@@ -156,70 +167,17 @@ static void intel_agp_unmap_memory(struct agp_memory *mem)
+ intel_agp_free_sglist(mem);
+ }
+
+-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+- off_t pg_start, int mask_type)
+-{
+- struct scatterlist *sg;
+- int i, j;
+-
+- j = pg_start;
+-
+- WARN_ON(!mem->num_sg);
+-
+- if (mem->num_sg == mem->page_count) {
+- for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+- writel(agp_bridge->driver->mask_memory(agp_bridge,
+- sg_dma_address(sg), mask_type),
+- intel_private.gtt+j);
+- j++;
+- }
+- } else {
+- /* sg may merge pages, but we have to separate
+- * per-page addr for GTT */
+- unsigned int len, m;
+-
+- for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+- len = sg_dma_len(sg) / PAGE_SIZE;
+- for (m = 0; m < len; m++) {
+- writel(agp_bridge->driver->mask_memory(agp_bridge,
+- sg_dma_address(sg) + m * PAGE_SIZE,
+- mask_type),
+- intel_private.gtt+j);
+- j++;
+- }
+- }
+- }
+- readl(intel_private.gtt+j-1);
+-}
+-
+-#else
+-
+-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+- off_t pg_start, int mask_type)
+-{
+- int i, j;
+-
+- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+- writel(agp_bridge->driver->mask_memory(agp_bridge,
+- page_to_phys(mem->pages[i]), mask_type),
+- intel_private.gtt+j);
+- }
+-
+- readl(intel_private.gtt+j-1);
+-}
+-
+-#endif
+-
+ static int intel_i810_fetch_size(void)
+ {
+ u32 smram_miscc;
+ struct aper_size_info_fixed *values;
+
+- pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
++ pci_read_config_dword(intel_private.bridge_dev,
++ I810_SMRAM_MISCC, &smram_miscc);
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+- dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
++ dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
+ return 0;
+ }
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+@@ -284,7 +242,7 @@ static void intel_i810_cleanup(void)
+ iounmap(intel_private.registers);
+ }
+
+-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
++static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+ {
+ return;
+ }
+@@ -319,34 +277,6 @@ static void i8xx_destroy_pages(struct page *page)
+ atomic_dec(&agp_bridge->current_memory_agp);
+ }
+
+-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
+- int type)
+-{
+- if (type < AGP_USER_TYPES)
+- return type;
+- else if (type == AGP_USER_CACHED_MEMORY)
+- return INTEL_AGP_CACHED_MEMORY;
+- else
+- return 0;
+-}
+-
+-static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
+- int type)
+-{
+- unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
+- unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
+-
+- if (type_mask == AGP_USER_UNCACHED_MEMORY)
+- return INTEL_AGP_UNCACHED_MEMORY;
+- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
+- return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
+- INTEL_AGP_CACHED_MEMORY_LLC_MLC;
+- else /* set 'normal'/'cached' to LLC by default */
+- return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
+- INTEL_AGP_CACHED_MEMORY_LLC;
+-}
+-
+-
+ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+ {
+@@ -514,8 +444,33 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
+ return addr | bridge->driver->masks[type].mask;
+ }
+
+-static struct aper_size_info_fixed intel_i830_sizes[] =
++static int intel_gtt_setup_scratch_page(void)
+ {
++ struct page *page;
++ dma_addr_t dma_addr;
++
++ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
++ if (page == NULL)
++ return -ENOMEM;
++ get_page(page);
++ set_pages_uc(page, 1);
++
++ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
++ dma_addr = pci_map_page(intel_private.pcidev, page, 0,
++ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++ if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
++ return -EINVAL;
++
++ intel_private.scratch_page_dma = dma_addr;
++ } else
++ intel_private.scratch_page_dma = page_to_phys(page);
++
++ intel_private.scratch_page = page;
++
++ return 0;
++}
++
++static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+ {128, 32768, 5},
+ /* The 64M mode still requires a 128k gatt */
+ {64, 16384, 5},
+@@ -523,102 +478,49 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
+ {512, 131072, 7},
+ };
+
+-static void intel_i830_init_gtt_entries(void)
++static unsigned int intel_gtt_stolen_entries(void)
+ {
+ u16 gmch_ctrl;
+- int gtt_entries = 0;
+ u8 rdct;
+ int local = 0;
+ static const int ddt[4] = { 0, 16, 32, 64 };
+- int size; /* reserved space (in kb) at the top of stolen memory */
++ unsigned int overhead_entries, stolen_entries;
++ unsigned int stolen_size = 0;
+
+- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
++ pci_read_config_word(intel_private.bridge_dev,
++ I830_GMCH_CTRL, &gmch_ctrl);
+
+- if (IS_I965) {
+- u32 pgetbl_ctl;
+- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
++ if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
++ overhead_entries = 0;
++ else
++ overhead_entries = intel_private.base.gtt_mappable_entries
++ / 1024;
+
+- /* The 965 has a field telling us the size of the GTT,
+- * which may be larger than what is necessary to map the
+- * aperture.
+- */
+- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+- case I965_PGETBL_SIZE_128KB:
+- size = 128;
+- break;
+- case I965_PGETBL_SIZE_256KB:
+- size = 256;
+- break;
+- case I965_PGETBL_SIZE_512KB:
+- size = 512;
+- break;
+- case I965_PGETBL_SIZE_1MB:
+- size = 1024;
+- break;
+- case I965_PGETBL_SIZE_2MB:
+- size = 2048;
+- break;
+- case I965_PGETBL_SIZE_1_5MB:
+- size = 1024 + 512;
+- break;
+- default:
+- dev_info(&intel_private.pcidev->dev,
+- "unknown page table size, assuming 512KB\n");
+- size = 512;
+- }
+- size += 4; /* add in BIOS popup space */
+- } else if (IS_G33 && !IS_PINEVIEW) {
+- /* G33's GTT size defined in gmch_ctrl */
+- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+- case G33_PGETBL_SIZE_1M:
+- size = 1024;
+- break;
+- case G33_PGETBL_SIZE_2M:
+- size = 2048;
+- break;
+- default:
+- dev_info(&agp_bridge->dev->dev,
+- "unknown page table size 0x%x, assuming 512KB\n",
+- (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+- size = 512;
+- }
+- size += 4;
+- } else if (IS_G4X || IS_PINEVIEW) {
+- /* On 4 series hardware, GTT stolen is separate from graphics
+- * stolen, ignore it in stolen gtt entries counting. However,
+- * 4KB of the stolen memory doesn't get mapped to the GTT.
+- */
+- size = 4;
+- } else {
+- /* On previous hardware, the GTT size was just what was
+- * required to map the aperture.
+- */
+- size = agp_bridge->driver->fetch_size() + 4;
+- }
++ overhead_entries += 1; /* BIOS popup */
+
+- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
++ if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
++ intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+- gtt_entries = KB(512) - KB(size);
++ stolen_size = KB(512);
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
+- gtt_entries = MB(1) - KB(size);
++ stolen_size = MB(1);
+ break;
+ case I830_GMCH_GMS_STOLEN_8192:
+- gtt_entries = MB(8) - KB(size);
++ stolen_size = MB(8);
+ break;
+ case I830_GMCH_GMS_LOCAL:
+ rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
+- gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
++ stolen_size = (I830_RDRAM_ND(rdct) + 1) *
+ MB(ddt[I830_RDRAM_DDT(rdct)]);
+ local = 1;
+ break;
+ default:
+- gtt_entries = 0;
++ stolen_size = 0;
+ break;
+ }
+- } else if (IS_SNB) {
++ } else if (INTEL_GTT_GEN == 6) {
+ /*
+ * SandyBridge has new memory control reg at 0x50.w
+ */
+@@ -626,149 +528,292 @@ static void intel_i830_init_gtt_entries(void)
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+ case SNB_GMCH_GMS_STOLEN_32M:
+- gtt_entries = MB(32) - KB(size);
++ stolen_size = MB(32);
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+- gtt_entries = MB(64) - KB(size);
++ stolen_size = MB(64);
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+- gtt_entries = MB(96) - KB(size);
++ stolen_size = MB(96);
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+- gtt_entries = MB(128) - KB(size);
++ stolen_size = MB(128);
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+- gtt_entries = MB(160) - KB(size);
++ stolen_size = MB(160);
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+- gtt_entries = MB(192) - KB(size);
++ stolen_size = MB(192);
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+- gtt_entries = MB(224) - KB(size);
++ stolen_size = MB(224);
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+- gtt_entries = MB(256) - KB(size);
++ stolen_size = MB(256);
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+- gtt_entries = MB(288) - KB(size);
++ stolen_size = MB(288);
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+- gtt_entries = MB(320) - KB(size);
++ stolen_size = MB(320);
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+- gtt_entries = MB(352) - KB(size);
++ stolen_size = MB(352);
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+- gtt_entries = MB(384) - KB(size);
++ stolen_size = MB(384);
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+- gtt_entries = MB(416) - KB(size);
++ stolen_size = MB(416);
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+- gtt_entries = MB(448) - KB(size);
++ stolen_size = MB(448);
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+- gtt_entries = MB(480) - KB(size);
++ stolen_size = MB(480);
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+- gtt_entries = MB(512) - KB(size);
++ stolen_size = MB(512);
+ break;
+ }
+ } else {
+ switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+ case I855_GMCH_GMS_STOLEN_1M:
+- gtt_entries = MB(1) - KB(size);
++ stolen_size = MB(1);
+ break;
+ case I855_GMCH_GMS_STOLEN_4M:
+- gtt_entries = MB(4) - KB(size);
++ stolen_size = MB(4);
+ break;
+ case I855_GMCH_GMS_STOLEN_8M:
+- gtt_entries = MB(8) - KB(size);
++ stolen_size = MB(8);
+ break;
+ case I855_GMCH_GMS_STOLEN_16M:
+- gtt_entries = MB(16) - KB(size);
++ stolen_size = MB(16);
+ break;
+ case I855_GMCH_GMS_STOLEN_32M:
+- gtt_entries = MB(32) - KB(size);
++ stolen_size = MB(32);
+ break;
+ case I915_GMCH_GMS_STOLEN_48M:
+- /* Check it's really I915G */
+- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+- gtt_entries = MB(48) - KB(size);
+- else
+- gtt_entries = 0;
++ stolen_size = MB(48);
+ break;
+ case I915_GMCH_GMS_STOLEN_64M:
+- /* Check it's really I915G */
+- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+- gtt_entries = MB(64) - KB(size);
+- else
+- gtt_entries = 0;
++ stolen_size = MB(64);
+ break;
+ case G33_GMCH_GMS_STOLEN_128M:
+- if (IS_G33 || IS_I965 || IS_G4X)
+- gtt_entries = MB(128) - KB(size);
+- else
+- gtt_entries = 0;
++ stolen_size = MB(128);
+ break;
+ case G33_GMCH_GMS_STOLEN_256M:
+- if (IS_G33 || IS_I965 || IS_G4X)
+- gtt_entries = MB(256) - KB(size);
+- else
+- gtt_entries = 0;
++ stolen_size = MB(256);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+- if (IS_I965 || IS_G4X)
+- gtt_entries = MB(96) - KB(size);
+- else
+- gtt_entries = 0;
++ stolen_size = MB(96);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+- if (IS_I965 || IS_G4X)
+- gtt_entries = MB(160) - KB(size);
+- else
+- gtt_entries = 0;
++ stolen_size = MB(160);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+- if (IS_I965 || IS_G4X)
+- gtt_entries = MB(224) - KB(size);
+- else
+- gtt_entries = 0;
++ stolen_size = MB(224);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+- if (IS_I965 || IS_G4X)
+- gtt_entries = MB(352) - KB(size);
+- else
+- gtt_entries = 0;
++ stolen_size = MB(352);
+ break;
+ default:
+- gtt_entries = 0;
++ stolen_size = 0;
+ break;
+ }
+ }
+- if (!local && gtt_entries > intel_max_stolen) {
+- dev_info(&agp_bridge->dev->dev,
++
++ if (!local && stolen_size > intel_max_stolen) {
++ dev_info(&intel_private.bridge_dev->dev,
+ "detected %dK stolen memory, trimming to %dK\n",
+- gtt_entries / KB(1), intel_max_stolen / KB(1));
+- gtt_entries = intel_max_stolen / KB(4);
+- } else if (gtt_entries > 0) {
+- dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
+- gtt_entries / KB(1), local ? "local" : "stolen");
+- gtt_entries /= KB(4);
++ stolen_size / KB(1), intel_max_stolen / KB(1));
++ stolen_size = intel_max_stolen;
++ } else if (stolen_size > 0) {
++ dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
++ stolen_size / KB(1), local ? "local" : "stolen");
+ } else {
+- dev_info(&agp_bridge->dev->dev,
++ dev_info(&intel_private.bridge_dev->dev,
+ "no pre-allocated video memory detected\n");
+- gtt_entries = 0;
++ stolen_size = 0;
++ }
++
++ stolen_entries = stolen_size/KB(4) - overhead_entries;
++
++ return stolen_entries;
++}
++
++static unsigned int intel_gtt_total_entries(void)
++{
++ int size;
++
++ if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
++ u32 pgetbl_ctl;
++ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
++
++ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
++ case I965_PGETBL_SIZE_128KB:
++ size = KB(128);
++ break;
++ case I965_PGETBL_SIZE_256KB:
++ size = KB(256);
++ break;
++ case I965_PGETBL_SIZE_512KB:
++ size = KB(512);
++ break;
++ case I965_PGETBL_SIZE_1MB:
++ size = KB(1024);
++ break;
++ case I965_PGETBL_SIZE_2MB:
++ size = KB(2048);
++ break;
++ case I965_PGETBL_SIZE_1_5MB:
++ size = KB(1024 + 512);
++ break;
++ default:
++ dev_info(&intel_private.pcidev->dev,
++ "unknown page table size, assuming 512KB\n");
++ size = KB(512);
++ }
++
++ return size/4;
++ } else if (INTEL_GTT_GEN == 6) {
++ u16 snb_gmch_ctl;
++
++ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
++ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
++ default:
++ case SNB_GTT_SIZE_0M:
++ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
++ size = MB(0);
++ break;
++ case SNB_GTT_SIZE_1M:
++ size = MB(1);
++ break;
++ case SNB_GTT_SIZE_2M:
++ size = MB(2);
++ break;
++ }
++ return size/4;
++ } else {
++ /* On previous hardware, the GTT size was just what was
++ * required to map the aperture.
++ */
++ return intel_private.base.gtt_mappable_entries;
++ }
++}
++
++static unsigned int intel_gtt_mappable_entries(void)
++{
++ unsigned int aperture_size;
++
++ if (INTEL_GTT_GEN == 2) {
++ u16 gmch_ctrl;
++
++ pci_read_config_word(intel_private.bridge_dev,
++ I830_GMCH_CTRL, &gmch_ctrl);
++
++ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
++ aperture_size = MB(64);
++ else
++ aperture_size = MB(128);
++ } else {
++ /* 9xx supports large sizes, just look at the length */
++ aperture_size = pci_resource_len(intel_private.pcidev, 2);
++ }
++
++ return aperture_size >> PAGE_SHIFT;
++}
++
++static void intel_gtt_teardown_scratch_page(void)
++{
++ set_pages_wb(intel_private.scratch_page, 1);
++ pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
++ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++ put_page(intel_private.scratch_page);
++ __free_page(intel_private.scratch_page);
++}
++
++static void intel_gtt_cleanup(void)
++{
++ intel_private.driver->cleanup();
++
++ iounmap(intel_private.gtt);
++ iounmap(intel_private.registers);
++
++ intel_gtt_teardown_scratch_page();
++}
++
++static int intel_gtt_init(void)
++{
++ u32 gtt_map_size;
++ int ret;
++
++ ret = intel_private.driver->setup();
++ if (ret != 0)
++ return ret;
++
++ intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
++ intel_private.base.gtt_total_entries = intel_gtt_total_entries();
++
++ dev_info(&intel_private.bridge_dev->dev,
++ "detected gtt size: %dK total, %dK mappable\n",
++ intel_private.base.gtt_total_entries * 4,
++ intel_private.base.gtt_mappable_entries * 4);
++
++ gtt_map_size = intel_private.base.gtt_total_entries * 4;
++
++ intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
++ gtt_map_size);
++ if (!intel_private.gtt) {
++ intel_private.driver->cleanup();
++ iounmap(intel_private.registers);
++ return -ENOMEM;
++ }
++
++ global_cache_flush(); /* FIXME: ? */
++
++ /* we have to call this as early as possible after the MMIO base address is known */
++ intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
++ if (intel_private.base.gtt_stolen_entries == 0) {
++ intel_private.driver->cleanup();
++ iounmap(intel_private.registers);
++ iounmap(intel_private.gtt);
++ return -ENOMEM;
++ }
++
++ ret = intel_gtt_setup_scratch_page();
++ if (ret != 0) {
++ intel_gtt_cleanup();
++ return ret;
++ }
++
++ return 0;
++}
++
++static int intel_fake_agp_fetch_size(void)
++{
++ int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
++ unsigned int aper_size;
++ int i;
++
++ aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
++ / MB(1);
++
++ for (i = 0; i < num_sizes; i++) {
++ if (aper_size == intel_fake_agp_sizes[i].size) {
++ agp_bridge->current_size =
++ (void *) (intel_fake_agp_sizes + i);
++ return aper_size;
++ }
+ }
+
+- intel_private.gtt_entries = gtt_entries;
++ return 0;
+ }
+
+-static void intel_i830_fini_flush(void)
++static void i830_cleanup(void)
+ {
+ kunmap(intel_private.i8xx_page);
+ intel_private.i8xx_flush_page = NULL;
+- unmap_page_from_agp(intel_private.i8xx_page);
+
+ __free_page(intel_private.i8xx_page);
+ intel_private.i8xx_page = NULL;
+@@ -780,13 +825,13 @@ static void intel_i830_setup_flush(void)
+ if (intel_private.i8xx_page)
+ return;
+
+- intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
++ intel_private.i8xx_page = alloc_page(GFP_KERNEL);
+ if (!intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
+ if (!intel_private.i8xx_flush_page)
+- intel_i830_fini_flush();
++ i830_cleanup();
+ }
+
+ /* The chipset_flush interface needs to get data that has already been
+@@ -799,7 +844,7 @@ static void intel_i830_setup_flush(void)
+ * that buffer out, we just fill 1KB and clflush it out, on the assumption
+ * that it'll push whatever was in there out. It appears to work.
+ */
+-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
++static void i830_chipset_flush(void)
+ {
+ unsigned int *pg = intel_private.i8xx_flush_page;
+
+@@ -811,169 +856,184 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ }
+
+-/* The intel i830 automatically initializes the agp aperture during POST.
+- * Use the memory already set aside for in the GTT.
+- */
+-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
++static void i830_write_entry(dma_addr_t addr, unsigned int entry,
++ unsigned int flags)
+ {
+- int page_order;
+- struct aper_size_info_fixed *size;
+- int num_entries;
+- u32 temp;
++ u32 pte_flags = I810_PTE_VALID;
++
++ switch (flags) {
++ case AGP_DCACHE_MEMORY:
++ pte_flags |= I810_PTE_LOCAL;
++ break;
++ case AGP_USER_CACHED_MEMORY:
++ pte_flags |= I830_PTE_SYSTEM_CACHED;
++ break;
++ }
+
+- size = agp_bridge->current_size;
+- page_order = size->page_order;
+- num_entries = size->num_entries;
+- agp_bridge->gatt_table_real = NULL;
++ writel(addr | pte_flags, intel_private.gtt + entry);
++}
+
+- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+- temp &= 0xfff80000;
++static void intel_enable_gtt(void)
++{
++ u32 gma_addr;
++ u16 gmch_ctrl;
+
+- intel_private.registers = ioremap(temp, 128 * 4096);
+- if (!intel_private.registers)
+- return -ENOMEM;
++ if (INTEL_GTT_GEN == 2)
++ pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
++ &gma_addr);
++ else
++ pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
++ &gma_addr);
+
+- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+- global_cache_flush(); /* FIXME: ?? */
++ intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+
+- /* we have to call this as early as possible after the MMIO base address is known */
+- intel_i830_init_gtt_entries();
+- if (intel_private.gtt_entries == 0) {
+- iounmap(intel_private.registers);
++ pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
++ gmch_ctrl |= I830_GMCH_ENABLED;
++ pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
++
++ writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
++ intel_private.registers+I810_PGETBL_CTL);
++ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
++}
++
++static int i830_setup(void)
++{
++ u32 reg_addr;
++
++ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
++ reg_addr &= 0xfff80000;
++
++ intel_private.registers = ioremap(reg_addr, KB(64));
++ if (!intel_private.registers)
+ return -ENOMEM;
+- }
+
+- agp_bridge->gatt_table = NULL;
++ intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
++ intel_private.pte_bus_addr =
++ readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+
+- agp_bridge->gatt_bus_addr = temp;
++ intel_i830_setup_flush();
+
+ return 0;
+ }
+
+-/* Return the gatt table to a sane state. Use the top of stolen
+- * memory for the GTT.
+- */
+-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
++static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
+ {
++ agp_bridge->gatt_table_real = NULL;
++ agp_bridge->gatt_table = NULL;
++ agp_bridge->gatt_bus_addr = 0;
++
+ return 0;
+ }
+
+-static int intel_i830_fetch_size(void)
++static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
+ {
+- u16 gmch_ctrl;
+- struct aper_size_info_fixed *values;
++ return 0;
++}
+
+- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
++static int intel_fake_agp_configure(void)
++{
++ int i;
+
+- if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
+- agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
+- /* 855GM/852GM/865G has 128MB aperture size */
+- agp_bridge->current_size = (void *) values;
+- agp_bridge->aperture_size_idx = 0;
+- return values[0].size;
+- }
++ intel_enable_gtt();
+
+- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
++ agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
+
+- if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
+- agp_bridge->current_size = (void *) values;
+- agp_bridge->aperture_size_idx = 0;
+- return values[0].size;
+- } else {
+- agp_bridge->current_size = (void *) (values + 1);
+- agp_bridge->aperture_size_idx = 1;
+- return values[1].size;
++ for (i = intel_private.base.gtt_stolen_entries;
++ i < intel_private.base.gtt_total_entries; i++) {
++ intel_private.driver->write_entry(intel_private.scratch_page_dma,
++ i, 0);
+ }
++ readl(intel_private.gtt+i-1); /* PCI Posting. */
++
++ global_cache_flush();
+
+ return 0;
+ }
+
+-static int intel_i830_configure(void)
++static bool i830_check_flags(unsigned int flags)
+ {
+- struct aper_size_info_fixed *current_size;
+- u32 temp;
+- u16 gmch_ctrl;
+- int i;
++ switch (flags) {
++ case 0:
++ case AGP_PHYS_MEMORY:
++ case AGP_USER_CACHED_MEMORY:
++ case AGP_USER_MEMORY:
++ return true;
++ }
+
+- current_size = A_SIZE_FIX(agp_bridge->current_size);
++ return false;
++}
+
+- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
++static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
++ unsigned int sg_len,
++ unsigned int pg_start,
++ unsigned int flags)
++{
++ struct scatterlist *sg;
++ unsigned int len, m;
++ int i, j;
+
+- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+- gmch_ctrl |= I830_GMCH_ENABLED;
+- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+-
+- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
++ j = pg_start;
+
+- if (agp_bridge->driver->needs_scratch_page) {
+- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
++ /* sg may merge pages, but we have to separate
++ * per-page addr for GTT */
++ for_each_sg(sg_list, sg, sg_len, i) {
++ len = sg_dma_len(sg) >> PAGE_SHIFT;
++ for (m = 0; m < len; m++) {
++ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
++ intel_private.driver->write_entry(addr,
++ j, flags);
++ j++;
+ }
+- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
+ }
+-
+- global_cache_flush();
+-
+- intel_i830_setup_flush();
+- return 0;
+-}
+-
+-static void intel_i830_cleanup(void)
+-{
+- iounmap(intel_private.registers);
++ readl(intel_private.gtt+j-1);
+ }
+
+-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
+- int type)
++static int intel_fake_agp_insert_entries(struct agp_memory *mem,
++ off_t pg_start, int type)
+ {
+- int i, j, num_entries;
+- void *temp;
++ int i, j;
+ int ret = -EINVAL;
+- int mask_type;
+
+ if (mem->page_count == 0)
+ goto out;
+
+- temp = agp_bridge->current_size;
+- num_entries = A_SIZE_FIX(temp)->num_entries;
+-
+- if (pg_start < intel_private.gtt_entries) {
++ if (pg_start < intel_private.base.gtt_stolen_entries) {
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+- pg_start, intel_private.gtt_entries);
++ "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
++ pg_start, intel_private.base.gtt_stolen_entries);
+
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
+ goto out_err;
+ }
+
+- if ((pg_start + mem->page_count) > num_entries)
++ if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
+ goto out_err;
+
+- /* The i830 can't check the GTT for entries since its read only,
+- * depend on the caller to make the correct offset decisions.
+- */
+-
+ if (type != mem->type)
+ goto out_err;
+
+- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+-
+- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+- mask_type != INTEL_AGP_CACHED_MEMORY)
++ if (!intel_private.driver->check_flags(type))
+ goto out_err;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+- writel(agp_bridge->driver->mask_memory(agp_bridge,
+- page_to_phys(mem->pages[i]), mask_type),
+- intel_private.registers+I810_PTE_BASE+(j*4));
++ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
++ ret = intel_agp_map_memory(mem);
++ if (ret != 0)
++ return ret;
++
++ intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
++ pg_start, type);
++ } else {
++ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
++ dma_addr_t addr = page_to_phys(mem->pages[i]);
++ intel_private.driver->write_entry(addr,
++ j, type);
++ }
++ readl(intel_private.gtt+j-1);
+ }
+- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+
+ out:
+ ret = 0;
+@@ -982,29 +1042,39 @@ out_err:
+ return ret;
+ }
+
+-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
+- int type)
++static int intel_fake_agp_remove_entries(struct agp_memory *mem,
++ off_t pg_start, int type)
+ {
+ int i;
+
+ if (mem->page_count == 0)
+ return 0;
+
+- if (pg_start < intel_private.gtt_entries) {
++ if (pg_start < intel_private.base.gtt_stolen_entries) {
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
++ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
++ intel_agp_unmap_memory(mem);
++
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
++ intel_private.driver->write_entry(intel_private.scratch_page_dma,
++ i, 0);
+ }
+- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
++ readl(intel_private.gtt+i-1);
+
+ return 0;
+ }
+
+-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
++static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
++{
++ intel_private.driver->chipset_flush();
++}
++
++static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
++ int type)
+ {
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+@@ -1015,9 +1085,9 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+ static int intel_alloc_chipset_flush_resource(void)
+ {
+ int ret;
+- ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
++ ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+- pcibios_align_resource, agp_bridge->dev);
++ pcibios_align_resource, intel_private.bridge_dev);
+
+ return ret;
+ }
+@@ -1027,11 +1097,11 @@ static void intel_i915_setup_chipset_flush(void)
+ int ret;
+ u32 temp;
+
+- pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
++ pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
+ if (!(temp & 0x1)) {
+ intel_alloc_chipset_flush_resource();
+ intel_private.resource_valid = 1;
+- pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
++ pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ temp &= ~1;
+
+@@ -1050,17 +1120,17 @@ static void intel_i965_g33_setup_chipset_flush(void)
+ u32 temp_hi, temp_lo;
+ int ret;
+
+- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
+- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
++ pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
++ pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
+
+ if (!(temp_lo & 0x1)) {
+
+ intel_alloc_chipset_flush_resource();
+
+ intel_private.resource_valid = 1;
+- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
++ pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
+ upper_32_bits(intel_private.ifp_resource.start));
+- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
++ pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ u64 l64;
+
+@@ -1083,7 +1153,7 @@ static void intel_i9xx_setup_flush(void)
+ if (intel_private.ifp_resource.start)
+ return;
+
+- if (IS_SNB)
++ if (INTEL_GTT_GEN == 6)
+ return;
+
+ /* setup a resource for this object */
+@@ -1091,7 +1161,7 @@ static void intel_i9xx_setup_flush(void)
+ intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+ /* Setup chipset flush for 915 */
+- if (IS_I965 || IS_G33 || IS_G4X) {
++ if (IS_G33 || INTEL_GTT_GEN >= 4) {
+ intel_i965_g33_setup_chipset_flush();
+ } else {
+ intel_i915_setup_chipset_flush();
+@@ -1104,41 +1174,7 @@ static void intel_i9xx_setup_flush(void)
+ "can't ioremap flush page - no chipset flushing\n");
+ }
+
+-static int intel_i9xx_configure(void)
+-{
+- struct aper_size_info_fixed *current_size;
+- u32 temp;
+- u16 gmch_ctrl;
+- int i;
+-
+- current_size = A_SIZE_FIX(agp_bridge->current_size);
+-
+- pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
+-
+- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+-
+- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+- gmch_ctrl |= I830_GMCH_ENABLED;
+- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+-
+- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+-
+- if (agp_bridge->driver->needs_scratch_page) {
+- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
+- writel(agp_bridge->scratch_page, intel_private.gtt+i);
+- }
+- readl(intel_private.gtt+i-1); /* PCI Posting. */
+- }
+-
+- global_cache_flush();
+-
+- intel_i9xx_setup_flush();
+-
+- return 0;
+-}
+-
+-static void intel_i915_cleanup(void)
++static void i9xx_cleanup(void)
+ {
+ if (intel_private.i9xx_flush_page)
+ iounmap(intel_private.i9xx_flush_page);
+@@ -1146,320 +1182,93 @@ static void intel_i915_cleanup(void)
+ release_resource(&intel_private.ifp_resource);
+ intel_private.ifp_resource.start = 0;
+ intel_private.resource_valid = 0;
+- iounmap(intel_private.gtt);
+- iounmap(intel_private.registers);
+ }
+
+-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
++static void i9xx_chipset_flush(void)
+ {
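++	/* i9xx_flush_page is the chipset's 'isolated flush page' mapped by
++	 * intel_i9xx_setup_flush(); writing any value to it triggers the
++	 * chipset-level flush.
++	 */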
+ if (intel_private.i9xx_flush_page)
+ writel(1, intel_private.i9xx_flush_page);
+ }
+
+-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
+- int type)
++static void i965_write_entry(dma_addr_t addr, unsigned int entry,
++ unsigned int flags)
+ {
+- int num_entries;
+- void *temp;
+- int ret = -EINVAL;
+- int mask_type;
+-
+- if (mem->page_count == 0)
+- goto out;
+-
+- temp = agp_bridge->current_size;
+- num_entries = A_SIZE_FIX(temp)->num_entries;
+-
+- if (pg_start < intel_private.gtt_entries) {
+- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+- pg_start, intel_private.gtt_entries);
+-
+- dev_info(&intel_private.pcidev->dev,
+- "trying to insert into local/stolen memory\n");
+- goto out_err;
+- }
+-
+- if ((pg_start + mem->page_count) > num_entries)
+- goto out_err;
+-
+- /* The i915 can't check the GTT for entries since it's read only;
+- * depend on the caller to make the correct offset decisions.
+- */
+-
+- if (type != mem->type)
+- goto out_err;
+-
+- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+-
+- if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+- mask_type != INTEL_AGP_CACHED_MEMORY)
+- goto out_err;
+-
+- if (!mem->is_flushed)
+- global_cache_flush();
+-
+- intel_agp_insert_sg_entries(mem, pg_start, mask_type);
+-
+- out:
+- ret = 0;
+- out_err:
+- mem->is_flushed = true;
+- return ret;
++ /* Shift high bits down */
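++	/* (The i965 can address 36 bits of physical memory, but a PTE is a
++	 * 32-bit word: address bits 35:32 live in PTE bits 7:4, which is
++	 * what (addr >> 28) & 0xf0 extracts - e.g. 0x3_2000_0000 becomes
++	 * 0x20000030 before the valid bit is ORed in.)
++	 */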
++ addr |= (addr >> 28) & 0xf0;
++ writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
+ }
+
+-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
+- int type)
++static bool gen6_check_flags(unsigned int flags)
+ {
+- int i;
+-
+- if (mem->page_count == 0)
+- return 0;
+-
+- if (pg_start < intel_private.gtt_entries) {
+- dev_info(&intel_private.pcidev->dev,
+- "trying to disable local/stolen memory\n");
+- return -EINVAL;
+- }
+-
+- for (i = pg_start; i < (mem->page_count + pg_start); i++)
+- writel(agp_bridge->scratch_page, intel_private.gtt+i);
+-
+- readl(intel_private.gtt+i-1);
+-
+- return 0;
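++	/* Any flag combination is representable on gen6; the actual
++	 * translation happens in gen6_write_entry().
++	 */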
++ return true;
+ }
+
+-/* Return the aperture size by just checking the resource length. The effect
+- * described in the spec of the MSAC registers is just changing of the
+- * resource size.
+- */
+-static int intel_i9xx_fetch_size(void)
++static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
++ unsigned int flags)
+ {
+- int num_sizes = ARRAY_SIZE(intel_i830_sizes);
+- int aper_size; /* size in megabytes */
+- int i;
+-
+- aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
+-
+- for (i = 0; i < num_sizes; i++) {
+- if (aper_size == intel_i830_sizes[i].size) {
+- agp_bridge->current_size = intel_i830_sizes + i;
+- return aper_size;
+- }
++ unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
++ unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
++ u32 pte_flags;
++
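++	/* Translate the AGP memory-type flags into gen6 PTE cacheability
++	 * bits: AGP_USER_MEMORY maps uncached, LLC+MLC keeps its level and
++	 * everything else defaults to LLC cached; GFDT is carried through
++	 * as an extra PTE bit for the cached types.
++	 */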
++ if (type_mask == AGP_USER_MEMORY)
++ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
++ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
++ pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
++ if (gfdt)
++ pte_flags |= GEN6_PTE_GFDT;
++ } else { /* set 'normal'/'cached' to LLC by default */
++ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
++ if (gfdt)
++ pte_flags |= GEN6_PTE_GFDT;
+ }
+
+- return 0;
++	/* gen6 uses PTE bits 11:4 for physical address bits 39:32 */
++ addr |= (addr >> 28) & 0xff0;
++ writel(addr | pte_flags, intel_private.gtt + entry);
+ }
+
+-static int intel_i915_get_gtt_size(void)
++static void gen6_cleanup(void)
+ {
+- int size;
+-
+- if (IS_G33) {
+- u16 gmch_ctrl;
+-
+- /* G33's GTT size defined in gmch_ctrl */
+- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+- case I830_GMCH_GMS_STOLEN_512:
+- size = 512;
+- break;
+- case I830_GMCH_GMS_STOLEN_1024:
+- size = 1024;
+- break;
+- case I830_GMCH_GMS_STOLEN_8192:
+- size = 8*1024;
+- break;
+- default:
+- dev_info(&agp_bridge->dev->dev,
+- "unknown page table size 0x%x, assuming 512KB\n",
+- (gmch_ctrl & I830_GMCH_GMS_MASK));
+- size = 512;
+- }
+- } else {
+- /* On previous hardware, the GTT size was just what was
+- * required to map the aperture.
+- */
+- size = agp_bridge->driver->fetch_size();
+- }
+-
+- return KB(size);
+ }
+
+-/* The intel i915 automatically initializes the agp aperture during POST.
+- * Use the memory already set aside for in the GTT.
+- */
+-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
++static int i9xx_setup(void)
+ {
+- int page_order;
+- struct aper_size_info_fixed *size;
+- int num_entries;
+- u32 temp, temp2;
+- int gtt_map_size;
+-
+- size = agp_bridge->current_size;
+- page_order = size->page_order;
+- num_entries = size->num_entries;
+- agp_bridge->gatt_table_real = NULL;
+-
+- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+- pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
+-
+- gtt_map_size = intel_i915_get_gtt_size();
++ u32 reg_addr;
+
+- intel_private.gtt = ioremap(temp2, gtt_map_size);
+- if (!intel_private.gtt)
+- return -ENOMEM;
+-
+- intel_private.gtt_total_size = gtt_map_size / 4;
+-
+- temp &= 0xfff80000;
+-
+- intel_private.registers = ioremap(temp, 128 * 4096);
+- if (!intel_private.registers) {
+- iounmap(intel_private.gtt);
+- return -ENOMEM;
+- }
++ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
+
+- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+- global_cache_flush(); /* FIXME: ? */
++ reg_addr &= 0xfff80000;
+
+- /* we have to call this as early as possible after the MMIO base address is known */
+- intel_i830_init_gtt_entries();
+- if (intel_private.gtt_entries == 0) {
+- iounmap(intel_private.gtt);
+- iounmap(intel_private.registers);
++ intel_private.registers = ioremap(reg_addr, 128 * 4096);
++ if (!intel_private.registers)
+ return -ENOMEM;
+- }
+
+- agp_bridge->gatt_table = NULL;
+-
+- agp_bridge->gatt_bus_addr = temp;
++ if (INTEL_GTT_GEN == 3) {
++ u32 gtt_addr;
+
+- return 0;
+-}
+-
+-/*
+- * The i965 supports 36-bit physical addresses, but to keep
+- * the format of the GTT the same, the bits that don't fit
+- * in a 32-bit word are shifted down to bits 4..7.
+- *
+- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
+- * is always zero on 32-bit architectures, so no need to make
+- * this conditional.
+- */
+-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
+- dma_addr_t addr, int type)
+-{
+- /* Shift high bits down */
+- addr |= (addr >> 28) & 0xf0;
+-
+- /* Type checking must be done elsewhere */
+- return addr | bridge->driver->masks[type].mask;
+-}
+-
+-static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
+- dma_addr_t addr, int type)
+-{
+- /* gen6 has bit11-4 for physical addr bit39-32 */
+- addr |= (addr >> 28) & 0xff0;
+-
+- /* Type checking must be done elsewhere */
+- return addr | bridge->driver->masks[type].mask;
+-}
+-
+-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
+-{
+- u16 snb_gmch_ctl;
+-
+- switch (agp_bridge->dev->device) {
+- case PCI_DEVICE_ID_INTEL_GM45_HB:
+- case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
+- case PCI_DEVICE_ID_INTEL_Q45_HB:
+- case PCI_DEVICE_ID_INTEL_G45_HB:
+- case PCI_DEVICE_ID_INTEL_G41_HB:
+- case PCI_DEVICE_ID_INTEL_B43_HB:
+- case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+- case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+- case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+- case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+- *gtt_offset = *gtt_size = MB(2);
+- break;
+- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
+- *gtt_offset = MB(2);
++ pci_read_config_dword(intel_private.pcidev,
++ I915_PTEADDR, &gtt_addr);
++ intel_private.gtt_bus_addr = gtt_addr;
++ } else {
++ u32 gtt_offset;
+
+- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+- default:
+- case SNB_GTT_SIZE_0M:
+- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+- *gtt_size = MB(0);
+- break;
+- case SNB_GTT_SIZE_1M:
+- *gtt_size = MB(1);
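++		/* From gen 4 on, the GTT sits at a fixed offset inside the
++		 * MMIO BAR instead of behind a separate PTE base register:
++		 * 512KB in for gen 4, 2MB in for gen 5 and 6 (as this
++		 * driver counts generations).
++		 */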
++ switch (INTEL_GTT_GEN) {
++ case 5:
++ case 6:
++ gtt_offset = MB(2);
+ break;
+- case SNB_GTT_SIZE_2M:
+- *gtt_size = MB(2);
++ case 4:
++ default:
++ gtt_offset = KB(512);
+ break;
+ }
+- break;
+- default:
+- *gtt_offset = *gtt_size = KB(512);
+- }
+-}
+-
+-/* The intel i965 automatically initializes the agp aperture during POST.
+- * Use the memory already set aside for in the GTT.
+- */
+-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
+-{
+- int page_order;
+- struct aper_size_info_fixed *size;
+- int num_entries;
+- u32 temp;
+- int gtt_offset, gtt_size;
+-
+- size = agp_bridge->current_size;
+- page_order = size->page_order;
+- num_entries = size->num_entries;
+- agp_bridge->gatt_table_real = NULL;
+-
+- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+-
+- temp &= 0xfff00000;
+-
+- intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
+-
+- intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
+-
+- if (!intel_private.gtt)
+- return -ENOMEM;
+-
+- intel_private.gtt_total_size = gtt_size / 4;
+-
+- intel_private.registers = ioremap(temp, 128 * 4096);
+- if (!intel_private.registers) {
+- iounmap(intel_private.gtt);
+- return -ENOMEM;
+- }
+-
+- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+- global_cache_flush(); /* FIXME: ? */
+-
+- /* we have to call this as early as possible after the MMIO base address is known */
+- intel_i830_init_gtt_entries();
+- if (intel_private.gtt_entries == 0) {
+- iounmap(intel_private.gtt);
+- iounmap(intel_private.registers);
+- return -ENOMEM;
++ intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+ }
+
+- agp_bridge->gatt_table = NULL;
++ intel_private.pte_bus_addr =
++ readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+
+- agp_bridge->gatt_bus_addr = temp;
++ intel_i9xx_setup_flush();
+
+ return 0;
+ }
+@@ -1475,7 +1284,7 @@ static const struct agp_bridge_driver intel_810_driver = {
+ .cleanup = intel_i810_cleanup,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+- .agp_enable = intel_i810_agp_enable,
++ .agp_enable = intel_fake_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+@@ -1490,161 +1299,282 @@ static const struct agp_bridge_driver intel_810_driver = {
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ };
+
+-static const struct agp_bridge_driver intel_830_driver = {
++static const struct agp_bridge_driver intel_fake_agp_driver = {
+ .owner = THIS_MODULE,
+- .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+- .num_aperture_sizes = 4,
+- .needs_scratch_page = true,
+- .configure = intel_i830_configure,
+- .fetch_size = intel_i830_fetch_size,
+- .cleanup = intel_i830_cleanup,
+- .mask_memory = intel_i810_mask_memory,
+- .masks = intel_i810_masks,
+- .agp_enable = intel_i810_agp_enable,
++ .aperture_sizes = intel_fake_agp_sizes,
++ .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
++ .configure = intel_fake_agp_configure,
++ .fetch_size = intel_fake_agp_fetch_size,
++ .cleanup = intel_gtt_cleanup,
++ .agp_enable = intel_fake_agp_enable,
+ .cache_flush = global_cache_flush,
+- .create_gatt_table = intel_i830_create_gatt_table,
+- .free_gatt_table = intel_i830_free_gatt_table,
+- .insert_memory = intel_i830_insert_entries,
+- .remove_memory = intel_i830_remove_entries,
+- .alloc_by_type = intel_i830_alloc_by_type,
++ .create_gatt_table = intel_fake_agp_create_gatt_table,
++ .free_gatt_table = intel_fake_agp_free_gatt_table,
++ .insert_memory = intel_fake_agp_insert_entries,
++ .remove_memory = intel_fake_agp_remove_entries,
++ .alloc_by_type = intel_fake_agp_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+- .chipset_flush = intel_i830_chipset_flush,
++ .chipset_flush = intel_fake_agp_chipset_flush,
+ };
+
+-static const struct agp_bridge_driver intel_915_driver = {
+- .owner = THIS_MODULE,
+- .aperture_sizes = intel_i830_sizes,
+- .size_type = FIXED_APER_SIZE,
+- .num_aperture_sizes = 4,
+- .needs_scratch_page = true,
+- .configure = intel_i9xx_configure,
+- .fetch_size = intel_i9xx_fetch_size,
+- .cleanup = intel_i915_cleanup,
+- .mask_memory = intel_i810_mask_memory,
+- .masks = intel_i810_masks,
+- .agp_enable = intel_i810_agp_enable,
+- .cache_flush = global_cache_flush,
+- .create_gatt_table = intel_i915_create_gatt_table,
+- .free_gatt_table = intel_i830_free_gatt_table,
+- .insert_memory = intel_i915_insert_entries,
+- .remove_memory = intel_i915_remove_entries,
+- .alloc_by_type = intel_i830_alloc_by_type,
+- .free_by_type = intel_i810_free_by_type,
+- .agp_alloc_page = agp_generic_alloc_page,
+- .agp_alloc_pages = agp_generic_alloc_pages,
+- .agp_destroy_page = agp_generic_destroy_page,
+- .agp_destroy_pages = agp_generic_destroy_pages,
+- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+- .chipset_flush = intel_i915_chipset_flush,
+-#ifdef USE_PCI_DMA_API
+- .agp_map_page = intel_agp_map_page,
+- .agp_unmap_page = intel_agp_unmap_page,
+- .agp_map_memory = intel_agp_map_memory,
+- .agp_unmap_memory = intel_agp_unmap_memory,
+-#endif
++static const struct intel_gtt_driver i81x_gtt_driver = {
++ .gen = 1,
++ .dma_mask_size = 32,
+ };
+-
+-static const struct agp_bridge_driver intel_i965_driver = {
+- .owner = THIS_MODULE,
+- .aperture_sizes = intel_i830_sizes,
+- .size_type = FIXED_APER_SIZE,
+- .num_aperture_sizes = 4,
+- .needs_scratch_page = true,
+- .configure = intel_i9xx_configure,
+- .fetch_size = intel_i9xx_fetch_size,
+- .cleanup = intel_i915_cleanup,
+- .mask_memory = intel_i965_mask_memory,
+- .masks = intel_i810_masks,
+- .agp_enable = intel_i810_agp_enable,
+- .cache_flush = global_cache_flush,
+- .create_gatt_table = intel_i965_create_gatt_table,
+- .free_gatt_table = intel_i830_free_gatt_table,
+- .insert_memory = intel_i915_insert_entries,
+- .remove_memory = intel_i915_remove_entries,
+- .alloc_by_type = intel_i830_alloc_by_type,
+- .free_by_type = intel_i810_free_by_type,
+- .agp_alloc_page = agp_generic_alloc_page,
+- .agp_alloc_pages = agp_generic_alloc_pages,
+- .agp_destroy_page = agp_generic_destroy_page,
+- .agp_destroy_pages = agp_generic_destroy_pages,
+- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+- .chipset_flush = intel_i915_chipset_flush,
+-#ifdef USE_PCI_DMA_API
+- .agp_map_page = intel_agp_map_page,
+- .agp_unmap_page = intel_agp_unmap_page,
+- .agp_map_memory = intel_agp_map_memory,
+- .agp_unmap_memory = intel_agp_unmap_memory,
+-#endif
++static const struct intel_gtt_driver i8xx_gtt_driver = {
++ .gen = 2,
++ .setup = i830_setup,
++ .cleanup = i830_cleanup,
++ .write_entry = i830_write_entry,
++ .dma_mask_size = 32,
++ .check_flags = i830_check_flags,
++ .chipset_flush = i830_chipset_flush,
+ };
+-
+-static const struct agp_bridge_driver intel_gen6_driver = {
+- .owner = THIS_MODULE,
+- .aperture_sizes = intel_i830_sizes,
+- .size_type = FIXED_APER_SIZE,
+- .num_aperture_sizes = 4,
+- .needs_scratch_page = true,
+- .configure = intel_i9xx_configure,
+- .fetch_size = intel_i9xx_fetch_size,
+- .cleanup = intel_i915_cleanup,
+- .mask_memory = intel_gen6_mask_memory,
+- .masks = intel_gen6_masks,
+- .agp_enable = intel_i810_agp_enable,
+- .cache_flush = global_cache_flush,
+- .create_gatt_table = intel_i965_create_gatt_table,
+- .free_gatt_table = intel_i830_free_gatt_table,
+- .insert_memory = intel_i915_insert_entries,
+- .remove_memory = intel_i915_remove_entries,
+- .alloc_by_type = intel_i830_alloc_by_type,
+- .free_by_type = intel_i810_free_by_type,
+- .agp_alloc_page = agp_generic_alloc_page,
+- .agp_alloc_pages = agp_generic_alloc_pages,
+- .agp_destroy_page = agp_generic_destroy_page,
+- .agp_destroy_pages = agp_generic_destroy_pages,
+- .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
+- .chipset_flush = intel_i915_chipset_flush,
+-#ifdef USE_PCI_DMA_API
+- .agp_map_page = intel_agp_map_page,
+- .agp_unmap_page = intel_agp_unmap_page,
+- .agp_map_memory = intel_agp_map_memory,
+- .agp_unmap_memory = intel_agp_unmap_memory,
+-#endif
++static const struct intel_gtt_driver i915_gtt_driver = {
++ .gen = 3,
++ .setup = i9xx_setup,
++ .cleanup = i9xx_cleanup,
++ /* i945 is the last gpu to need phys mem (for overlay and cursors). */
++ .write_entry = i830_write_entry,
++ .dma_mask_size = 32,
++ .check_flags = i830_check_flags,
++ .chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver g33_gtt_driver = {
++ .gen = 3,
++ .is_g33 = 1,
++ .setup = i9xx_setup,
++ .cleanup = i9xx_cleanup,
++ .write_entry = i965_write_entry,
++ .dma_mask_size = 36,
++ .check_flags = i830_check_flags,
++ .chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver pineview_gtt_driver = {
++ .gen = 3,
++ .is_pineview = 1, .is_g33 = 1,
++ .setup = i9xx_setup,
++ .cleanup = i9xx_cleanup,
++ .write_entry = i965_write_entry,
++ .dma_mask_size = 36,
++ .check_flags = i830_check_flags,
++ .chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver i965_gtt_driver = {
++ .gen = 4,
++ .setup = i9xx_setup,
++ .cleanup = i9xx_cleanup,
++ .write_entry = i965_write_entry,
++ .dma_mask_size = 36,
++ .check_flags = i830_check_flags,
++ .chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver g4x_gtt_driver = {
++ .gen = 5,
++ .setup = i9xx_setup,
++ .cleanup = i9xx_cleanup,
++ .write_entry = i965_write_entry,
++ .dma_mask_size = 36,
++ .check_flags = i830_check_flags,
++ .chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver ironlake_gtt_driver = {
++ .gen = 5,
++ .is_ironlake = 1,
++ .setup = i9xx_setup,
++ .cleanup = i9xx_cleanup,
++ .write_entry = i965_write_entry,
++ .dma_mask_size = 36,
++ .check_flags = i830_check_flags,
++ .chipset_flush = i9xx_chipset_flush,
++};
++static const struct intel_gtt_driver sandybridge_gtt_driver = {
++ .gen = 6,
++ .setup = i9xx_setup,
++ .cleanup = gen6_cleanup,
++ .write_entry = gen6_write_entry,
++ .dma_mask_size = 40,
++ .check_flags = gen6_check_flags,
++ .chipset_flush = i9xx_chipset_flush,
+ };
+
+-static const struct agp_bridge_driver intel_g33_driver = {
+- .owner = THIS_MODULE,
+- .aperture_sizes = intel_i830_sizes,
+- .size_type = FIXED_APER_SIZE,
+- .num_aperture_sizes = 4,
+- .needs_scratch_page = true,
+- .configure = intel_i9xx_configure,
+- .fetch_size = intel_i9xx_fetch_size,
+- .cleanup = intel_i915_cleanup,
+- .mask_memory = intel_i965_mask_memory,
+- .masks = intel_i810_masks,
+- .agp_enable = intel_i810_agp_enable,
+- .cache_flush = global_cache_flush,
+- .create_gatt_table = intel_i915_create_gatt_table,
+- .free_gatt_table = intel_i830_free_gatt_table,
+- .insert_memory = intel_i915_insert_entries,
+- .remove_memory = intel_i915_remove_entries,
+- .alloc_by_type = intel_i830_alloc_by_type,
+- .free_by_type = intel_i810_free_by_type,
+- .agp_alloc_page = agp_generic_alloc_page,
+- .agp_alloc_pages = agp_generic_alloc_pages,
+- .agp_destroy_page = agp_generic_destroy_page,
+- .agp_destroy_pages = agp_generic_destroy_pages,
+- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+- .chipset_flush = intel_i915_chipset_flush,
+-#ifdef USE_PCI_DMA_API
+- .agp_map_page = intel_agp_map_page,
+- .agp_unmap_page = intel_agp_unmap_page,
+- .agp_map_memory = intel_agp_map_memory,
+- .agp_unmap_memory = intel_agp_unmap_memory,
+-#endif
++/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
++ * gmch_driver and gtt_driver must be non-null, and find_gmch() will
++ * determine which entry should be used if its gmch_chip_id is present.
++ */
++static const struct intel_gtt_driver_description {
++ unsigned int gmch_chip_id;
++ char *name;
++ const struct agp_bridge_driver *gmch_driver;
++ const struct intel_gtt_driver *gtt_driver;
++} intel_gtt_chipsets[] = {
++ { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
++ &i81x_gtt_driver},
++ { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
++ &i81x_gtt_driver},
++ { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
++ &i81x_gtt_driver},
++ { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
++ &i81x_gtt_driver},
++ { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
++ &intel_fake_agp_driver, &i8xx_gtt_driver},
++ { PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
++ &intel_fake_agp_driver, &i8xx_gtt_driver},
++ { PCI_DEVICE_ID_INTEL_82854_IG, "854",
++ &intel_fake_agp_driver, &i8xx_gtt_driver},
++ { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
++ &intel_fake_agp_driver, &i8xx_gtt_driver},
++ { PCI_DEVICE_ID_INTEL_82865_IG, "865",
++ &intel_fake_agp_driver, &i8xx_gtt_driver},
++ { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
++ &intel_fake_agp_driver, &i915_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
++ &intel_fake_agp_driver, &i915_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
++ &intel_fake_agp_driver, &i915_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
++ &intel_fake_agp_driver, &i915_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
++ &intel_fake_agp_driver, &i915_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
++ &intel_fake_agp_driver, &i915_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
++ &intel_fake_agp_driver, &i965_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
++ &intel_fake_agp_driver, &i965_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
++ &intel_fake_agp_driver, &i965_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
++ &intel_fake_agp_driver, &i965_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
++ &intel_fake_agp_driver, &i965_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
++ &intel_fake_agp_driver, &i965_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
++ &intel_fake_agp_driver, &g33_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
++ &intel_fake_agp_driver, &g33_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
++ &intel_fake_agp_driver, &g33_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
++ &intel_fake_agp_driver, &pineview_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
++ &intel_fake_agp_driver, &pineview_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
++ &intel_fake_agp_driver, &g4x_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
++ &intel_fake_agp_driver, &g4x_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
++ &intel_fake_agp_driver, &g4x_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
++ &intel_fake_agp_driver, &g4x_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
++ &intel_fake_agp_driver, &g4x_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
++ &intel_fake_agp_driver, &g4x_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
++ &intel_fake_agp_driver, &g4x_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
++ "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
++ "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
++ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
++ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
++ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
++ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
++ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
++ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
++ "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
++ { 0, NULL, NULL }
+ };
++
++static int find_gmch(u16 device)
++{
++ struct pci_dev *gmch_device;
++
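++	/* Prefer a device at PCI function 0: if the first match for this
++	 * id sits at another function, try the next matching device.
++	 */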
++ gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
++ if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
++ gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
++ device, gmch_device);
++ }
++
++ if (!gmch_device)
++ return 0;
++
++ intel_private.pcidev = gmch_device;
++ return 1;
++}
++
++int intel_gmch_probe(struct pci_dev *pdev,
++ struct agp_bridge_data *bridge)
++{
++ int i, mask;
++ bridge->driver = NULL;
++
++ for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
++ if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
++ bridge->driver =
++ intel_gtt_chipsets[i].gmch_driver;
++ intel_private.driver =
++ intel_gtt_chipsets[i].gtt_driver;
++ break;
++ }
++ }
++
++ if (!bridge->driver)
++ return 0;
++
++ bridge->dev_private_data = &intel_private;
++ bridge->dev = pdev;
++
++ intel_private.bridge_dev = pci_dev_get(pdev);
++
++ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
++
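++	/* Each GTT driver declares how many physical address bits the
++	 * hardware can use (32, 36 or 40 here); size the DMA masks to
++	 * match.
++	 */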
++ mask = intel_private.driver->dma_mask_size;
++ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
++ dev_err(&intel_private.pcidev->dev,
++ "set gfx device dma mask %d-bit failed!\n", mask);
++ else
++ pci_set_consistent_dma_mask(intel_private.pcidev,
++ DMA_BIT_MASK(mask));
++
++ if (bridge->driver == &intel_810_driver)
++ return 1;
++
++ if (intel_gtt_init() != 0)
++ return 0;
++
++ return 1;
++}
++EXPORT_SYMBOL(intel_gmch_probe);
++
++struct intel_gtt *intel_gtt_get(void)
++{
++ return &intel_private.base;
++}
++EXPORT_SYMBOL(intel_gtt_get);
++
++void intel_gmch_remove(struct pci_dev *pdev)
++{
++ if (intel_private.pcidev)
++ pci_dev_put(intel_private.pcidev);
++ if (intel_private.bridge_dev)
++ pci_dev_put(intel_private.bridge_dev);
++}
++EXPORT_SYMBOL(intel_gmch_remove);
++
++MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
++MODULE_LICENSE("GPL and additional rights");
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 96e9631..7f356af 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -1268,34 +1268,51 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+ }
+
+ #define HDMI_IDENTIFIER 0x000C03
++#define AUDIO_BLOCK 0x01
+ #define VENDOR_BLOCK 0x03
++#define EDID_BASIC_AUDIO (1 << 6)
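++/* AUDIO_BLOCK and VENDOR_BLOCK are CEA-861 data block tag codes (bits 7:5
++ * of each block header); EDID_BASIC_AUDIO is the 'basic audio' flag in
++ * byte 3 of the CEA extension block.
++ */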
++
+ /**
+- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+- * @edid: monitor EDID information
+- *
+- * Parse the CEA extension according to CEA-861-B.
+- * Return true if HDMI, false if not or unknown.
++ * Search EDID for CEA extension block.
+ */
+-bool drm_detect_hdmi_monitor(struct edid *edid)
++static u8 *drm_find_cea_extension(struct edid *edid)
+ {
+- char *edid_ext = NULL;
+- int i, hdmi_id;
+- int start_offset, end_offset;
+- bool is_hdmi = false;
++ u8 *edid_ext = NULL;
++ int i;
+
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+- goto end;
++ return NULL;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
+- /* This block is CEA extension */
+- if (edid_ext[0] == 0x02)
++ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
++ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
++ return NULL;
++
++ return edid_ext;
++}
++
++/**
++ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
++ * @edid: monitor EDID information
++ *
++ * Parse the CEA extension according to CEA-861-B.
++ * Return true if HDMI, false if not or unknown.
++ */
++bool drm_detect_hdmi_monitor(struct edid *edid)
++{
++ u8 *edid_ext;
++ int i, hdmi_id;
++ int start_offset, end_offset;
++ bool is_hdmi = false;
++
++ edid_ext = drm_find_cea_extension(edid);
++ if (!edid_ext)
+ goto end;
+
+ /* Data block offset in CEA extension block */
+@@ -1326,6 +1343,53 @@ end:
+ EXPORT_SYMBOL(drm_detect_hdmi_monitor);
+
+ /**
++ * drm_detect_monitor_audio - check monitor audio capability
++ * @edid: monitor EDID information
++ *
++ * The monitor should have a CEA extension block. If it advertises 'basic
++ * audio' there, report audio support directly. Otherwise, if any CEA audio
++ * data block lists a supported audio format, assume at least 'basic audio'
++ * support, even though the 'basic audio' flag is not set in the EDID.
++ */
++bool drm_detect_monitor_audio(struct edid *edid)
++{
++ u8 *edid_ext;
++ int i, j;
++ bool has_audio = false;
++ int start_offset, end_offset;
++
++ edid_ext = drm_find_cea_extension(edid);
++ if (!edid_ext)
++ goto end;
++
++ has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
++
++ if (has_audio) {
++ DRM_DEBUG_KMS("Monitor has basic audio support\n");
++ goto end;
++ }
++
++ /* Data block offset in CEA extension block */
++ start_offset = 4;
++ end_offset = edid_ext[2];
++
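++	/* Walk the CEA data block collection: each block starts with a
++	 * header byte carrying the tag in bits 7:5 and the payload length
++	 * in bits 4:0; audio blocks hold 3-byte Short Audio Descriptors
++	 * whose first byte encodes the format code in bits 6:3.
++	 */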
++ for (i = start_offset; i < end_offset;
++ i += ((edid_ext[i] & 0x1f) + 1)) {
++ if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
++ has_audio = true;
++ for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
++ DRM_DEBUG_KMS("CEA audio format %d\n",
++ (edid_ext[i + j] >> 3) & 0xf);
++ goto end;
++ }
++ }
++end:
++ return has_audio;
++}
++EXPORT_SYMBOL(drm_detect_monitor_audio);
++
++/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
+index 5c8e534..fdc833d 100644
+--- a/drivers/gpu/drm/i915/Makefile
++++ b/drivers/gpu/drm/i915/Makefile
+@@ -26,15 +26,17 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+ intel_dvo.o \
+ intel_ringbuffer.o \
+ intel_overlay.o \
++ intel_opregion.o \
+ dvo_ch7xxx.o \
+ dvo_ch7017.o \
+ dvo_ivch.o \
+ dvo_tfp410.o \
+ dvo_sil164.o
+
+-i915-$(CONFIG_ACPI) += i915_opregion.o
+ i915-$(CONFIG_COMPAT) += i915_ioc32.o
+
++i915-$(CONFIG_ACPI) += intel_acpi.o
++
+ obj-$(CONFIG_DRM_I915) += i915.o
+
+ CFLAGS_i915_trace_points.o := -I$(src)
+diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
+index 14d5980..af70337 100644
+--- a/drivers/gpu/drm/i915/dvo_ch7017.c
++++ b/drivers/gpu/drm/i915/dvo_ch7017.c
+@@ -165,67 +165,44 @@ struct ch7017_priv {
+ static void ch7017_dump_regs(struct intel_dvo_device *dvo);
+ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
+
+-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
++static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
+ {
+- struct i2c_adapter *adapter = dvo->i2c_bus;
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+- u8 out_buf[2];
+- u8 in_buf[2];
+-
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+- .buf = out_buf,
++ .buf = &addr,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+- .buf = in_buf,
++ .buf = val,
+ }
+ };
+-
+- out_buf[0] = addr;
+- out_buf[1] = 0;
+-
+- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+- *val= in_buf[0];
+- return true;
+- };
+-
+- return false;
++ return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
+ }
+
+-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
++static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
+ {
+- struct i2c_adapter *adapter = dvo->i2c_bus;
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+- uint8_t out_buf[2];
++ uint8_t buf[2] = { addr, val };
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+- .buf = out_buf,
++ .buf = buf,
+ };
+-
+- out_buf[0] = addr;
+- out_buf[1] = val;
+-
+- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+- return true;
+-
+- return false;
++ return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
+ }
+
+ /** Probes for a CH7017 on the given bus and slave address. */
+ static bool ch7017_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+ {
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ struct ch7017_priv *priv;
+- uint8_t val;
++ const char *str;
++ u8 val;
+
+ priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
+ if (priv == NULL)
+@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
+ if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
+ goto fail;
+
+- if (val != CH7017_DEVICE_ID_VALUE &&
+- val != CH7018_DEVICE_ID_VALUE &&
+- val != CH7019_DEVICE_ID_VALUE) {
++ switch (val) {
++ case CH7017_DEVICE_ID_VALUE:
++ str = "ch7017";
++ break;
++ case CH7018_DEVICE_ID_VALUE:
++ str = "ch7018";
++ break;
++ case CH7019_DEVICE_ID_VALUE:
++ str = "ch7019";
++ break;
++ default:
+ DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+- "Slave %d.\n",
+- val, i2cbus->adapter.name,dvo->slave_addr);
++ "slave %d.\n",
++			      val, adapter->name, dvo->slave_addr);
+ goto fail;
+ }
+
++ DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
++ str, adapter->name, dvo->slave_addr);
+ return true;
++
+ fail:
+ kfree(priv);
+ return false;
+@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
+ }
+
+ /* XXX: Should actually wait for update power status somehow */
+- udelay(20000);
++ msleep(20);
+ }
+
+ static void ch7017_dump_regs(struct intel_dvo_device *dvo)
+diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
+index 6f1944b..7eaa94e 100644
+--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
++++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
+@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ {
+ struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
++ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ };
+
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+- addr, i2cbus->adapter.name, dvo->slave_addr);
++ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+ }
+@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ {
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
++ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+- addr, i2cbus->adapter.name, dvo->slave_addr);
++ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
+index a2ec3f4..a12ed94 100644
+--- a/drivers/gpu/drm/i915/dvo_ivch.c
++++ b/drivers/gpu/drm/i915/dvo_ivch.c
+@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+ {
+ struct ivch_priv *priv = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ u8 out_buf[1];
+ u8 in_buf[2];
+
+@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+
+ out_buf[0] = addr;
+
+- if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
++ if (i2c_transfer(adapter, msgs, 3) == 3) {
+ *data = (in_buf[1] << 8) | in_buf[0];
+ return true;
+ };
+@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+ if (!priv->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+ "%s:%02x.\n",
+- addr, i2cbus->adapter.name, dvo->slave_addr);
++ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+ }
+@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+ {
+ struct ivch_priv *priv = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ u8 out_buf[3];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+ out_buf[1] = data & 0xff;
+ out_buf[2] = data >> 8;
+
+- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
++ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!priv->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+- addr, i2cbus->adapter.name, dvo->slave_addr);
++ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
+index 9b8e676..e4b4091 100644
+--- a/drivers/gpu/drm/i915/dvo_sil164.c
++++ b/drivers/gpu/drm/i915/dvo_sil164.c
+@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ {
+ struct sil164_priv *sil = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
++ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ };
+
+ if (!sil->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+- addr, i2cbus->adapter.name, dvo->slave_addr);
++ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+ }
+@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ {
+ struct sil164_priv *sil= dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
++ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!sil->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+- addr, i2cbus->adapter.name, dvo->slave_addr);
++ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
+index 56f6642..8ab2855 100644
+--- a/drivers/gpu/drm/i915/dvo_tfp410.c
++++ b/drivers/gpu/drm/i915/dvo_tfp410.c
+@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ {
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+- if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
++ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ };
+
+ if (!tfp->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+- addr, i2cbus->adapter.name, dvo->slave_addr);
++ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+ }
+@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ {
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+- struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+- if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
++ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!tfp->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+- addr, i2cbus->adapter.name, dvo->slave_addr);
++ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 5e43d70..1f4f3ce 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -40,9 +40,51 @@
+
+ #if defined(CONFIG_DEBUG_FS)
+
+-#define ACTIVE_LIST 1
+-#define FLUSHING_LIST 2
+-#define INACTIVE_LIST 3
++enum {
++ ACTIVE_LIST,
++ FLUSHING_LIST,
++ INACTIVE_LIST,
++ PINNED_LIST,
++ DEFERRED_FREE_LIST,
++};
++
++static const char *yesno(int v)
++{
++ return v ? "yes" : "no";
++}
++
++static int i915_capabilities(struct seq_file *m, void *data)
++{
++ struct drm_info_node *node = (struct drm_info_node *) m->private;
++ struct drm_device *dev = node->minor->dev;
++ const struct intel_device_info *info = INTEL_INFO(dev);
++
++ seq_printf(m, "gen: %d\n", info->gen);
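++	/* B(x) stringizes the capability flag name and prints it with a
++	 * yes/no value. */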
++#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
++ B(is_mobile);
++ B(is_i85x);
++ B(is_i915g);
++ B(is_i945gm);
++ B(is_g33);
++ B(need_gfx_hws);
++ B(is_g4x);
++ B(is_pineview);
++ B(is_broadwater);
++ B(is_crestline);
++ B(has_fbc);
++ B(has_rc6);
++ B(has_pipe_cxsr);
++ B(has_hotplug);
++ B(cursor_needs_physical);
++ B(has_overlay);
++ B(overlay_needs_physical);
++ B(supports_tv);
++ B(has_bsd_ring);
++ B(has_blt_ring);
++#undef B
++
++ return 0;
++}
+
+ static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+ {
+@@ -64,6 +106,29 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+ }
+ }
+
++static void
++describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
++{
++ seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
++ &obj->base,
++ get_pin_flag(obj),
++ get_tiling_flag(obj),
++ obj->base.size,
++ obj->base.read_domains,
++ obj->base.write_domain,
++ obj->last_rendering_seqno,
++ obj->dirty ? " dirty" : "",
++ obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
++ if (obj->base.name)
++ seq_printf(m, " (name: %d)", obj->base.name);
++ if (obj->fence_reg != I915_FENCE_REG_NONE)
++ seq_printf(m, " (fence: %d)", obj->fence_reg);
++ if (obj->gtt_space != NULL)
++ seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
++ if (obj->ring != NULL)
++ seq_printf(m, " (%s)", obj->ring->name);
++}
++
+ static int i915_gem_object_list_info(struct seq_file *m, void *data)
+ {
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+@@ -72,56 +137,80 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+- spinlock_t *lock = NULL;
++ size_t total_obj_size, total_gtt_size;
++ int count, ret;
++
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
+
+ switch (list) {
+ case ACTIVE_LIST:
+ seq_printf(m, "Active:\n");
+- lock = &dev_priv->mm.active_list_lock;
+- head = &dev_priv->render_ring.active_list;
++ head = &dev_priv->mm.active_list;
+ break;
+ case INACTIVE_LIST:
+ seq_printf(m, "Inactive:\n");
+ head = &dev_priv->mm.inactive_list;
+ break;
++ case PINNED_LIST:
++ seq_printf(m, "Pinned:\n");
++ head = &dev_priv->mm.pinned_list;
++ break;
+ case FLUSHING_LIST:
+ seq_printf(m, "Flushing:\n");
+ head = &dev_priv->mm.flushing_list;
+ break;
++ case DEFERRED_FREE_LIST:
++ seq_printf(m, "Deferred free:\n");
++ head = &dev_priv->mm.deferred_free_list;
++ break;
+ default:
+- DRM_INFO("Ooops, unexpected list\n");
+- return 0;
++ mutex_unlock(&dev->struct_mutex);
++ return -EINVAL;
+ }
+
+- if (lock)
+- spin_lock(lock);
+- list_for_each_entry(obj_priv, head, list)
+- {
+- seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
+- &obj_priv->base,
+- get_pin_flag(obj_priv),
+- obj_priv->base.size,
+- obj_priv->base.read_domains,
+- obj_priv->base.write_domain,
+- obj_priv->last_rendering_seqno,
+- obj_priv->dirty ? " dirty" : "",
+- obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+-
+- if (obj_priv->base.name)
+- seq_printf(m, " (name: %d)", obj_priv->base.name);
+- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+- seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
+- if (obj_priv->gtt_space != NULL)
+- seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
+-
++ total_obj_size = total_gtt_size = count = 0;
++ list_for_each_entry(obj_priv, head, mm_list) {
++ seq_printf(m, " ");
++ describe_obj(m, obj_priv);
+ seq_printf(m, "\n");
++ total_obj_size += obj_priv->base.size;
++ total_gtt_size += obj_priv->gtt_space->size;
++ count++;
+ }
++ mutex_unlock(&dev->struct_mutex);
+
+- if (lock)
+- spin_unlock(lock);
++ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
++ count, total_obj_size, total_gtt_size);
+ return 0;
+ }
+
++static int i915_gem_object_info(struct seq_file *m, void* data)
++{
++ struct drm_info_node *node = (struct drm_info_node *) m->private;
++ struct drm_device *dev = node->minor->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int ret;
++
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
++
++ seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
++ seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
++ seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
++ seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
++ seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
++ seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
++ seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++}
++
+ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+ {
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+@@ -176,6 +265,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *gem_request;
++ int ret;
++
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
+
+ seq_printf(m, "Request:\n");
+ list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+@@ -184,6 +278,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
++ mutex_unlock(&dev->struct_mutex);
++
+ return 0;
+ }
+
+@@ -192,16 +288,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
++ int ret;
++
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
+
+ if (dev_priv->render_ring.status_page.page_addr != NULL) {
+ seq_printf(m, "Current sequence: %d\n",
+- i915_get_gem_seqno(dev, &dev_priv->render_ring));
++ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
+ } else {
+ seq_printf(m, "Current sequence: hws uninitialized\n");
+ }
+ seq_printf(m, "Waiter sequence: %d\n",
+ dev_priv->mm.waiting_gem_seqno);
+ seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
++
++ mutex_unlock(&dev->struct_mutex);
++
+ return 0;
+ }
+
+@@ -211,6 +315,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
++ int ret;
++
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
+
+ if (!HAS_PCH_SPLIT(dev)) {
+ seq_printf(m, "Interrupt enable: %08x\n",
+@@ -247,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
+ atomic_read(&dev_priv->irq_received));
+ if (dev_priv->render_ring.status_page.page_addr != NULL) {
+ seq_printf(m, "Current sequence: %d\n",
+- i915_get_gem_seqno(dev, &dev_priv->render_ring));
++ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
+ } else {
+ seq_printf(m, "Current sequence: hws uninitialized\n");
+ }
+@@ -255,6 +364,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
+ dev_priv->mm.waiting_gem_seqno);
+ seq_printf(m, "IRQ sequence: %d\n",
+ dev_priv->mm.irq_gem_seqno);
++ mutex_unlock(&dev->struct_mutex);
++
+ return 0;
+ }
+
+@@ -263,7 +374,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- int i;
++ int i, ret;
++
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
+
+ seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
+ seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
+@@ -289,6 +404,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
+ seq_printf(m, "\n");
+ }
+ }
++ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+@@ -313,16 +429,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
+ return 0;
+ }
+
+-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
++static void i915_dump_object(struct seq_file *m,
++ struct io_mapping *mapping,
++ struct drm_i915_gem_object *obj_priv)
+ {
+- int page, i;
+- uint32_t *mem;
++ int page, page_count, i;
+
++ page_count = obj_priv->base.size / PAGE_SIZE;
+ for (page = 0; page < page_count; page++) {
+- mem = kmap_atomic(pages[page], KM_USER0);
++ u32 *mem = io_mapping_map_wc(mapping,
++ obj_priv->gtt_offset + page * PAGE_SIZE);
+ for (i = 0; i < PAGE_SIZE; i += 4)
+ seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
+- kunmap_atomic(mem, KM_USER0);
++ io_mapping_unmap(mem);
+ }
+ }
+
+@@ -335,27 +454,20 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+- spin_lock(&dev_priv->mm.active_list_lock);
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
+
+- list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+- list) {
++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ obj = &obj_priv->base;
+ if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+- ret = i915_gem_object_get_pages(obj, 0);
+- if (ret) {
+- DRM_ERROR("Failed to get pages: %d\n", ret);
+- spin_unlock(&dev_priv->mm.active_list_lock);
+- return ret;
+- }
+-
+- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
+- i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
+-
+- i915_gem_object_put_pages(obj);
++ seq_printf(m, "--- gtt_offset = 0x%08x\n",
++ obj_priv->gtt_offset);
++ i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+ }
+ }
+
+- spin_unlock(&dev_priv->mm.active_list_lock);
++ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+@@ -365,20 +477,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- u8 *virt;
+- uint32_t *ptr, off;
++ int ret;
++
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
+
+ if (!dev_priv->render_ring.gem_object) {
+ seq_printf(m, "No ringbuffer setup\n");
+- return 0;
+- }
+-
+- virt = dev_priv->render_ring.virtual_start;
++ } else {
++ u8 *virt = dev_priv->render_ring.virtual_start;
++ uint32_t off;
+
+- for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+- ptr = (uint32_t *)(virt + off);
+- seq_printf(m, "%08x : %08x\n", off, *ptr);
++ for (off = 0; off < dev_priv->render_ring.size; off += 4) {
++ uint32_t *ptr = (uint32_t *)(virt + off);
++ seq_printf(m, "%08x : %08x\n", off, *ptr);
++ }
+ }
++ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+@@ -396,7 +512,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
+ seq_printf(m, "RingHead : %08x\n", head);
+ seq_printf(m, "RingTail : %08x\n", tail);
+ seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
+- seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
++ seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
+
+ return 0;
+ }
+@@ -458,7 +574,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
+ seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
+ seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ }
+@@ -642,6 +758,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
+ } else {
+ seq_printf(m, "FBC disabled: ");
+ switch (dev_priv->no_fbc_reason) {
++ case FBC_NO_OUTPUT:
++ seq_printf(m, "no outputs");
++ break;
+ case FBC_STOLEN_TOO_SMALL:
+ seq_printf(m, "not enough stolen memory");
+ break;
+@@ -675,15 +794,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool sr_enabled = false;
+
+- if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
++ if (IS_GEN5(dev))
++ sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
++ else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev))
+ sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev))
+ sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+
+- seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
+- "disabled");
++ seq_printf(m, "self-refresh: %s\n",
++ sr_enabled ? "enabled" : "disabled");
+
+ return 0;
+ }
+@@ -694,10 +815,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long temp, chipset, gfx;
++ int ret;
++
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
+
+ temp = i915_mch_val(dev_priv);
+ chipset = i915_chipset_val(dev_priv);
+ gfx = i915_gfx_val(dev_priv);
++ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "GMCH temp: %ld\n", temp);
+ seq_printf(m, "Chipset power: %ld\n", chipset);
+@@ -718,6 +845,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
+ return 0;
+ }
+
++static int i915_opregion(struct seq_file *m, void *unused)
++{
++ struct drm_info_node *node = (struct drm_info_node *) m->private;
++ struct drm_device *dev = node->minor->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct intel_opregion *opregion = &dev_priv->opregion;
++ int ret;
++
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
++
++ if (opregion->header)
++ seq_write(m, opregion->header, OPREGION_SIZE);
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++}
++
++static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
++{
++ struct drm_info_node *node = (struct drm_info_node *) m->private;
++ struct drm_device *dev = node->minor->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct intel_fbdev *ifbdev;
++ struct intel_framebuffer *fb;
++ int ret;
++
++ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
++ if (ret)
++ return ret;
++
++ ifbdev = dev_priv->fbdev;
++ fb = to_intel_framebuffer(ifbdev->helper.fb);
++
++ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
++ fb->base.width,
++ fb->base.height,
++ fb->base.depth,
++ fb->base.bits_per_pixel);
++ describe_obj(m, to_intel_bo(fb->obj));
++ seq_printf(m, "\n");
++
++ list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
++ if (&fb->base == ifbdev->helper.fb)
++ continue;
++
++ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
++ fb->base.width,
++ fb->base.height,
++ fb->base.depth,
++ fb->base.bits_per_pixel);
++ describe_obj(m, to_intel_bo(fb->obj));
++ seq_printf(m, "\n");
++ }
++
++ mutex_unlock(&dev->mode_config.mutex);
++
++ return 0;
++}
++
+ static int
+ i915_wedged_open(struct inode *inode,
+ struct file *filp)
+@@ -741,6 +930,9 @@ i915_wedged_read(struct file *filp,
+ "wedged : %d\n",
+ atomic_read(&dev_priv->mm.wedged));
+
++ if (len > sizeof (buf))
++ len = sizeof (buf);
++
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+ }
+
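The new clamp matters because snprintf() returns the length the formatted string would have had, not the number of bytes actually written, so the return value can exceed the destination buffer. A sketch of the failure it prevents (values illustrative):

    char buf[8];
    int len = snprintf(buf, sizeof(buf), "wedged : %d\n", 123456);
    /* len == 16 although only 7 characters plus a NUL fit in buf;
     * passing len straight to simple_read_from_buffer() would copy
     * past the end of buf. */
    if (len > sizeof(buf))
            len = sizeof(buf);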
+@@ -770,7 +962,7 @@ i915_wedged_write(struct file *filp,
+
+ atomic_set(&dev_priv->mm.wedged, val);
+ if (val) {
+- DRM_WAKEUP(&dev_priv->irq_queue);
++ wake_up_all(&dev_priv->irq_queue);
+ queue_work(dev_priv->wq, &dev_priv->error_work);
+ }
+
+@@ -782,6 +974,7 @@ static const struct file_operations i915_wedged_fops = {
+ .open = i915_wedged_open,
+ .read = i915_wedged_read,
+ .write = i915_wedged_write,
++ .llseek = default_llseek,
+ };
+
+ /* As the drm_debugfs_init() routines are called before dev->dev_private is
+@@ -823,9 +1016,13 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+ }
+
+ static struct drm_info_list i915_debugfs_list[] = {
++ {"i915_capabilities", i915_capabilities, 0, 0},
++ {"i915_gem_objects", i915_gem_object_info, 0},
+ {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+ {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
+ {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
++ {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
++ {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
+ {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
+ {"i915_gem_request", i915_gem_request_info, 0},
+ {"i915_gem_seqno", i915_gem_seqno_info, 0},
+@@ -845,6 +1042,8 @@ static struct drm_info_list i915_debugfs_list[] = {
+ {"i915_gfxec", i915_gfxec, 0},
+ {"i915_fbc_status", i915_fbc_status, 0},
+ {"i915_sr_status", i915_sr_status, 0},
++ {"i915_opregion", i915_opregion, 0},
++ {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+ };
+ #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 2dd2c93..7a26f4dd 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -40,8 +40,7 @@
+ #include <linux/pnp.h>
+ #include <linux/vga_switcheroo.h>
+ #include <linux/slab.h>
+-
+-extern int intel_max_stolen; /* from AGP driver */
++#include <acpi/video.h>
+
+ /**
+ * Sets up the hardware status page for devices that need a physical address
+@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
+
+ memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+
+- if (IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
+ 0xf0;
+
+@@ -133,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
+
+ mutex_lock(&dev->struct_mutex);
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+- if (HAS_BSD(dev))
+- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
++ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
++ intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+ mutex_unlock(&dev->struct_mutex);
+
+ /* Clear the HWS virtual address at teardown */
+@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
+ DRM_DEBUG_DRIVER("hw status page @ %p\n",
+ ring->status_page.page_addr);
+ if (ring->status_page.gfx_addr != 0)
+- ring->setup_status_page(dev, ring);
++ intel_ring_setup_status_page(dev, ring);
+ else
+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
+@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
+ return -EINVAL;
+ }
+
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ BEGIN_LP_RING(4);
+ OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+ OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
+
+ if (!IS_I830(dev) && !IS_845G(dev)) {
+ BEGIN_LP_RING(2);
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+ OUT_RING(batch->start);
+ } else {
+@@ -500,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
+ }
+
+
+- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
++ if (IS_G4X(dev) || IS_GEN5(dev)) {
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ OUT_RING(MI_NOOP);
+@@ -765,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
+ case I915_PARAM_HAS_BSD:
+ value = HAS_BSD(dev);
+ break;
++ case I915_PARAM_HAS_BLT:
++ value = HAS_BLT(dev);
++ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown parameter %d\n",
+ param->param);
+@@ -888,12 +890,12 @@ static int
+ intel_alloc_mchbar_resource(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
++ int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret;
+
+- if (IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen >= 4)
+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+@@ -920,7 +922,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
+ return ret;
+ }
+
+- if (IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen >= 4)
+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+ upper_32_bits(dev_priv->mch_res.start));
+
+@@ -934,7 +936,7 @@ static void
+ intel_setup_mchbar(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
++ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
+
+@@ -971,7 +973,7 @@ static void
+ intel_teardown_mchbar(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
++ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+
+ if (dev_priv->mchbar_need_disable) {
+@@ -990,174 +992,6 @@ intel_teardown_mchbar(struct drm_device *dev)
+ release_resource(&dev_priv->mch_res);
+ }
+
+-/**
+- * i915_probe_agp - get AGP bootup configuration
+- * @pdev: PCI device
+- * @aperture_size: returns AGP aperture configured size
+- * @preallocated_size: returns size of BIOS preallocated AGP space
+- *
+- * Since Intel integrated graphics are UMA, the BIOS has to set aside
+- * some RAM for the framebuffer at early boot. This code figures out
+- * how much was set aside so we can use it for our own purposes.
+- */
+-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
+- uint32_t *preallocated_size,
+- uint32_t *start)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- u16 tmp = 0;
+- unsigned long overhead;
+- unsigned long stolen;
+-
+- /* Get the fb aperture size and "stolen" memory amount. */
+- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
+-
+- *aperture_size = 1024 * 1024;
+- *preallocated_size = 1024 * 1024;
+-
+- switch (dev->pdev->device) {
+- case PCI_DEVICE_ID_INTEL_82830_CGC:
+- case PCI_DEVICE_ID_INTEL_82845G_IG:
+- case PCI_DEVICE_ID_INTEL_82855GM_IG:
+- case PCI_DEVICE_ID_INTEL_82865_IG:
+- if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
+- *aperture_size *= 64;
+- else
+- *aperture_size *= 128;
+- break;
+- default:
+- /* 9xx supports large sizes, just look at the length */
+- *aperture_size = pci_resource_len(dev->pdev, 2);
+- break;
+- }
+-
+- /*
+- * Some of the preallocated space is taken by the GTT
+- * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
+- */
+- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
+- overhead = 4096;
+- else
+- overhead = (*aperture_size / 1024) + 4096;
+-
+- if (IS_GEN6(dev)) {
+- /* SNB has memory control reg at 0x50.w */
+- pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
+-
+- switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
+- case INTEL_855_GMCH_GMS_DISABLED:
+- DRM_ERROR("video memory is disabled\n");
+- return -1;
+- case SNB_GMCH_GMS_STOLEN_32M:
+- stolen = 32 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_64M:
+- stolen = 64 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_96M:
+- stolen = 96 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_128M:
+- stolen = 128 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_160M:
+- stolen = 160 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_192M:
+- stolen = 192 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_224M:
+- stolen = 224 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_256M:
+- stolen = 256 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_288M:
+- stolen = 288 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_320M:
+- stolen = 320 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_352M:
+- stolen = 352 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_384M:
+- stolen = 384 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_416M:
+- stolen = 416 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_448M:
+- stolen = 448 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_480M:
+- stolen = 480 * 1024 * 1024;
+- break;
+- case SNB_GMCH_GMS_STOLEN_512M:
+- stolen = 512 * 1024 * 1024;
+- break;
+- default:
+- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+- tmp & SNB_GMCH_GMS_STOLEN_MASK);
+- return -1;
+- }
+- } else {
+- switch (tmp & INTEL_GMCH_GMS_MASK) {
+- case INTEL_855_GMCH_GMS_DISABLED:
+- DRM_ERROR("video memory is disabled\n");
+- return -1;
+- case INTEL_855_GMCH_GMS_STOLEN_1M:
+- stolen = 1 * 1024 * 1024;
+- break;
+- case INTEL_855_GMCH_GMS_STOLEN_4M:
+- stolen = 4 * 1024 * 1024;
+- break;
+- case INTEL_855_GMCH_GMS_STOLEN_8M:
+- stolen = 8 * 1024 * 1024;
+- break;
+- case INTEL_855_GMCH_GMS_STOLEN_16M:
+- stolen = 16 * 1024 * 1024;
+- break;
+- case INTEL_855_GMCH_GMS_STOLEN_32M:
+- stolen = 32 * 1024 * 1024;
+- break;
+- case INTEL_915G_GMCH_GMS_STOLEN_48M:
+- stolen = 48 * 1024 * 1024;
+- break;
+- case INTEL_915G_GMCH_GMS_STOLEN_64M:
+- stolen = 64 * 1024 * 1024;
+- break;
+- case INTEL_GMCH_GMS_STOLEN_128M:
+- stolen = 128 * 1024 * 1024;
+- break;
+- case INTEL_GMCH_GMS_STOLEN_256M:
+- stolen = 256 * 1024 * 1024;
+- break;
+- case INTEL_GMCH_GMS_STOLEN_96M:
+- stolen = 96 * 1024 * 1024;
+- break;
+- case INTEL_GMCH_GMS_STOLEN_160M:
+- stolen = 160 * 1024 * 1024;
+- break;
+- case INTEL_GMCH_GMS_STOLEN_224M:
+- stolen = 224 * 1024 * 1024;
+- break;
+- case INTEL_GMCH_GMS_STOLEN_352M:
+- stolen = 352 * 1024 * 1024;
+- break;
+- default:
+- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+- tmp & INTEL_GMCH_GMS_MASK);
+- return -1;
+- }
+- }
+-
+- *preallocated_size = stolen - overhead;
+- *start = overhead;
+-
+- return 0;
+-}
+-
+ #define PTE_ADDRESS_MASK 0xfffff000
+ #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+ #define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+@@ -1181,11 +1015,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+ {
+ unsigned long *gtt;
+ unsigned long entry, phys;
+- int gtt_bar = IS_I9XX(dev) ? 0 : 1;
++ int gtt_bar = IS_GEN2(dev) ? 1 : 0;
+ int gtt_offset, gtt_size;
+
+- if (IS_I965G(dev)) {
+- if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
++ if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
+ gtt_offset = 2*1024*1024;
+ gtt_size = 2*1024*1024;
+ } else {
+@@ -1210,10 +1044,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+ DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+
+ /* Mask out these reserved bits on this hardware. */
+- if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
+- IS_I945G(dev) || IS_I945GM(dev)) {
++ if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
+ entry &= ~PTE_ADDRESS_MASK_HIGH;
+- }
+
+ /* If it's not a mapping type we know, then bail. */
+ if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
+@@ -1252,7 +1084,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ unsigned long ll_base = 0;
+
+ /* Leave 1M for line length buffer & misc. */
+- compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
++ compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
+ if (!compressed_fb) {
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ i915_warn_stolen(dev);
+@@ -1273,7 +1105,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ }
+
+ if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
+- compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
++ compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
+ 4096, 0);
+ if (!compressed_llb) {
+ i915_warn_stolen(dev);
+@@ -1343,10 +1175,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(dev->pdev, PCI_D0);
+ i915_resume(dev);
+- drm_kms_helper_poll_enable(dev);
+ } else {
+ printk(KERN_ERR "i915: switched off\n");
+- drm_kms_helper_poll_disable(dev);
+ i915_suspend(dev, pmm);
+ }
+ }
+@@ -1363,23 +1193,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+ }
+
+ static int i915_load_modeset_init(struct drm_device *dev,
+- unsigned long prealloc_start,
+ unsigned long prealloc_size,
+ unsigned long agp_size)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- int fb_bar = IS_I9XX(dev) ? 2 : 0;
+ int ret = 0;
+
+- dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
+- 0xff000000;
+-
+- /* Basic memrange allocator for stolen space (aka vram) */
+- drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+- DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+-
+- /* We're off and running w/KMS */
+- dev_priv->mm.suspended = 0;
++ /* Basic memrange allocator for stolen space (aka mm.vram) */
++ drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
+
+ /* Let GEM Manage from end of prealloc space to end of aperture.
+ *
+@@ -1414,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
+ */
+ dev_priv->allow_batchbuffer = 1;
+
+- ret = intel_init_bios(dev);
++ ret = intel_parse_bios(dev);
+ if (ret)
+ DRM_INFO("failed to find VBIOS tables\n");
+
+@@ -1423,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
+ if (ret)
+ goto cleanup_ringbuffer;
+
++ intel_register_dsm_handler();
++
+ ret = vga_switcheroo_register_client(dev->pdev,
+ i915_switcheroo_set_state,
+ i915_switcheroo_can_switch);
+@@ -1443,17 +1266,15 @@ static int i915_load_modeset_init(struct drm_device *dev,
+ /* FIXME: do pre/post-mode set stuff in core KMS code */
+ dev->vblank_disable_allowed = 1;
+
+- /*
+- * Initialize the hardware status page IRQ location.
+- */
+-
+- I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
+-
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_irq;
+
+ drm_kms_helper_poll_init(dev);
++
++ /* We're off and running w/KMS */
++ dev_priv->mm.suspended = 0;
++
+ return 0;
+
+ cleanup_irq:
+@@ -1907,7 +1728,7 @@ static struct drm_i915_private *i915_mch_dev;
+ * - dev_priv->fmax
+ * - dev_priv->gpu_busy
+ */
+-DEFINE_SPINLOCK(mchdev_lock);
++static DEFINE_SPINLOCK(mchdev_lock);
+
+ /**
+ * i915_read_mch_val - return value for IPS use
+@@ -2062,7 +1883,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ struct drm_i915_private *dev_priv;
+ resource_size_t base, size;
+ int ret = 0, mmio_bar;
+- uint32_t agp_size, prealloc_size, prealloc_start;
++ uint32_t agp_size, prealloc_size;
+ /* i915 has 4 more counters */
+ dev->counters += 4;
+ dev->types[6] = _DRM_STAT_IRQ;
+@@ -2079,7 +1900,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ dev_priv->info = (struct intel_device_info *) flags;
+
+ /* Add register map (needed for suspend/resume) */
+- mmio_bar = IS_I9XX(dev) ? 0 : 1;
++ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ base = pci_resource_start(dev->pdev, mmio_bar);
+ size = pci_resource_len(dev->pdev, mmio_bar);
+
+@@ -2121,17 +1942,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ "performance may suffer.\n");
+ }
+
+- ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
+- if (ret)
++ dev_priv->mm.gtt = intel_gtt_get();
++ if (!dev_priv->mm.gtt) {
++ DRM_ERROR("Failed to initialize GTT\n");
++ ret = -ENODEV;
+ goto out_iomapfree;
+-
+- if (prealloc_size > intel_max_stolen) {
+- DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
+- prealloc_size >> 20, intel_max_stolen >> 20);
+- prealloc_size = intel_max_stolen;
+ }
+
+- dev_priv->wq = create_singlethread_workqueue("i915");
++ prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
++ agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
++
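Each GTT entry maps one page, so the two shifts above turn entry counts into byte sizes. A worked example with 4KiB pages (PAGE_SHIFT == 12) and an illustrative entry count:

    /* 131072 stolen entries << 12 = 536870912 bytes = 512MiB */
    prealloc_size = 131072 << PAGE_SHIFT;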
++ /* The i915 workqueue is primarily used for batched retirement of
++ * requests (and thus managing buffer objects) once the task has
++ * been completed by the GPU. i915_gem_retire_requests() is called
++ * directly when we need high-priority retirement, such as when
++ * waiting for an explicit bo to complete.
++ *
++ * It is also used for periodic low-priority events, such as
++ * idle-timers and hangcheck.
++ *
++ * All tasks on the workqueue are expected to acquire the dev mutex
++ * so there is no point in running more than one work item at a
++ * time: hence max_active = 1 and WQ_NON_REENTRANT.
++ */
++ dev_priv->wq = alloc_workqueue("i915",
++ WQ_UNBOUND | WQ_NON_REENTRANT,
++ 1);
+ if (dev_priv->wq == NULL) {
+ DRM_ERROR("Failed to create our workqueue.\n");
+ ret = -ENOMEM;
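A self-contained sketch of the same serialized-workqueue pattern, with hypothetical names: every item takes one shared lock, so max_active = 1 costs nothing, and WQ_NON_REENTRANT keeps a re-queued item from running concurrently on a second CPU.

    #include <linux/workqueue.h>
    #include <linux/mutex.h>

    static struct workqueue_struct *demo_wq;        /* hypothetical */
    static DEFINE_MUTEX(demo_lock);

    static void demo_fn(struct work_struct *work)
    {
            mutex_lock(&demo_lock);         /* all items serialize here anyway */
            /* ... retire completed work ... */
            mutex_unlock(&demo_lock);
    }
    static DECLARE_WORK(demo_work, demo_fn);

    static int demo_setup(void)
    {
            demo_wq = alloc_workqueue("demo", WQ_UNBOUND | WQ_NON_REENTRANT, 1);
            if (!demo_wq)
                    return -ENOMEM;
            queue_work(demo_wq, &demo_work);
            return 0;
    }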
+@@ -2159,13 +1995,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+- if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
++ if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ }
+
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev);
++ intel_setup_gmbus(dev);
++ intel_opregion_setup(dev);
++
++ /* Make sure the bios did its job and set up vital registers */
++ intel_setup_bios(dev);
+
+ i915_gem_load(dev);
+
+@@ -2178,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+- else if (IS_IRONLAKE(dev))
++ else if (IS_GEN5(dev))
+ i915_ironlake_get_mem_freq(dev);
+
+ /* On the 945G/GM, the chipset reports the MSI capability on the
+@@ -2212,8 +2053,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ intel_detect_pch(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+- ret = i915_load_modeset_init(dev, prealloc_start,
+- prealloc_size, agp_size);
++ ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+ if (ret < 0) {
+ DRM_ERROR("failed to init modeset\n");
+ goto out_workqueue_free;
+@@ -2221,7 +2061,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ }
+
+ /* Must be done after probing outputs */
+- intel_opregion_init(dev, 0);
++ intel_opregion_init(dev);
++ acpi_video_register();
+
+ setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+ (unsigned long) dev);
+@@ -2231,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ dev_priv->mchdev_lock = &mchdev_lock;
+ spin_unlock(&mchdev_lock);
+
+- /* XXX Prevent module unload due to memory corruption bugs. */
+- __module_get(THIS_MODULE);
+-
+ return 0;
+
+ out_workqueue_free:
+@@ -2252,15 +2090,20 @@ free_priv:
+ int i915_driver_unload(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+-
+- i915_destroy_error_state(dev);
++ int ret;
+
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = NULL;
+ spin_unlock(&mchdev_lock);
+
+- destroy_workqueue(dev_priv->wq);
+- del_timer_sync(&dev_priv->hangcheck_timer);
++ mutex_lock(&dev->struct_mutex);
++ ret = i915_gpu_idle(dev);
++ if (ret)
++ DRM_ERROR("failed to idle hardware: %d\n", ret);
++ mutex_unlock(&dev->struct_mutex);
++
++ /* Cancel the retire work handler, which should be idle now. */
++ cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
+ io_mapping_free(dev_priv->mm.gtt_mapping);
+ if (dev_priv->mm.gtt_mtrr >= 0) {
+@@ -2269,7 +2112,10 @@ int i915_driver_unload(struct drm_device *dev)
+ dev_priv->mm.gtt_mtrr = -1;
+ }
+
++ acpi_video_unregister();
++
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++ intel_fbdev_fini(dev);
+ intel_modeset_cleanup(dev);
+
+ /*
+@@ -2281,20 +2127,25 @@ int i915_driver_unload(struct drm_device *dev)
+ dev_priv->child_dev = NULL;
+ dev_priv->child_dev_num = 0;
+ }
+- drm_irq_uninstall(dev);
++
+ vga_switcheroo_unregister_client(dev->pdev);
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+ }
+
++ /* Free error state after interrupts are fully disabled. */
++ del_timer_sync(&dev_priv->hangcheck_timer);
++ cancel_work_sync(&dev_priv->error_work);
++ i915_destroy_error_state(dev);
++
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
+- if (dev_priv->regs != NULL)
+- iounmap(dev_priv->regs);
+-
+- intel_opregion_free(dev, 0);
++ intel_opregion_fini(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++ /* Flush any outstanding unpin_work. */
++ flush_workqueue(dev_priv->wq);
++
+ i915_gem_free_all_phys_object(dev);
+
+ mutex_lock(&dev->struct_mutex);
+@@ -2302,34 +2153,41 @@ int i915_driver_unload(struct drm_device *dev)
+ mutex_unlock(&dev->struct_mutex);
+ if (I915_HAS_FBC(dev) && i915_powersave)
+ i915_cleanup_compression(dev);
+- drm_mm_takedown(&dev_priv->vram);
+- i915_gem_lastclose(dev);
++ drm_mm_takedown(&dev_priv->mm.vram);
+
+ intel_cleanup_overlay(dev);
++
++ if (!I915_NEED_GFX_HWS(dev))
++ i915_free_hws(dev);
+ }
+
++ if (dev_priv->regs != NULL)
++ iounmap(dev_priv->regs);
++
++ intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
+
++ destroy_workqueue(dev_priv->wq);
++
+ pci_dev_put(dev_priv->bridge_dev);
+ kfree(dev->dev_private);
+
+ return 0;
+ }
+
+-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+ {
+- struct drm_i915_file_private *i915_file_priv;
++ struct drm_i915_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+- i915_file_priv = (struct drm_i915_file_private *)
+- kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
+-
+- if (!i915_file_priv)
++ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
++ if (!file_priv)
+ return -ENOMEM;
+
+- file_priv->driver_priv = i915_file_priv;
++ file->driver_priv = file_priv;
+
+- INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
++ spin_lock_init(&file_priv->mm.lock);
++ INIT_LIST_HEAD(&file_priv->mm.request_list);
+
+ return 0;
+ }
+@@ -2372,11 +2230,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+ i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+ }
+
+-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
++void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+ {
+- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+- kfree(i915_file_priv);
++ kfree(file_priv);
+ }
+
+ struct drm_ioctl_desc i915_ioctls[] = {
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 6dbe14c..027cbfc 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -32,6 +32,7 @@
+ #include "drm.h"
+ #include "i915_drm.h"
+ #include "i915_drv.h"
++#include "intel_drv.h"
+
+ #include <linux/console.h>
+ #include "drm_crtc_helper.h"
+@@ -43,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
+ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+
+ unsigned int i915_powersave = 1;
+-module_param_named(powersave, i915_powersave, int, 0400);
++module_param_named(powersave, i915_powersave, int, 0600);
+
+ unsigned int i915_lvds_downclock = 0;
+ module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+@@ -61,86 +62,110 @@ extern int intel_agp_enabled;
+ .driver_data = (unsigned long) info }
+
+ static const struct intel_device_info intel_i830_info = {
+- .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
++ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
++ .has_overlay = 1, .overlay_needs_physical = 1,
+ };
+
+ static const struct intel_device_info intel_845g_info = {
+- .gen = 2, .is_i8xx = 1,
++ .gen = 2,
++ .has_overlay = 1, .overlay_needs_physical = 1,
+ };
+
+ static const struct intel_device_info intel_i85x_info = {
+- .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
++ .gen = 2, .is_i85x = 1, .is_mobile = 1,
+ .cursor_needs_physical = 1,
++ .has_overlay = 1, .overlay_needs_physical = 1,
+ };
+
+ static const struct intel_device_info intel_i865g_info = {
+- .gen = 2, .is_i8xx = 1,
++ .gen = 2,
++ .has_overlay = 1, .overlay_needs_physical = 1,
+ };
+
+ static const struct intel_device_info intel_i915g_info = {
+- .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
++ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
++ .has_overlay = 1, .overlay_needs_physical = 1,
+ };
+ static const struct intel_device_info intel_i915gm_info = {
+- .gen = 3, .is_i9xx = 1, .is_mobile = 1,
++ .gen = 3, .is_mobile = 1,
+ .cursor_needs_physical = 1,
++ .has_overlay = 1, .overlay_needs_physical = 1,
++ .supports_tv = 1,
+ };
+ static const struct intel_device_info intel_i945g_info = {
+- .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
++ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
++ .has_overlay = 1, .overlay_needs_physical = 1,
+ };
+ static const struct intel_device_info intel_i945gm_info = {
+- .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
++ .gen = 3, .is_i945gm = 1, .is_mobile = 1,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
++ .has_overlay = 1, .overlay_needs_physical = 1,
++ .supports_tv = 1,
+ };
+
+ static const struct intel_device_info intel_i965g_info = {
+- .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
++ .gen = 4, .is_broadwater = 1,
+ .has_hotplug = 1,
++ .has_overlay = 1,
+ };
+
+ static const struct intel_device_info intel_i965gm_info = {
+- .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
++ .gen = 4, .is_crestline = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
++ .has_overlay = 1,
++ .supports_tv = 1,
+ };
+
+ static const struct intel_device_info intel_g33_info = {
+- .gen = 3, .is_g33 = 1, .is_i9xx = 1,
++ .gen = 3, .is_g33 = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
++ .has_overlay = 1,
+ };
+
+ static const struct intel_device_info intel_g45_info = {
+- .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
++ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
++ .has_bsd_ring = 1,
+ };
+
+ static const struct intel_device_info intel_gm45_info = {
+- .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
++ .gen = 4, .is_g4x = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
++ .supports_tv = 1,
++ .has_bsd_ring = 1,
+ };
+
+ static const struct intel_device_info intel_pineview_info = {
+- .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
++ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
++ .has_overlay = 1,
+ };
+
+ static const struct intel_device_info intel_ironlake_d_info = {
+- .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
++ .gen = 5,
+ .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
++ .has_bsd_ring = 1,
+ };
+
+ static const struct intel_device_info intel_ironlake_m_info = {
+- .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
++ .gen = 5, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
++ .has_bsd_ring = 1,
+ };
+
+ static const struct intel_device_info intel_sandybridge_d_info = {
+- .gen = 6, .is_i965g = 1, .is_i9xx = 1,
++ .gen = 6,
+ .need_gfx_hws = 1, .has_hotplug = 1,
++ .has_bsd_ring = 1,
++ .has_blt_ring = 1,
+ };
+
+ static const struct intel_device_info intel_sandybridge_m_info = {
+- .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
++ .gen = 6, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
++ .has_bsd_ring = 1,
++ .has_blt_ring = 1,
+ };
+
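The net effect of the table rework above: per-chip booleans such as is_i965g and is_i9xx collapse into .gen plus capability bits, so call sites test either an age or a feature, never a chip name. Illustrative uses of macros introduced elsewhere in this patch (the helper name is hypothetical):

    if (INTEL_INFO(dev)->gen >= 4)          /* age test: replaces IS_I965G(dev) */
            mchbar_reg = MCHBAR_I965;
    if (HAS_BLT(dev))                       /* feature test: only Sandybridge sets has_blt_ring */
            intel_init_blt_ring(dev);       /* hypothetical helper */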
+ static const struct pci_device_id pciidlist[] = { /* aka */
+@@ -237,7 +262,7 @@ static int i915_drm_freeze(struct drm_device *dev)
+
+ i915_save_state(dev);
+
+- intel_opregion_free(dev, 1);
++ intel_opregion_fini(dev);
+
+ /* Modeset on resume, not lid events */
+ dev_priv->modeset_on_lid = 0;
+@@ -258,6 +283,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
+ if (state.event == PM_EVENT_PRETHAW)
+ return 0;
+
++ drm_kms_helper_poll_disable(dev);
++
+ error = i915_drm_freeze(dev);
+ if (error)
+ return error;
+@@ -277,8 +304,7 @@ static int i915_drm_thaw(struct drm_device *dev)
+ int error = 0;
+
+ i915_restore_state(dev);
+-
+- intel_opregion_init(dev, 1);
++ intel_opregion_setup(dev);
+
+ /* KMS EnterVT equivalent */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+@@ -294,6 +320,8 @@ static int i915_drm_thaw(struct drm_device *dev)
+ drm_helper_resume_force_mode(dev);
+ }
+
++ intel_opregion_init(dev);
++
+ dev_priv->modeset_on_lid = 0;
+
+ return error;
+@@ -301,12 +329,79 @@ static int i915_drm_thaw(struct drm_device *dev)
+
+ int i915_resume(struct drm_device *dev)
+ {
++ int ret;
++
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ pci_set_master(dev->pdev);
+
+- return i915_drm_thaw(dev);
++ ret = i915_drm_thaw(dev);
++ if (ret)
++ return ret;
++
++ drm_kms_helper_poll_enable(dev);
++ return 0;
++}
++
++static int i8xx_do_reset(struct drm_device *dev, u8 flags)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ if (IS_I85X(dev))
++ return -ENODEV;
++
++ I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
++ POSTING_READ(D_STATE);
++
++ if (IS_I830(dev) || IS_845G(dev)) {
++ I915_WRITE(DEBUG_RESET_I830,
++ DEBUG_RESET_DISPLAY |
++ DEBUG_RESET_RENDER |
++ DEBUG_RESET_FULL);
++ POSTING_READ(DEBUG_RESET_I830);
++ msleep(1);
++
++ I915_WRITE(DEBUG_RESET_I830, 0);
++ POSTING_READ(DEBUG_RESET_I830);
++ }
++
++ msleep(1);
++
++ I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
++ POSTING_READ(D_STATE);
++
++ return 0;
++}
++
++static int i965_reset_complete(struct drm_device *dev)
++{
++ u8 gdrst;
++ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
++ return gdrst & 0x1;
++}
++
++static int i965_do_reset(struct drm_device *dev, u8 flags)
++{
++ u8 gdrst;
++
++ /*
++ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
++ * well as the reset bit (GR/bit 0). Setting the GR bit
++ * triggers the reset; when done, the hardware will clear it.
++ */
++ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
++ pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
++
++ return wait_for(i965_reset_complete(dev), 500);
++}
++
++static int ironlake_do_reset(struct drm_device *dev, u8 flags)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
++ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
++ return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ }
+
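wait_for() above is the driver's poll-with-timeout helper from intel_drv.h. Roughly (a paraphrase, not the exact macro), it evaluates to 0 once the condition holds and to -ETIMEDOUT after MS milliseconds:

    #define wait_for(COND, MS) ({                                           \
            unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
            int ret__ = 0;                                                  \
            while (!(COND)) {                                               \
                    if (time_after(jiffies, timeout__)) {                   \
                            ret__ = -ETIMEDOUT;                             \
                            break;                                          \
                    }                                                       \
                    msleep(1);                                              \
            }                                                               \
            ret__;                                                          \
    })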
+ /**
+@@ -325,54 +420,39 @@ int i915_resume(struct drm_device *dev)
+ * - re-init interrupt state
+ * - re-init display
+ */
+-int i965_reset(struct drm_device *dev, u8 flags)
++int i915_reset(struct drm_device *dev, u8 flags)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- unsigned long timeout;
+- u8 gdrst;
+ /*
+ * We really should only reset the display subsystem if we actually
+ * need to
+ */
+ bool need_display = true;
++ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+- /*
+- * Clear request list
+- */
+- i915_gem_retire_requests(dev);
+-
+- if (need_display)
+- i915_save_display(dev);
+-
+- if (IS_I965G(dev) || IS_G4X(dev)) {
+- /*
+- * Set the domains we want to reset, then the reset bit (bit 0).
+- * Clear the reset bit after a while and wait for hardware status
+- * bit (bit 1) to be set
+- */
+- pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+- pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
+- udelay(50);
+- pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
+-
+- /* ...we don't want to loop forever though, 500ms should be plenty */
+- timeout = jiffies + msecs_to_jiffies(500);
+- do {
+- udelay(100);
+- pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+- } while ((gdrst & 0x1) && time_after(timeout, jiffies));
+-
+- if (gdrst & 0x1) {
+- WARN(true, "i915: Failed to reset chip\n");
+- mutex_unlock(&dev->struct_mutex);
+- return -EIO;
+- }
+- } else {
+- DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
++ i915_gem_reset(dev);
++
++ ret = -ENODEV;
++ if (get_seconds() - dev_priv->last_gpu_reset < 5) {
++ DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
++ } else switch (INTEL_INFO(dev)->gen) {
++ case 5:
++ ret = ironlake_do_reset(dev, flags);
++ break;
++ case 4:
++ ret = i965_do_reset(dev, flags);
++ break;
++ case 2:
++ ret = i8xx_do_reset(dev, flags);
++ break;
++ }
++ dev_priv->last_gpu_reset = get_seconds();
++ if (ret) {
++ DRM_ERROR("Failed to reset chip.\n");
+ mutex_unlock(&dev->struct_mutex);
+- return -ENODEV;
++ return ret;
+ }
+
+ /* Ok, now get things going again... */
+@@ -400,13 +480,19 @@ int i965_reset(struct drm_device *dev, u8 flags)
+ mutex_lock(&dev->struct_mutex);
+ }
+
++ mutex_unlock(&dev->struct_mutex);
++
+ /*
+- * Display needs restore too...
++ * Perform a full modeset because, on later generations such as
++ * Ironlake, we may need to retrain the display link and cannot just
++ * restore the register values.
+ */
+- if (need_display)
+- i915_restore_display(dev);
++ if (need_display) {
++ mutex_lock(&dev->mode_config.mutex);
++ drm_helper_resume_force_mode(dev);
++ mutex_unlock(&dev->mode_config.mutex);
++ }
+
+- mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+
+@@ -422,6 +508,8 @@ i915_pci_remove(struct pci_dev *pdev)
+ {
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
++ pci_disable_device(pdev); /* core did previous enable */
++
+ drm_put_dev(dev);
+ }
+
+@@ -524,8 +612,6 @@ static struct drm_driver driver = {
+ .irq_uninstall = i915_driver_irq_uninstall,
+ .irq_handler = i915_driver_irq_handler,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+- .get_map_ofs = drm_core_get_map_ofs,
+- .get_reg_ofs = drm_core_get_reg_ofs,
+ .master_create = i915_master_create,
+ .master_destroy = i915_master_destroy,
+ #if defined(CONFIG_DEBUG_FS)
+@@ -548,6 +634,7 @@ static struct drm_driver driver = {
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = i915_compat_ioctl,
+ #endif
++ .llseek = noop_llseek,
+ },
+
+ .pci_driver = {
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index af4a263..90414ae 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -34,6 +34,8 @@
+ #include "intel_bios.h"
+ #include "intel_ringbuffer.h"
+ #include <linux/io-mapping.h>
++#include <linux/i2c.h>
++#include <drm/intel-gtt.h>
+
+ /* General customization:
+ */
+@@ -73,11 +75,9 @@ enum plane {
+ #define DRIVER_PATCHLEVEL 0
+
+ #define WATCH_COHERENCY 0
+-#define WATCH_BUF 0
+ #define WATCH_EXEC 0
+-#define WATCH_LRU 0
+ #define WATCH_RELOC 0
+-#define WATCH_INACTIVE 0
++#define WATCH_LISTS 0
+ #define WATCH_PWRITE 0
+
+ #define I915_GEM_PHYS_CURSOR_0 1
+@@ -110,8 +110,9 @@ struct intel_opregion {
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ struct opregion_asle *asle;
+- int enabled;
++ void *vbt;
+ };
++#define OPREGION_SIZE (8*1024)
+
+ struct intel_overlay;
+ struct intel_overlay_error_state;
+@@ -125,13 +126,16 @@ struct drm_i915_master_private {
+ struct drm_i915_fence_reg {
+ struct drm_gem_object *obj;
+ struct list_head lru_list;
++ bool gpu;
+ };
+
+ struct sdvo_device_mapping {
++ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+- u8 initialized;
++ u8 i2c_pin;
++ u8 i2c_speed;
+ u8 ddc_pin;
+ };
+
+@@ -193,28 +197,29 @@ struct drm_i915_display_funcs {
+ struct intel_device_info {
+ u8 gen;
+ u8 is_mobile : 1;
+- u8 is_i8xx : 1;
+ u8 is_i85x : 1;
+ u8 is_i915g : 1;
+- u8 is_i9xx : 1;
+ u8 is_i945gm : 1;
+- u8 is_i965g : 1;
+- u8 is_i965gm : 1;
+ u8 is_g33 : 1;
+ u8 need_gfx_hws : 1;
+ u8 is_g4x : 1;
+ u8 is_pineview : 1;
+ u8 is_broadwater : 1;
+ u8 is_crestline : 1;
+- u8 is_ironlake : 1;
+ u8 has_fbc : 1;
+ u8 has_rc6 : 1;
+ u8 has_pipe_cxsr : 1;
+ u8 has_hotplug : 1;
+ u8 cursor_needs_physical : 1;
++ u8 has_overlay : 1;
++ u8 overlay_needs_physical : 1;
++ u8 supports_tv : 1;
++ u8 has_bsd_ring : 1;
++ u8 has_blt_ring : 1;
+ };
+
+ enum no_fbc_reason {
++ FBC_NO_OUTPUT, /* no outputs enabled to compress */
+ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+@@ -241,9 +246,16 @@ typedef struct drm_i915_private {
+
+ void __iomem *regs;
+
++ struct intel_gmbus {
++ struct i2c_adapter adapter;
++ struct i2c_adapter *force_bit;
++ u32 reg0;
++ } *gmbus;
++
+ struct pci_dev *bridge_dev;
+ struct intel_ring_buffer render_ring;
+ struct intel_ring_buffer bsd_ring;
++ struct intel_ring_buffer blt_ring;
+ uint32_t next_seqno;
+
+ drm_dma_handle_t *status_page_dmah;
+@@ -263,6 +275,9 @@ typedef struct drm_i915_private {
+ int front_offset;
+ int current_page;
+ int page_flipping;
++#define I915_DEBUG_READ (1<<0)
++#define I915_DEBUG_WRITE (1<<1)
++ unsigned long debug_flags;
+
+ wait_queue_head_t irq_queue;
+ atomic_t irq_received;
+@@ -289,24 +304,21 @@ typedef struct drm_i915_private {
+ unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
+ int vblank_pipe;
+ int num_pipe;
+- u32 flush_rings;
+-#define FLUSH_RENDER_RING 0x1
+-#define FLUSH_BSD_RING 0x2
+
+ /* For hangcheck timer */
+-#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
++#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd;
+ uint32_t last_instdone;
+ uint32_t last_instdone1;
+
+- struct drm_mm vram;
+-
+ unsigned long cfb_size;
+ unsigned long cfb_pitch;
++ unsigned long cfb_offset;
+ int cfb_fence;
+ int cfb_plane;
++ int cfb_y;
+
+ int irq_enabled;
+
+@@ -316,8 +328,7 @@ typedef struct drm_i915_private {
+ struct intel_overlay *overlay;
+
+ /* LVDS info */
+- int backlight_duty_cycle; /* restore backlight to this value */
+- bool panel_wants_dither;
++ int backlight_level; /* restore backlight to this value */
+ struct drm_display_mode *panel_fixed_mode;
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+@@ -328,13 +339,23 @@ typedef struct drm_i915_private {
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+- unsigned int edp_support:1;
+ int lvds_ssc_freq;
+- int edp_bpp;
++ struct {
++ int rate;
++ int lanes;
++ int preemphasis;
++ int vswing;
++
++ bool initialized;
++ bool support;
++ int bpp;
++ struct edp_power_seq pps;
++ } edp;
++ bool no_aux_handshake;
+
+ struct notifier_block lid_notifier;
+
+- int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
++ int crt_ddc_pin;
+ struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+@@ -344,6 +365,7 @@ typedef struct drm_i915_private {
+ spinlock_t error_lock;
+ struct drm_i915_error_state *first_error;
+ struct work_struct error_work;
++ struct completion error_completion;
+ struct workqueue_struct *wq;
+
+ /* Display functions */
+@@ -507,6 +529,11 @@ typedef struct drm_i915_private {
+ u32 saveMCHBAR_RENDER_STANDBY;
+
+ struct {
++ /** Bridge to intel-gtt-ko */
++ struct intel_gtt *gtt;
++ /** Memory allocator for GTT stolen memory */
++ struct drm_mm vram;
++ /** Memory allocator for GTT */
+ struct drm_mm gtt_space;
+
+ struct io_mapping *gtt_mapping;
+@@ -521,7 +548,16 @@ typedef struct drm_i915_private {
+ */
+ struct list_head shrink_list;
+
+- spinlock_t active_list_lock;
++ /**
++ * List of objects currently involved in rendering.
++ *
++ * Includes buffers having the contents of their GPU caches
++ * flushed, not necessarily primitives. last_rendering_seqno
++ * represents when the rendering involved will be completed.
++ *
++ * A reference is held on the buffer while on this list.
++ */
++ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+@@ -535,15 +571,6 @@ typedef struct drm_i915_private {
+ struct list_head flushing_list;
+
+ /**
+- * List of objects currently pending a GPU write flush.
+- *
+- * All elements on this list will belong to either the
+- * active_list or flushing_list, last_rendering_seqno can
+- * be used to differentiate between the two elements.
+- */
+- struct list_head gpu_write_list;
+-
+- /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+@@ -555,6 +582,12 @@ typedef struct drm_i915_private {
+ */
+ struct list_head inactive_list;
+
++ /**
++ * LRU list of objects which are not in the ringbuffer but
++ * are still pinned in the GTT.
++ */
++ struct list_head pinned_list;
++
+ /** LRU list of objects with fence regs on them. */
+ struct list_head fence_list;
+
+@@ -611,6 +644,17 @@ typedef struct drm_i915_private {
+
+ /* storage for physical objects */
+ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
++
++ uint32_t flush_rings;
++
++ /* accounting, useful for userland debugging */
++ size_t object_memory;
++ size_t pin_memory;
++ size_t gtt_memory;
++ size_t gtt_total;
++ u32 object_count;
++ u32 pin_count;
++ u32 gtt_count;
+ } mm;
+ struct sdvo_device_mapping sdvo_mappings[2];
+ /* indicate whether the LVDS_BORDER should be enabled or not */
+@@ -626,8 +670,6 @@ typedef struct drm_i915_private {
+ /* Reclocking support */
+ bool render_reclock_avail;
+ bool lvds_downclock_avail;
+- /* indicate whether the LVDS EDID is OK */
+- bool lvds_edid_good;
+ /* indicates the reduced downclock for LVDS*/
+ int lvds_downclock;
+ struct work_struct idle_work;
+@@ -661,6 +703,8 @@ typedef struct drm_i915_private {
+ struct drm_mm_node *compressed_fb;
+ struct drm_mm_node *compressed_llb;
+
++ unsigned long last_gpu_reset;
++
+ /* list of fbdev register on this device */
+ struct intel_fbdev *fbdev;
+ } drm_i915_private_t;
+@@ -673,7 +717,8 @@ struct drm_i915_gem_object {
+ struct drm_mm_node *gtt_space;
+
+ /** This object's place on the active/flushing/inactive lists */
+- struct list_head list;
++ struct list_head ring_list;
++ struct list_head mm_list;
+ /** This object's place on GPU write list */
+ struct list_head gpu_write_list;
+ /** This object's place on eviction list */
+@@ -816,12 +861,14 @@ struct drm_i915_gem_request {
+ /** global list entry for this request */
+ struct list_head list;
+
++ struct drm_i915_file_private *file_priv;
+ /** file_priv list entry for this request */
+ struct list_head client_list;
+ };
+
+ struct drm_i915_file_private {
+ struct {
++ struct spinlock lock;
+ struct list_head request_list;
+ } mm;
+ };
+@@ -862,7 +909,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+ extern int i915_emit_box(struct drm_device *dev,
+ struct drm_clip_rect *boxes,
+ int i, int DR1, int DR4);
+-extern int i965_reset(struct drm_device *dev, u8 flags);
++extern int i915_reset(struct drm_device *dev, u8 flags);
+ extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+@@ -871,7 +918,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+
+ /* i915_irq.c */
+ void i915_hangcheck_elapsed(unsigned long data);
+-void i915_destroy_error_state(struct drm_device *dev);
+ extern int i915_irq_emit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ extern int i915_irq_wait(struct drm_device *dev, void *data,
+@@ -908,6 +954,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+ void intel_enable_asle (struct drm_device *dev);
+
++#ifdef CONFIG_DEBUG_FS
++extern void i915_destroy_error_state(struct drm_device *dev);
++#else
++#define i915_destroy_error_state(x)
++#endif
++
+
+ /* i915_mem.c */
+ extern int i915_mem_alloc(struct drm_device *dev, void *data,
+@@ -922,6 +974,7 @@ extern void i915_mem_takedown(struct mem_block **heap);
+ extern void i915_mem_release(struct drm_device * dev,
+ struct drm_file *file_priv, struct mem_block *heap);
+ /* i915_gem.c */
++int i915_gem_check_is_wedged(struct drm_device *dev);
+ int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+@@ -972,13 +1025,22 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
+ int i915_gem_object_unbind(struct drm_gem_object *obj);
+ void i915_gem_release_mmap(struct drm_gem_object *obj);
+ void i915_gem_lastclose(struct drm_device *dev);
+-uint32_t i915_get_gem_seqno(struct drm_device *dev,
+- struct intel_ring_buffer *ring);
+-bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
+-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
++
++/**
++ * Returns true if seq1 is later than seq2.
++ */
++static inline bool
++i915_seqno_passed(uint32_t seq1, uint32_t seq2)
++{
++ return (int32_t)(seq1 - seq2) >= 0;
++}
++
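Making i915_seqno_passed() an inline also documents the wraparound trick: comparing via a signed 32-bit difference stays correct when the sequence counter wraps. With illustrative values:

    uint32_t seq1 = 0x00000002;     /* issued just after the wrap */
    uint32_t seq2 = 0xfffffffe;     /* issued just before it */
    /* seq1 - seq2 == 4 (mod 2^32), so (int32_t)(seq1 - seq2) >= 0 and
     * seq1 is correctly reported as later, even though seq1 < seq2 as
     * plain unsigned values. */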
++int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
++ bool interruptible);
++int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
++ bool interruptible);
+ void i915_gem_retire_requests(struct drm_device *dev);
+-void i915_gem_retire_work_handler(struct work_struct *work);
++void i915_gem_reset(struct drm_device *dev);
+ void i915_gem_clflush_object(struct drm_gem_object *obj);
+ int i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+@@ -990,16 +1052,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ int i915_gpu_idle(struct drm_device *dev);
+ int i915_gem_idle(struct drm_device *dev);
+ uint32_t i915_add_request(struct drm_device *dev,
+- struct drm_file *file_priv,
+- uint32_t flush_domains,
+- struct intel_ring_buffer *ring);
++ struct drm_file *file_priv,
++ struct drm_i915_gem_request *request,
++ struct intel_ring_buffer *ring);
+ int i915_do_wait_request(struct drm_device *dev,
+- uint32_t seqno, int interruptible,
+- struct intel_ring_buffer *ring);
++ uint32_t seqno,
++ bool interruptible,
++ struct intel_ring_buffer *ring);
+ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ int write);
+-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
++int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
++ bool pipelined);
+ int i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int id,
+@@ -1007,10 +1071,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
+ void i915_gem_detach_phys_object(struct drm_device *dev,
+ struct drm_gem_object *obj);
+ void i915_gem_free_all_phys_object(struct drm_device *dev);
+-int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+-void i915_gem_object_put_pages(struct drm_gem_object *obj);
+ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+-int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+
+ void i915_gem_shrinker_init(void);
+ void i915_gem_shrinker_exit(void);
+@@ -1032,15 +1093,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
+ /* i915_gem_debug.c */
+ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark);
+-#if WATCH_INACTIVE
+-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
++#if WATCH_LISTS
++int i915_verify_lists(struct drm_device *dev);
+ #else
+-#define i915_verify_inactive(dev, file, line)
++#define i915_verify_lists(dev) 0
+ #endif
+ void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
+ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark);
+-void i915_dump_lru(struct drm_device *dev, const char *where);
+
+ /* i915_debugfs.c */
+ int i915_debugfs_init(struct drm_minor *minor);
+@@ -1054,21 +1114,42 @@ extern int i915_restore_state(struct drm_device *dev);
+ extern int i915_save_state(struct drm_device *dev);
+ extern int i915_restore_state(struct drm_device *dev);
+
++/* intel_i2c.c */
++extern int intel_setup_gmbus(struct drm_device *dev);
++extern void intel_teardown_gmbus(struct drm_device *dev);
++extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
++extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
++static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
++{
++ return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
++}
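The helper relies on container_of() recovering the enclosing struct from a pointer to an embedded member; a minimal sketch of why that works for the layout above:

    struct intel_gmbus bus;
    struct i2c_adapter *adap = &bus.adapter;
    /* container_of() subtracts offsetof(struct intel_gmbus, adapter)
     * from adap, so back == &bus. */
    struct intel_gmbus *back = container_of(adap, struct intel_gmbus, adapter);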
++extern void intel_i2c_reset(struct drm_device *dev);
++
++/* intel_opregion.c */
++extern int intel_opregion_setup(struct drm_device *dev);
+ #ifdef CONFIG_ACPI
+-/* i915_opregion.c */
+-extern int intel_opregion_init(struct drm_device *dev, int resume);
+-extern void intel_opregion_free(struct drm_device *dev, int suspend);
+-extern void opregion_asle_intr(struct drm_device *dev);
+-extern void ironlake_opregion_gse_intr(struct drm_device *dev);
+-extern void opregion_enable_asle(struct drm_device *dev);
++extern void intel_opregion_init(struct drm_device *dev);
++extern void intel_opregion_fini(struct drm_device *dev);
++extern void intel_opregion_asle_intr(struct drm_device *dev);
++extern void intel_opregion_gse_intr(struct drm_device *dev);
++extern void intel_opregion_enable_asle(struct drm_device *dev);
+ #else
+-static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
+-static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
+-static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+-static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
+-static inline void opregion_enable_asle(struct drm_device *dev) { return; }
++static inline void intel_opregion_init(struct drm_device *dev) { return; }
++static inline void intel_opregion_fini(struct drm_device *dev) { return; }
++static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
++static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
++static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
+ #endif
+
++/* intel_acpi.c */
++#ifdef CONFIG_ACPI
++extern void intel_register_dsm_handler(void);
++extern void intel_unregister_dsm_handler(void);
++#else
++static inline void intel_register_dsm_handler(void) { return; }
++static inline void intel_unregister_dsm_handler(void) { return; }
++#endif /* CONFIG_ACPI */
++
+ /* modesetting */
+ extern void intel_modeset_init(struct drm_device *dev);
+ extern void intel_modeset_cleanup(struct drm_device *dev);
+@@ -1084,8 +1165,10 @@ extern void intel_detect_pch (struct drm_device *dev);
+ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
+
+ /* overlay */
++#ifdef CONFIG_DEBUG_FS
+ extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
++#endif
+
+ /**
+ * Lock test for when it's just for synchronization of ring access.
+@@ -1099,8 +1182,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
+ LOCK_TEST_WITH_RETURN(dev, file_priv); \
+ } while (0)
+
+-#define I915_READ(reg) readl(dev_priv->regs + (reg))
+-#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg))
++static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
++{
++ u32 val;
++
++ val = readl(dev_priv->regs + reg);
++ if (dev_priv->debug_flags & I915_DEBUG_READ)
++ printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
++ return val;
++}
++
++static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
++ u32 val)
++{
++ writel(val, dev_priv->regs + reg);
++ if (dev_priv->debug_flags & I915_DEBUG_WRITE)
++ printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
++}
++
++#define I915_READ(reg) i915_read(dev_priv, (reg))
++#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
+ #define I915_READ16(reg) readw(dev_priv->regs + (reg))
+ #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
+ #define I915_READ8(reg) readb(dev_priv->regs + (reg))
+@@ -1110,6 +1211,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
+ #define POSTING_READ(reg) (void)I915_READ(reg)
+ #define POSTING_READ16(reg) (void)I915_READ16(reg)
+
++#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
++ I915_DEBUG_WRITE)
++#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
++ I915_DEBUG_WRITE))
++
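With the wrappers and toggles above, a suspect MMIO sequence can be traced selectively instead of logging all register traffic; a hypothetical debugging session:

    I915_DEBUG_ENABLE_IO();
    I915_WRITE(INSTPM, (1 << 5) | (1 << 21));   /* logged via printk */
    (void)I915_READ(INSTPM);                    /* logged via printk */
    I915_DEBUG_DISABLE_IO();                    /* back to silent I/O */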
+ #define I915_VERBOSE 0
+
+ #define BEGIN_LP_RING(n) do { \
+@@ -1166,8 +1272,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
+ #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+ #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+ #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+-#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
+-#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
+ #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+ #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+ #define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+@@ -1178,8 +1282,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
+ #define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+ #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+ #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+-#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
+-#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
+ #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+
+ #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+@@ -1188,36 +1290,38 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
+ #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+ #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+
+-#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
++#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
++#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+ #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
++#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
++#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
++
+ /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+-#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
++#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
+-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev))
+-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
++#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
++#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
++#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+ #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+-#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
+- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
+- !IS_GEN6(dev))
++#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+ #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+ /* dsparb controlled by hw only */
+ #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
++#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+ #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+ #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+ #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
+
+-#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
+- IS_GEN6(dev))
+-#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
++#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
++#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+
+ #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+ #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
++#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+
+ #define PRIMARY_RINGBUFFER_SIZE (128*1024)
+
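The I915_READ/I915_WRITE hunk above swaps the bare readl()/writel() macros for inline wrappers so that every MMIO access can be logged when the matching debug flag is set, without touching any call site. A minimal userspace sketch of the same flag-gated tracing shape, where regs[], DEBUG_READ/DEBUG_WRITE and the trace_* names are stand-ins rather than the driver's symbols:

#include <stdint.h>
#include <stdio.h>

#define DEBUG_READ  (1u << 0)
#define DEBUG_WRITE (1u << 1)

static uint32_t debug_flags;
static volatile uint32_t regs[256];     /* pretend register file */

static uint32_t trace_read(uint32_t reg)
{
        uint32_t val = regs[reg / 4];

        if (debug_flags & DEBUG_READ)
                fprintf(stderr, "read 0x%08x from 0x%08x\n", val, reg);
        return val;
}

static void trace_write(uint32_t reg, uint32_t val)
{
        regs[reg / 4] = val;
        if (debug_flags & DEBUG_WRITE)
                fprintf(stderr, "wrote 0x%08x to 0x%08x\n", val, reg);
}

int main(void)
{
        debug_flags = DEBUG_READ | DEBUG_WRITE; /* I915_DEBUG_ENABLE_IO() analogue */
        trace_write(0x100, 0xdeadbeef);
        return trace_read(0x100) != 0xdeadbeef;
}

Routing the existing macro names through inline functions is what lets the logging be compiled in permanently while costing only a flag test when disabled.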
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 90b1d67..6da2c6d 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -37,7 +37,9 @@
+ #include <linux/intel-gtt.h>
+
+ static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
+-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
++
++static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
++ bool pipelined);
+ static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+ static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+ static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+@@ -46,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size);
+ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
+-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
++static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
++ bool interruptible);
+ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+ unsigned alignment);
+ static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+@@ -55,9 +58,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
+ struct drm_file *file_priv);
+ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+
++static int
++i915_gem_object_get_pages(struct drm_gem_object *obj,
++ gfp_t gfpmask);
++
++static void
++i915_gem_object_put_pages(struct drm_gem_object *obj);
++
+ static LIST_HEAD(shrink_list);
+ static DEFINE_SPINLOCK(shrink_list_lock);
+
++/* some bookkeeping */
++static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
++ size_t size)
++{
++ dev_priv->mm.object_count++;
++ dev_priv->mm.object_memory += size;
++}
++
++static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
++ size_t size)
++{
++ dev_priv->mm.object_count--;
++ dev_priv->mm.object_memory -= size;
++}
++
++static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
++ size_t size)
++{
++ dev_priv->mm.gtt_count++;
++ dev_priv->mm.gtt_memory += size;
++}
++
++static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
++ size_t size)
++{
++ dev_priv->mm.gtt_count--;
++ dev_priv->mm.gtt_memory -= size;
++}
++
++static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
++ size_t size)
++{
++ dev_priv->mm.pin_count++;
++ dev_priv->mm.pin_memory += size;
++}
++
++static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
++ size_t size)
++{
++ dev_priv->mm.pin_count--;
++ dev_priv->mm.pin_memory -= size;
++}
++
++int
++i915_gem_check_is_wedged(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct completion *x = &dev_priv->error_completion;
++ unsigned long flags;
++ int ret;
++
++ if (!atomic_read(&dev_priv->mm.wedged))
++ return 0;
++
++ ret = wait_for_completion_interruptible(x);
++ if (ret)
++ return ret;
++
++ /* Success, we reset the GPU! */
++ if (!atomic_read(&dev_priv->mm.wedged))
++ return 0;
++
++ /* GPU is hung, bump the completion count to account for
++ * the token we just consumed so that we never hit zero and
++ * end up waiting upon a subsequent completion event that
++ * will never happen.
++ */
++ spin_lock_irqsave(&x->wait.lock, flags);
++ x->done++;
++ spin_unlock_irqrestore(&x->wait.lock, flags);
++ return -EIO;
++}
++
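The comment in i915_gem_check_is_wedged() above is the crux of the scheme: a waiter consumes one completion token, and if the GPU is still hung the token must be put back, or a later waiter would block on an event that never fires. A sketch of that token discipline with a POSIX semaphore standing in for the kernel completion; all names here are hypothetical:

#include <semaphore.h>
#include <stdbool.h>

static sem_t reset_done;        /* stands in for error_completion */
static volatile bool wedged;

static int check_is_wedged(void)
{
        if (!wedged)
                return 0;

        if (sem_wait(&reset_done) != 0) /* consume one "reset finished" token */
                return -1;

        if (!wedged)                    /* success, the GPU was reset */
                return 0;

        sem_post(&reset_done);          /* still hung: return the token */
        return -1;
}

int main(void)
{
        sem_init(&reset_done, 0, 0);
        return check_is_wedged();       /* 0: device starts out healthy */
}

The post-on-failure mirrors the x->done++ above; without it the token count would drift toward zero and every later waiter would stall.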
++static int i915_mutex_lock_interruptible(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int ret;
++
++ ret = i915_gem_check_is_wedged(dev);
++ if (ret)
++ return ret;
++
++ ret = mutex_lock_interruptible(&dev->struct_mutex);
++ if (ret)
++ return ret;
++
++ if (atomic_read(&dev_priv->mm.wedged)) {
++ mutex_unlock(&dev->struct_mutex);
++ return -EAGAIN;
++ }
++
++ WARN_ON(i915_verify_lists(dev));
++ return 0;
++}
++
+ static inline bool
+ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+ {
+@@ -66,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+ obj_priv->pin_count == 0;
+ }
+
+-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
++int i915_gem_do_init(struct drm_device *dev,
++ unsigned long start,
+ unsigned long end)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -80,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ drm_mm_init(&dev_priv->mm.gtt_space, start,
+ end - start);
+
+- dev->gtt_total = (uint32_t) (end - start);
++ dev_priv->mm.gtt_total = end - start;
+
+ return 0;
+ }
+@@ -103,14 +209,16 @@ int
+ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_get_aperture *args = data;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+- args->aper_size = dev->gtt_total;
+- args->aper_available_size = (args->aper_size -
+- atomic_read(&dev->pin_memory));
++ mutex_lock(&dev->struct_mutex);
++ args->aper_size = dev_priv->mm.gtt_total;
++ args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
++ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
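The aperture ioctl above now reads plain counters under struct_mutex instead of the old atomic dev->pin_memory, and the i915_gem_info_* helpers earlier in this hunk keep those counters in step on every add and remove. A sketch of the scheme with a pthread mutex and illustrative names:

#include <pthread.h>
#include <stdio.h>

struct gem_accounting {
        pthread_mutex_t lock;
        size_t gtt_total;
        size_t pin_memory;
};

static void account_pin(struct gem_accounting *acct, size_t size)
{
        pthread_mutex_lock(&acct->lock);
        acct->pin_memory += size;       /* a paired helper would subtract */
        pthread_mutex_unlock(&acct->lock);
}

static size_t aperture_available(struct gem_accounting *acct)
{
        size_t avail;

        pthread_mutex_lock(&acct->lock);
        avail = acct->gtt_total - acct->pin_memory;
        pthread_mutex_unlock(&acct->lock);
        return avail;
}

int main(void)
{
        struct gem_accounting acct = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .gtt_total = 256 << 20,
        };

        account_pin(&acct, 4 << 20);
        printf("%zu bytes available\n", aperture_available(&acct));
        return 0;
}

Since every update path already holds the one lock, plain integers suffice and the atomics can go away.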
+@@ -136,12 +244,17 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+- /* drop reference from allocate - handle holds it now */
+- drm_gem_object_unreference_unlocked(obj);
+ if (ret) {
++ drm_gem_object_release(obj);
++ i915_gem_info_remove_obj(dev->dev_private, obj->size);
++ kfree(obj);
+ return ret;
+ }
+
++ /* drop reference from allocate - handle holds it now */
++ drm_gem_object_unreference(obj);
++ trace_i915_gem_object_create(obj);
++
+ args->handle = handle;
+ return 0;
+ }
+@@ -152,19 +265,14 @@ fast_shmem_read(struct page **pages,
+ char __user *data,
+ int length)
+ {
+- char __iomem *vaddr;
+- int unwritten;
++ char *vaddr;
++ int ret;
+
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+- if (vaddr == NULL)
+- return -ENOMEM;
+- unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
++ ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+ kunmap_atomic(vaddr, KM_USER0);
+
+- if (unwritten)
+- return -EFAULT;
+-
+- return 0;
++ return ret;
+ }
+
+ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+@@ -258,22 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ loff_t offset, page_base;
+ char __user *user_data;
+ int page_offset, page_length;
+- int ret;
+
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
+ remain = args->size;
+
+- mutex_lock(&dev->struct_mutex);
+-
+- ret = i915_gem_object_get_pages(obj, 0);
+- if (ret != 0)
+- goto fail_unlock;
+-
+- ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+- args->size);
+- if (ret != 0)
+- goto fail_put_pages;
+-
+ obj_priv = to_intel_bo(obj);
+ offset = args->offset;
+
+@@ -290,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ if ((page_offset + remain) > PAGE_SIZE)
+ page_length = PAGE_SIZE - page_offset;
+
+- ret = fast_shmem_read(obj_priv->pages,
+- page_base, page_offset,
+- user_data, page_length);
+- if (ret)
+- goto fail_put_pages;
++ if (fast_shmem_read(obj_priv->pages,
++ page_base, page_offset,
++ user_data, page_length))
++ return -EFAULT;
+
+ remain -= page_length;
+ user_data += page_length;
+ offset += page_length;
+ }
+
+-fail_put_pages:
+- i915_gem_object_put_pages(obj);
+-fail_unlock:
+- mutex_unlock(&dev->struct_mutex);
+-
+- return ret;
++ return 0;
+ }
+
+ static int
+@@ -367,31 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
+
+- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
++ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (user_pages == NULL)
+ return -ENOMEM;
+
++ mutex_unlock(&dev->struct_mutex);
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 1, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
++ mutex_lock(&dev->struct_mutex);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+- goto fail_put_user_pages;
++ goto out;
+ }
+
+- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+-
+- mutex_lock(&dev->struct_mutex);
+-
+- ret = i915_gem_object_get_pages_or_evict(obj);
++ ret = i915_gem_object_set_cpu_read_domain_range(obj,
++ args->offset,
++ args->size);
+ if (ret)
+- goto fail_unlock;
++ goto out;
+
+- ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+- args->size);
+- if (ret != 0)
+- goto fail_put_pages;
++ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+ obj_priv = to_intel_bo(obj);
+ offset = args->offset;
+@@ -436,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ offset += page_length;
+ }
+
+-fail_put_pages:
+- i915_gem_object_put_pages(obj);
+-fail_unlock:
+- mutex_unlock(&dev->struct_mutex);
+-fail_put_user_pages:
++out:
+ for (i = 0; i < pinned_pages; i++) {
+ SetPageDirty(user_pages[i]);
+ page_cache_release(user_pages[i]);
+@@ -462,37 +545,64 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_i915_gem_pread *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+- int ret;
++ int ret = 0;
++
++ ret = i915_mutex_lock_interruptible(dev);
++ if (ret)
++ return ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+- if (obj == NULL)
+- return -ENOENT;
++ if (obj == NULL) {
++ ret = -ENOENT;
++ goto unlock;
++ }
+ obj_priv = to_intel_bo(obj);
+
+ /* Bounds check source. */
+ if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ ret = -EINVAL;
+- goto err;
++ goto out;
+ }
+
++ if (args->size == 0)
++ goto out;
++
+ if (!access_ok(VERIFY_WRITE,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size)) {
+ ret = -EFAULT;
+- goto err;
++ goto out;
+ }
+
+- if (i915_gem_object_needs_bit17_swizzle(obj)) {
+- ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+- } else {
+- ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+- if (ret != 0)
+- ret = i915_gem_shmem_pread_slow(dev, obj, args,
+- file_priv);
++ ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
++ args->size);
++ if (ret) {
++ ret = -EFAULT;
++ goto out;
+ }
+
+-err:
+- drm_gem_object_unreference_unlocked(obj);
++ ret = i915_gem_object_get_pages_or_evict(obj);
++ if (ret)
++ goto out;
++
++ ret = i915_gem_object_set_cpu_read_domain_range(obj,
++ args->offset,
++ args->size);
++ if (ret)
++ goto out_put;
++
++ ret = -EFAULT;
++ if (!i915_gem_object_needs_bit17_swizzle(obj))
++ ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
++ if (ret == -EFAULT)
++ ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
++
++out_put:
++ i915_gem_object_put_pages(obj);
++out:
++ drm_gem_object_unreference(obj);
++unlock:
++ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
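The rewritten ioctl above uses a small dispatch idiom: seed ret with -EFAULT, attempt the fast path only when it is legal, and fall back to the slow path on exactly that error. The same shape in isolation, with hypothetical copy_fast()/copy_slow() stand-ins:

#include <errno.h>
#include <stdio.h>

static int copy_fast(int pages_resident)
{
        return pages_resident ? 0 : -EFAULT;    /* atomic copy may fault */
}

static int copy_slow(void)
{
        return 0;                               /* pins pages, cannot fault */
}

static int dispatch(int swizzled, int pages_resident)
{
        int ret = -EFAULT;

        if (!swizzled)                          /* fast path only legal sometimes */
                ret = copy_fast(pages_resident);
        if (ret == -EFAULT)                     /* fall back on exactly this error */
                ret = copy_slow();
        return ret;
}

int main(void)
{
        printf("%d %d\n", dispatch(0, 1), dispatch(1, 0));
        return 0;
}

Seeding ret with -EFAULT lets the bit-17 swizzle case skip straight to the slow path without a separate branch.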
+@@ -513,9 +623,7 @@ fast_user_write(struct io_mapping *mapping,
+ unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+ user_data, length);
+ io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
+- if (unwritten)
+- return -EFAULT;
+- return 0;
++ return unwritten;
+ }
+
+ /* Here's the write path which can sleep for
+@@ -548,18 +656,14 @@ fast_shmem_write(struct page **pages,
+ char __user *data,
+ int length)
+ {
+- char __iomem *vaddr;
+- unsigned long unwritten;
++ char *vaddr;
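The retire handler above now takes struct_mutex with mutex_trylock() rather than blocking a workqueue thread: if the device is busy, the work requeues itself and returns immediately. A pthread sketch of that non-blocking worker shape; requeue_later() is a hypothetical stand-in for queue_delayed_work():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

static void requeue_later(void)
{
        puts("busy, retrying later");
}

static void retire_work(void)
{
        if (pthread_mutex_trylock(&dev_mutex) != 0) {
                requeue_later();        /* come back later, don't block */
                return;
        }
        /* ... retire completed requests under the lock ... */
        pthread_mutex_unlock(&dev_mutex);
}

int main(void)
{
        retire_work();
        return 0;
}

This keeps a long-running ioctl that holds the lock from pinning a shared worker thread for its whole duration.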
++ int ret;
+
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+- if (vaddr == NULL)
+- return -ENOMEM;
+- unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
++ ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
+ kunmap_atomic(vaddr, KM_USER0);
+
+- if (unwritten)
+- return -EFAULT;
+- return 0;
++ return ret;
+ }
+
+ /**
+@@ -577,22 +681,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ loff_t offset, page_base;
+ char __user *user_data;
+ int page_offset, page_length;
+- int ret;
+
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
+ remain = args->size;
+
+-
+- mutex_lock(&dev->struct_mutex);
+- ret = i915_gem_object_pin(obj, 0);
+- if (ret) {
+- mutex_unlock(&dev->struct_mutex);
+- return ret;
+- }
+- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+- if (ret)
+- goto fail;
+-
+ obj_priv = to_intel_bo(obj);
+ offset = obj_priv->gtt_offset + args->offset;
+
+@@ -609,26 +701,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ if ((page_offset + remain) > PAGE_SIZE)
+ page_length = PAGE_SIZE - page_offset;
+
+- ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
+- page_offset, user_data, page_length);
+-
+ /* If we get a fault while copying data, then (presumably) our
+ * source page isn't available. Return the error and we'll
+ * retry in the slow path.
+ */
+- if (ret)
+- goto fail;
++ if (fast_user_write(dev_priv->mm.gtt_mapping,
++ page_base, page_offset,
++ user_data, page_length))
++ return -EFAULT;
+
+ remain -= page_length;
+ user_data += page_length;
+ offset += page_length;
+ }
+
+-fail:
+- i915_gem_object_unpin(obj);
+- mutex_unlock(&dev->struct_mutex);
+-
+- return ret;
++ return 0;
+ }
+
+ /**
+@@ -665,27 +752,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
+
+- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
++ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (user_pages == NULL)
+ return -ENOMEM;
+
++ mutex_unlock(&dev->struct_mutex);
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 0, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
++ mutex_lock(&dev->struct_mutex);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+ goto out_unpin_pages;
+ }
+
+- mutex_lock(&dev->struct_mutex);
+- ret = i915_gem_object_pin(obj, 0);
+- if (ret)
+- goto out_unlock;
+-
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+- goto out_unpin_object;
++ goto out_unpin_pages;
+
+ obj_priv = to_intel_bo(obj);
+ offset = obj_priv->gtt_offset + args->offset;
+@@ -721,10 +805,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ data_ptr += page_length;
+ }
+
+-out_unpin_object:
+- i915_gem_object_unpin(obj);
+-out_unlock:
+- mutex_unlock(&dev->struct_mutex);
+ out_unpin_pages:
+ for (i = 0; i < pinned_pages; i++)
+ page_cache_release(user_pages[i]);
+@@ -747,21 +827,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ loff_t offset, page_base;
+ char __user *user_data;
+ int page_offset, page_length;
+- int ret;
+
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
+ remain = args->size;
+
+- mutex_lock(&dev->struct_mutex);
+-
+- ret = i915_gem_object_get_pages(obj, 0);
+- if (ret != 0)
+- goto fail_unlock;
+-
+- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+- if (ret != 0)
+- goto fail_put_pages;
+-
+ obj_priv = to_intel_bo(obj);
+ offset = args->offset;
+ obj_priv->dirty = 1;
+@@ -779,23 +848,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+ if ((page_offset + remain) > PAGE_SIZE)
+ page_length = PAGE_SIZE - page_offset;
+
+- ret = fast_shmem_write(obj_priv->pages,
++ if (fast_shmem_write(obj_priv->pages,
+ page_base, page_offset,
+- user_data, page_length);
+- if (ret)
+- goto fail_put_pages;
++ user_data, page_length))
++ return -EFAULT;
+
+ remain -= page_length;
+ user_data += page_length;
+ offset += page_length;
+ }
+
+-fail_put_pages:
+- i915_gem_object_put_pages(obj);
+-fail_unlock:
+- mutex_unlock(&dev->struct_mutex);
+-
+- return ret;
++ return 0;
+ }
+
+ /**
+@@ -833,30 +896,26 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+ num_pages = last_data_page - first_data_page + 1;
+
+- user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
++ user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+ if (user_pages == NULL)
+ return -ENOMEM;
+
++ mutex_unlock(&dev->struct_mutex);
+ down_read(&mm->mmap_sem);
+ pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+ num_pages, 0, 0, user_pages, NULL);
+ up_read(&mm->mmap_sem);
++ mutex_lock(&dev->struct_mutex);
+ if (pinned_pages < num_pages) {
+ ret = -EFAULT;
+- goto fail_put_user_pages;
++ goto out;
+ }
+
+- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+-
+- mutex_lock(&dev->struct_mutex);
+-
+- ret = i915_gem_object_get_pages_or_evict(obj);
++ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+- goto fail_unlock;
++ goto out;
+
+- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+- if (ret != 0)
+- goto fail_put_pages;
++ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+ obj_priv = to_intel_bo(obj);
+ offset = args->offset;
+@@ -902,11 +961,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+ offset += page_length;
+ }
+
+-fail_put_pages:
+- i915_gem_object_put_pages(obj);
+-fail_unlock:
+- mutex_unlock(&dev->struct_mutex);
+-fail_put_user_pages:
++out:
+ for (i = 0; i < pinned_pages; i++)
+ page_cache_release(user_pages[i]);
+ drm_free_large(user_pages);
+@@ -921,29 +976,45 @@ fail_put_user_pages:
+ */
+ int
+ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv)
++ struct drm_file *file)
+ {
+ struct drm_i915_gem_pwrite *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+- if (obj == NULL)
+- return -ENOENT;
++ ret = i915_mutex_lock_interruptible(dev);
++ if (ret)
++ return ret;
++
++ obj = drm_gem_object_lookup(dev, file, args->handle);
++ if (obj == NULL) {
++ ret = -ENOENT;
++ goto unlock;
++ }
+ obj_priv = to_intel_bo(obj);
+
+ /* Bounds check destination. */
+ if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ ret = -EINVAL;
+- goto err;
++ goto out;
+ }
+
++ if (args->size == 0)
++ goto out;
++
+ if (!access_ok(VERIFY_READ,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size)) {
+ ret = -EFAULT;
+- goto err;
++ goto out;
++ }
++
++ ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
++ args->size);
++ if (ret) {
++ ret = -EFAULT;
++ goto out;
+ }
+
+ /* We can only do the GTT pwrite on untiled buffers, as otherwise
+@@ -953,32 +1025,47 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ * perspective, requiring manual detiling by the client.
+ */
+ if (obj_priv->phys_obj)
+- ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
++ ret = i915_gem_phys_pwrite(dev, obj, args, file);
+ else if (obj_priv->tiling_mode == I915_TILING_NONE &&
+- dev->gtt_total != 0 &&
++ obj_priv->gtt_space &&
+ obj->write_domain != I915_GEM_DOMAIN_CPU) {
+- ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
+- if (ret == -EFAULT) {
+- ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
+- file_priv);
+- }
+- } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
+- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
++ ret = i915_gem_object_pin(obj, 0);
++ if (ret)
++ goto out;
++
++ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
++ if (ret)
++ goto out_unpin;
++
++ ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
++ if (ret == -EFAULT)
++ ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
++
++out_unpin:
++ i915_gem_object_unpin(obj);
+ } else {
+- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
+- if (ret == -EFAULT) {
+- ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
+- file_priv);
+- }
+- }
++ ret = i915_gem_object_get_pages_or_evict(obj);
++ if (ret)
++ goto out;
+
+-#if WATCH_PWRITE
+- if (ret)
+- DRM_INFO("pwrite failed %d\n", ret);
+-#endif
++ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
++ if (ret)
++ goto out_put;
+
+-err:
+- drm_gem_object_unreference_unlocked(obj);
++ ret = -EFAULT;
++ if (!i915_gem_object_needs_bit17_swizzle(obj))
++ ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
++ if (ret == -EFAULT)
++ ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
++
++out_put:
++ i915_gem_object_put_pages(obj);
++ }
++
++out:
++ drm_gem_object_unreference(obj);
++unlock:
++ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+@@ -1014,19 +1101,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ if (write_domain != 0 && read_domains != write_domain)
+ return -EINVAL;
+
++ ret = i915_mutex_lock_interruptible(dev);
++ if (ret)
++ return ret;
++
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+- if (obj == NULL)
+- return -ENOENT;
++ if (obj == NULL) {
++ ret = -ENOENT;
++ goto unlock;
++ }
+ obj_priv = to_intel_bo(obj);
+
+- mutex_lock(&dev->struct_mutex);
+-
+ intel_mark_busy(dev, obj);
+
+-#if WATCH_BUF
+- DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
+- obj, obj->size, read_domains, write_domain);
+-#endif
+ if (read_domains & I915_GEM_DOMAIN_GTT) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+@@ -1050,12 +1137,12 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ }
+
+-
+ /* Maintain LRU order of "inactive" objects */
+ if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
+- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+
+ drm_gem_object_unreference(obj);
++unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+@@ -1069,30 +1156,27 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ {
+ struct drm_i915_gem_sw_finish *args = data;
+ struct drm_gem_object *obj;
+- struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+- mutex_lock(&dev->struct_mutex);
++ ret = i915_mutex_lock_interruptible(dev);
++ if (ret)
++ return ret;
++
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+- mutex_unlock(&dev->struct_mutex);
+- return -ENOENT;
++ ret = -ENOENT;
++ goto unlock;
+ }
+
+-#if WATCH_BUF
+- DRM_INFO("%s: sw_finish %d (%p %zd)\n",
+- __func__, args->handle, obj, obj->size);
+-#endif
+- obj_priv = to_intel_bo(obj);
+-
+ /* Pinned buffers may be scanout, so flush the cache */
+- if (obj_priv->pin_count)
++ if (to_intel_bo(obj)->pin_count)
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ drm_gem_object_unreference(obj);
++unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+@@ -1181,13 +1265,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+
+ /* Need a new fence register? */
+ if (obj_priv->tiling_mode != I915_TILING_NONE) {
+- ret = i915_gem_object_get_fence_reg(obj);
++ ret = i915_gem_object_get_fence_reg(obj, true);
+ if (ret)
+ goto unlock;
+ }
+
+ if (i915_gem_object_is_inactive(obj_priv))
+- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+
+ pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+ page_offset;
+@@ -1246,7 +1330,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+ obj->size / PAGE_SIZE, 0, 0);
+ if (!list->file_offset_node) {
+ DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+- ret = -ENOMEM;
++ ret = -ENOSPC;
+ goto out_free_list;
+ }
+
+@@ -1258,9 +1342,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+ }
+
+ list->hash.key = list->file_offset_node->start;
+- if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
++ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
++ if (ret) {
+ DRM_ERROR("failed to add to map hash\n");
+- ret = -ENOMEM;
+ goto out_free_mm;
+ }
+
+@@ -1345,14 +1429,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+ * Minimum alignment is 4k (GTT page size), but might be greater
+ * if a fence register is needed for the object.
+ */
+- if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
++ if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
+ return 4096;
+
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
+- if (IS_I9XX(dev))
++ if (INTEL_INFO(dev)->gen == 3)
+ start = 1024*1024;
+ else
+ start = 512*1024;
+@@ -1390,29 +1474,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+- if (obj == NULL)
+- return -ENOENT;
+-
+- mutex_lock(&dev->struct_mutex);
++ ret = i915_mutex_lock_interruptible(dev);
++ if (ret)
++ return ret;
+
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL) {
++ ret = -ENOENT;
++ goto unlock;
++ }
+ obj_priv = to_intel_bo(obj);
+
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+- drm_gem_object_unreference(obj);
+- mutex_unlock(&dev->struct_mutex);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+
+-
+ if (!obj_priv->mmap_offset) {
+ ret = i915_gem_create_mmap_offset(obj);
+- if (ret) {
+- drm_gem_object_unreference(obj);
+- mutex_unlock(&dev->struct_mutex);
+- return ret;
+- }
++ if (ret)
++ goto out;
+ }
+
+ args->offset = obj_priv->mmap_offset;
+@@ -1423,20 +1505,18 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ */
+ if (!obj_priv->agp_mem) {
+ ret = i915_gem_object_bind_to_gtt(obj, 0);
+- if (ret) {
+- drm_gem_object_unreference(obj);
+- mutex_unlock(&dev->struct_mutex);
+- return ret;
+- }
++ if (ret)
++ goto out;
+ }
+
++out:
+ drm_gem_object_unreference(obj);
++unlock:
+ mutex_unlock(&dev->struct_mutex);
+-
+- return 0;
++ return ret;
+ }
+
+-void
++static void
+ i915_gem_object_put_pages(struct drm_gem_object *obj)
+ {
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+@@ -1470,13 +1550,25 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
+ obj_priv->pages = NULL;
+ }
+
++static uint32_t
++i915_gem_next_request_seqno(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ ring->outstanding_lazy_request = true;
++ return dev_priv->next_seqno;
++}
++
+ static void
+-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
++i915_gem_object_move_to_active(struct drm_gem_object *obj,
+ struct intel_ring_buffer *ring)
+ {
+ struct drm_device *dev = obj->dev;
+- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
++ uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
++
+ BUG_ON(ring == NULL);
+ obj_priv->ring = ring;
+
+@@ -1485,10 +1577,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+ drm_gem_object_reference(obj);
+ obj_priv->active = 1;
+ }
++
+ /* Move from whatever list we were on to the tail of execution. */
+- spin_lock(&dev_priv->mm.active_list_lock);
+- list_move_tail(&obj_priv->list, &ring->active_list);
+- spin_unlock(&dev_priv->mm.active_list_lock);
++ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
++ list_move_tail(&obj_priv->ring_list, &ring->active_list);
+ obj_priv->last_rendering_seqno = seqno;
+ }
+
+@@ -1500,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+ BUG_ON(!obj_priv->active);
+- list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
++ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
++ list_del_init(&obj_priv->ring_list);
+ obj_priv->last_rendering_seqno = 0;
+ }
+
+@@ -1538,11 +1631,11 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+- i915_verify_inactive(dev, __FILE__, __LINE__);
+ if (obj_priv->pin_count != 0)
+- list_del_init(&obj_priv->list);
++ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
+ else
+- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
++ list_del_init(&obj_priv->ring_list);
+
+ BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
+@@ -1552,30 +1645,28 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+ obj_priv->active = 0;
+ drm_gem_object_unreference(obj);
+ }
+- i915_verify_inactive(dev, __FILE__, __LINE__);
++ WARN_ON(i915_verify_lists(dev));
+ }
+
+ static void
+ i915_gem_process_flushing_list(struct drm_device *dev,
+- uint32_t flush_domains, uint32_t seqno,
++ uint32_t flush_domains,
+ struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ list_for_each_entry_safe(obj_priv, next,
+- &dev_priv->mm.gpu_write_list,
++ &ring->gpu_write_list,
+ gpu_write_list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+- if ((obj->write_domain & flush_domains) ==
+- obj->write_domain &&
+- obj_priv->ring->ring_flag == ring->ring_flag) {
++ if (obj->write_domain & flush_domains) {
+ uint32_t old_write_domain = obj->write_domain;
+
+ obj->write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+- i915_gem_object_move_to_active(obj, seqno, ring);
++ i915_gem_object_move_to_active(obj, ring);
+
+ /* update the fence lru list */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+@@ -1593,23 +1684,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
+ }
+
+ uint32_t
+-i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+- uint32_t flush_domains, struct intel_ring_buffer *ring)
++i915_add_request(struct drm_device *dev,
++ struct drm_file *file,
++ struct drm_i915_gem_request *request,
++ struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- struct drm_i915_file_private *i915_file_priv = NULL;
+- struct drm_i915_gem_request *request;
++ struct drm_i915_file_private *file_priv = NULL;
+ uint32_t seqno;
+ int was_empty;
+
+- if (file_priv != NULL)
+- i915_file_priv = file_priv->driver_priv;
++ if (file != NULL)
++ file_priv = file->driver_priv;
+
+- request = kzalloc(sizeof(*request), GFP_KERNEL);
+- if (request == NULL)
+- return 0;
++ if (request == NULL) {
++ request = kzalloc(sizeof(*request), GFP_KERNEL);
++ if (request == NULL)
++ return 0;
++ }
+
+- seqno = ring->add_request(dev, ring, file_priv, flush_domains);
++ seqno = ring->add_request(dev, ring, 0);
++ ring->outstanding_lazy_request = false;
+
+ request->seqno = seqno;
+ request->ring = ring;
+@@ -1617,23 +1712,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ was_empty = list_empty(&ring->request_list);
+ list_add_tail(&request->list, &ring->request_list);
+
+- if (i915_file_priv) {
++ if (file_priv) {
++ spin_lock(&file_priv->mm.lock);
++ request->file_priv = file_priv;
+ list_add_tail(&request->client_list,
+- &i915_file_priv->mm.request_list);
+- } else {
+- INIT_LIST_HEAD(&request->client_list);
++ &file_priv->mm.request_list);
++ spin_unlock(&file_priv->mm.lock);
+ }
+
+- /* Associate any objects on the flushing list matching the write
+- * domain we're flushing with our flush.
+- */
+- if (flush_domains != 0)
+- i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
+-
+ if (!dev_priv->mm.suspended) {
+- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
++ mod_timer(&dev_priv->hangcheck_timer,
++ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ if (was_empty)
+- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
++ queue_delayed_work(dev_priv->wq,
++ &dev_priv->mm.retire_work, HZ);
+ }
+ return seqno;
+ }
+@@ -1644,91 +1736,105 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ * Ensures that all commands in the ring are finished
+ * before signalling the CPU
+ */
+-static uint32_t
++static void
+ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
+ {
+ uint32_t flush_domains = 0;
+
+ /* The sampler always gets flushed on i965 (sigh) */
+- if (IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen >= 4)
+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+
+ ring->flush(dev, ring,
+ I915_GEM_DOMAIN_COMMAND, flush_domains);
+- return flush_domains;
+ }
+
+-/**
+- * Moves buffers associated only with the given active seqno from the active
+- * to inactive list, potentially freeing them.
+- */
+-static void
+-i915_gem_retire_request(struct drm_device *dev,
+- struct drm_i915_gem_request *request)
++static inline void
++i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+ {
+- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_file_private *file_priv = request->file_priv;
+
+- trace_i915_gem_request_retire(dev, request->seqno);
++ if (!file_priv)
++ return;
+
+- /* Move any buffers on the active list that are no longer referenced
+- * by the ringbuffer to the flushing/inactive lists as appropriate.
+- */
+- spin_lock(&dev_priv->mm.active_list_lock);
+- while (!list_empty(&request->ring->active_list)) {
+- struct drm_gem_object *obj;
+- struct drm_i915_gem_object *obj_priv;
++ spin_lock(&file_priv->mm.lock);
++ list_del(&request->client_list);
++ request->file_priv = NULL;
++ spin_unlock(&file_priv->mm.lock);
++}
+
+- obj_priv = list_first_entry(&request->ring->active_list,
+- struct drm_i915_gem_object,
+- list);
+- obj = &obj_priv->base;
++static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
++ struct intel_ring_buffer *ring)
++{
++ while (!list_empty(&ring->request_list)) {
++ struct drm_i915_gem_request *request;
+
+- /* If the seqno being retired doesn't match the oldest in the
+- * list, then the oldest in the list must still be newer than
+- * this seqno.
+- */
+- if (obj_priv->last_rendering_seqno != request->seqno)
+- goto out;
++ request = list_first_entry(&ring->request_list,
++ struct drm_i915_gem_request,
++ list);
+
+-#if WATCH_LRU
+- DRM_INFO("%s: retire %d moves to inactive list %p\n",
+- __func__, request->seqno, obj);
+-#endif
++ list_del(&request->list);
++ i915_gem_request_remove_from_client(request);
++ kfree(request);
++ }
+
+- if (obj->write_domain != 0)
+- i915_gem_object_move_to_flushing(obj);
+- else {
+- /* Take a reference on the object so it won't be
+- * freed while the spinlock is held. The list
+- * protection for this spinlock is safe when breaking
+- * the lock like this since the next thing we do
+- * is just get the head of the list again.
+- */
+- drm_gem_object_reference(obj);
+- i915_gem_object_move_to_inactive(obj);
+- spin_unlock(&dev_priv->mm.active_list_lock);
+- drm_gem_object_unreference(obj);
+- spin_lock(&dev_priv->mm.active_list_lock);
+- }
++ while (!list_empty(&ring->active_list)) {
++ struct drm_i915_gem_object *obj_priv;
++
++ obj_priv = list_first_entry(&ring->active_list,
++ struct drm_i915_gem_object,
++ ring_list);
++
++ obj_priv->base.write_domain = 0;
++ list_del_init(&obj_priv->gpu_write_list);
++ i915_gem_object_move_to_inactive(&obj_priv->base);
+ }
+-out:
+- spin_unlock(&dev_priv->mm.active_list_lock);
+ }
+
+-/**
+- * Returns true if seq1 is later than seq2.
+- */
+-bool
+-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
++void i915_gem_reset(struct drm_device *dev)
+ {
+- return (int32_t)(seq1 - seq2) >= 0;
+-}
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv;
++ int i;
+
+-uint32_t
+-i915_get_gem_seqno(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
+-{
+- return ring->get_gem_seqno(dev, ring);
++ i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
++ i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
++ i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
++
++ /* Remove anything from the flushing lists. The GPU cache is likely
++ * to be lost on reset along with the data, so simply move the
++ * lost bo to the inactive list.
++ */
++ while (!list_empty(&dev_priv->mm.flushing_list)) {
++ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
++ struct drm_i915_gem_object,
++ mm_list);
++
++ obj_priv->base.write_domain = 0;
++ list_del_init(&obj_priv->gpu_write_list);
++ i915_gem_object_move_to_inactive(&obj_priv->base);
++ }
++
++ /* Move everything out of the GPU domains to ensure we do any
++ * necessary invalidation upon reuse.
++ */
++ list_for_each_entry(obj_priv,
++ &dev_priv->mm.inactive_list,
++ mm_list)
++ {
++ obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
++ }
++
++ /* The fence registers are invalidated so clear them out */
++ for (i = 0; i < 16; i++) {
++ struct drm_i915_fence_reg *reg;
++
++ reg = &dev_priv->fence_regs[i];
++ if (!reg->obj)
++ continue;
++
++ i915_gem_clear_fence_reg(reg->obj);
++ }
+ }
+
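The seqno ordering test deleted from this file above, (int32_t)(seq1 - seq2) >= 0, is still what the retire loops below depend on: two u32 sequence numbers are ordered by the sign of their difference, which stays correct across wraparound. A self-checking sketch of that comparison:

#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
        /* seq1 is at-or-after seq2 iff their signed difference is >= 0 */
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        assert(seqno_passed(5, 3));
        assert(!seqno_passed(3, 5));
        assert(seqno_passed(2, 0xfffffffeu));   /* ordering survives the wrap */
        return 0;
}

A plain seq1 >= seq2 would misorder requests the moment the counter wrapped, which is why the signed-difference form is used throughout.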
+ /**
+@@ -1741,38 +1847,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno;
+
+- if (!ring->status_page.page_addr
+- || list_empty(&ring->request_list))
++ if (!ring->status_page.page_addr ||
++ list_empty(&ring->request_list))
+ return;
+
+- seqno = i915_get_gem_seqno(dev, ring);
++ WARN_ON(i915_verify_lists(dev));
+
++ seqno = ring->get_seqno(dev, ring);
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+- uint32_t retiring_seqno;
+
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+- retiring_seqno = request->seqno;
+
+- if (i915_seqno_passed(seqno, retiring_seqno) ||
+- atomic_read(&dev_priv->mm.wedged)) {
+- i915_gem_retire_request(dev, request);
++ if (!i915_seqno_passed(seqno, request->seqno))
++ break;
++
++ trace_i915_gem_request_retire(dev, request->seqno);
++
++ list_del(&request->list);
++ i915_gem_request_remove_from_client(request);
++ kfree(request);
++ }
++
++ /* Move any buffers on the active list that are no longer referenced
++ * by the ringbuffer to the flushing/inactive lists as appropriate.
++ */
++ while (!list_empty(&ring->active_list)) {
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++
++ obj_priv = list_first_entry(&ring->active_list,
++ struct drm_i915_gem_object,
++ ring_list);
+
+- list_del(&request->list);
+- list_del(&request->client_list);
+- kfree(request);
+- } else
++ if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+ break;
++
++ obj = &obj_priv->base;
++ if (obj->write_domain != 0)
++ i915_gem_object_move_to_flushing(obj);
++ else
++ i915_gem_object_move_to_inactive(obj);
+ }
+
+ if (unlikely (dev_priv->trace_irq_seqno &&
+ i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+-
+ ring->user_irq_put(dev, ring);
+ dev_priv->trace_irq_seqno = 0;
+ }
++
++ WARN_ON(i915_verify_lists(dev));
+ }
+
+ void
+@@ -1790,16 +1916,16 @@ i915_gem_retire_requests(struct drm_device *dev)
+ */
+ list_for_each_entry_safe(obj_priv, tmp,
+ &dev_priv->mm.deferred_free_list,
+- list)
++ mm_list)
+ i915_gem_free_object_tail(&obj_priv->base);
+ }
+
+ i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
+- if (HAS_BSD(dev))
+- i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
++ i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
++ i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
+ }
+
+-void
++static void
+ i915_gem_retire_work_handler(struct work_struct *work)
+ {
+ drm_i915_private_t *dev_priv;
+@@ -1809,20 +1935,25 @@ i915_gem_retire_work_handler(struct work_struct *work)
+ mm.retire_work.work);
+ dev = dev_priv->dev;
+
+- mutex_lock(&dev->struct_mutex);
++ /* Come back later if the device is busy... */
++ if (!mutex_trylock(&dev->struct_mutex)) {
++ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
++ return;
++ }
++
+ i915_gem_retire_requests(dev);
+
+ if (!dev_priv->mm.suspended &&
+ (!list_empty(&dev_priv->render_ring.request_list) ||
+- (HAS_BSD(dev) &&
+- !list_empty(&dev_priv->bsd_ring.request_list))))
++ !list_empty(&dev_priv->bsd_ring.request_list) ||
++ !list_empty(&dev_priv->blt_ring.request_list)))
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ int
+ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+- int interruptible, struct intel_ring_buffer *ring)
++ bool interruptible, struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 ier;
+@@ -1831,9 +1962,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+ BUG_ON(seqno == 0);
+
+ if (atomic_read(&dev_priv->mm.wedged))
+- return -EIO;
++ return -EAGAIN;
+
+- if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
++ if (ring->outstanding_lazy_request) {
++ seqno = i915_add_request(dev, NULL, NULL, ring);
++ if (seqno == 0)
++ return -ENOMEM;
++ }
++ BUG_ON(seqno == dev_priv->next_seqno);
++
++ if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (HAS_PCH_SPLIT(dev))
+ ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else
+@@ -1852,12 +1990,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+ if (interruptible)
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(
+- ring->get_gem_seqno(dev, ring), seqno)
++ ring->get_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ else
+ wait_event(ring->irq_queue,
+ i915_seqno_passed(
+- ring->get_gem_seqno(dev, ring), seqno)
++ ring->get_seqno(dev, ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+
+ ring->user_irq_put(dev, ring);
+@@ -1866,11 +2004,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+ trace_i915_gem_request_wait_end(dev, seqno);
+ }
+ if (atomic_read(&dev_priv->mm.wedged))
+- ret = -EIO;
++ ret = -EAGAIN;
+
+ if (ret && ret != -ERESTARTSYS)
+- DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
+- __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
++ DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
++ __func__, ret, seqno, ring->get_seqno(dev, ring),
++ dev_priv->next_seqno);
+
+ /* Directly dispatch request retiring. While we have the work queue
+ * to handle this, the waiter on a request often wants an associated
+@@ -1889,27 +2028,48 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+ */
+ static int
+ i915_wait_request(struct drm_device *dev, uint32_t seqno,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ return i915_do_wait_request(dev, seqno, 1, ring);
+ }
+
+ static void
++i915_gem_flush_ring(struct drm_device *dev,
++ struct drm_file *file_priv,
++ struct intel_ring_buffer *ring,
++ uint32_t invalidate_domains,
++ uint32_t flush_domains)
++{
++ ring->flush(dev, ring, invalidate_domains, flush_domains);
++ i915_gem_process_flushing_list(dev, flush_domains, ring);
++}
++
++static void
+ i915_gem_flush(struct drm_device *dev,
++ struct drm_file *file_priv,
+ uint32_t invalidate_domains,
+- uint32_t flush_domains)
++ uint32_t flush_domains,
++ uint32_t flush_rings)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
++
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+- dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+- invalidate_domains,
+- flush_domains);
+-
+- if (HAS_BSD(dev))
+- dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+- invalidate_domains,
+- flush_domains);
++
++ if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
++ if (flush_rings & RING_RENDER)
++ i915_gem_flush_ring(dev, file_priv,
++ &dev_priv->render_ring,
++ invalidate_domains, flush_domains);
++ if (flush_rings & RING_BSD)
++ i915_gem_flush_ring(dev, file_priv,
++ &dev_priv->bsd_ring,
++ invalidate_domains, flush_domains);
++ if (flush_rings & RING_BLT)
++ i915_gem_flush_ring(dev, file_priv,
++ &dev_priv->blt_ring,
++ invalidate_domains, flush_domains);
++ }
+ }
+
+ /**
+@@ -1917,7 +2077,8 @@ i915_gem_flush(struct drm_device *dev,
+ * safe to unbind from the GTT or access from the CPU.
+ */
+ static int
+-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
++i915_gem_object_wait_rendering(struct drm_gem_object *obj,
++ bool interruptible)
+ {
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+@@ -1932,13 +2093,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+ * it.
+ */
+ if (obj_priv->active) {
+-#if WATCH_BUF
+- DRM_INFO("%s: object %p wait for seqno %08x\n",
+- __func__, obj, obj_priv->last_rendering_seqno);
+-#endif
+- ret = i915_wait_request(dev,
+- obj_priv->last_rendering_seqno, obj_priv->ring);
+- if (ret != 0)
++ ret = i915_do_wait_request(dev,
++ obj_priv->last_rendering_seqno,
++ interruptible,
++ obj_priv->ring);
++ if (ret)
+ return ret;
+ }
+
+@@ -1952,14 +2111,10 @@ int
+ i915_gem_object_unbind(struct drm_gem_object *obj)
+ {
+ struct drm_device *dev = obj->dev;
+- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ int ret = 0;
+
+-#if WATCH_BUF
+- DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
+- DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
+-#endif
+ if (obj_priv->gtt_space == NULL)
+ return 0;
+
+@@ -1984,33 +2139,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
+ * should be safe and we need to cleanup or else we might
+ * cause memory corruption through use-after-free.
+ */
++ if (ret) {
++ i915_gem_clflush_object(obj);
++ obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
++ }
+
+ /* release the fence reg _after_ flushing */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
+- if (obj_priv->agp_mem != NULL) {
+- drm_unbind_agp(obj_priv->agp_mem);
+- drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+- obj_priv->agp_mem = NULL;
+- }
++ drm_unbind_agp(obj_priv->agp_mem);
++ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+
+ i915_gem_object_put_pages(obj);
+ BUG_ON(obj_priv->pages_refcount);
+
+- if (obj_priv->gtt_space) {
+- atomic_dec(&dev->gtt_count);
+- atomic_sub(obj->size, &dev->gtt_memory);
+-
+- drm_mm_put_block(obj_priv->gtt_space);
+- obj_priv->gtt_space = NULL;
+- }
++ i915_gem_info_remove_gtt(dev_priv, obj->size);
++ list_del_init(&obj_priv->mm_list);
+
+- /* Remove ourselves from the LRU list if present. */
+- spin_lock(&dev_priv->mm.active_list_lock);
+- if (!list_empty(&obj_priv->list))
+- list_del_init(&obj_priv->list);
+- spin_unlock(&dev_priv->mm.active_list_lock);
++ drm_mm_put_block(obj_priv->gtt_space);
++ obj_priv->gtt_space = NULL;
++ obj_priv->gtt_offset = 0;
+
+ if (i915_gem_object_is_purgeable(obj_priv))
+ i915_gem_object_truncate(obj);
+@@ -2020,48 +2169,48 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
+ return ret;
+ }
+
++static int i915_ring_idle(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
++{
++ if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
++ return 0;
++
++ i915_gem_flush_ring(dev, NULL, ring,
++ I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
++ return i915_wait_request(dev,
++ i915_gem_next_request_seqno(dev, ring),
++ ring);
++}
++
+ int
+ i915_gpu_idle(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool lists_empty;
+- uint32_t seqno1, seqno2;
+ int ret;
+
+- spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
+- list_empty(&dev_priv->render_ring.active_list) &&
+- (!HAS_BSD(dev) ||
+- list_empty(&dev_priv->bsd_ring.active_list)));
+- spin_unlock(&dev_priv->mm.active_list_lock);
+-
++ list_empty(&dev_priv->mm.active_list));
+ if (lists_empty)
+ return 0;
+
+ /* Flush everything onto the inactive list. */
+- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+- seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+- &dev_priv->render_ring);
+- if (seqno1 == 0)
+- return -ENOMEM;
+- ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+-
+- if (HAS_BSD(dev)) {
+- seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+- &dev_priv->bsd_ring);
+- if (seqno2 == 0)
+- return -ENOMEM;
++ ret = i915_ring_idle(dev, &dev_priv->render_ring);
++ if (ret)
++ return ret;
+
+- ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+- if (ret)
+- return ret;
+- }
++ ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
++ if (ret)
++ return ret;
+
++ ret = i915_ring_idle(dev, &dev_priv->blt_ring);
++ if (ret)
++ return ret;
+
+- return ret;
++ return 0;
+ }
+
+-int
++static int
+ i915_gem_object_get_pages(struct drm_gem_object *obj,
+ gfp_t gfpmask)
+ {
+@@ -2241,7 +2390,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+ I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+ }
+
+-static int i915_find_fence_reg(struct drm_device *dev)
++static int i915_find_fence_reg(struct drm_device *dev,
++ bool interruptible)
+ {
+ struct drm_i915_fence_reg *reg = NULL;
+ struct drm_i915_gem_object *obj_priv = NULL;
+@@ -2286,7 +2436,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
+ * private reference to obj like the other callers of put_fence_reg
+ * (set_tiling ioctl) do. */
+ drm_gem_object_reference(obj);
+- ret = i915_gem_object_put_fence_reg(obj);
++ ret = i915_gem_object_put_fence_reg(obj, interruptible);
+ drm_gem_object_unreference(obj);
+ if (ret != 0)
+ return ret;
+@@ -2308,7 +2458,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
+ * and tiling format.
+ */
+ int
+-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
++i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
++ bool interruptible)
+ {
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -2343,7 +2494,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+ break;
+ }
+
+- ret = i915_find_fence_reg(dev);
++ ret = i915_find_fence_reg(dev, interruptible);
+ if (ret < 0)
+ return ret;
+
+@@ -2421,15 +2572,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+ * i915_gem_object_put_fence_reg - waits on outstanding fenced access
+ * to the buffer to finish, and then resets the fence register.
+ * @obj: tiled object holding a fence register.
++ * @interruptible: whether the wait upon the fence is interruptible
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
+ */
+ int
+-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
++i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
++ bool interruptible)
+ {
+ struct drm_device *dev = obj->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
++ struct drm_i915_fence_reg *reg;
+
+ if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
+ return 0;
+@@ -2444,20 +2599,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+ * therefore we must wait for any outstanding access to complete
+ * before clearing the fence.
+ */
+- if (!IS_I965G(dev)) {
++ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
++ if (reg->gpu) {
+ int ret;
+
+- ret = i915_gem_object_flush_gpu_write_domain(obj);
+- if (ret != 0)
++ ret = i915_gem_object_flush_gpu_write_domain(obj, true);
++ if (ret)
+ return ret;
+
+- ret = i915_gem_object_wait_rendering(obj);
+- if (ret != 0)
++ ret = i915_gem_object_wait_rendering(obj, interruptible);
++ if (ret)
+ return ret;
++
++ reg->gpu = false;
+ }
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+- i915_gem_clear_fence_reg (obj);
++ i915_gem_clear_fence_reg(obj);
+
+ return 0;
+ }
+@@ -2490,7 +2648,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ /* If the object is bigger than the entire aperture, reject it early
+ * before evicting everything in a vain attempt to find space.
+ */
+- if (obj->size > dev->gtt_total) {
++ if (obj->size > dev_priv->mm.gtt_total) {
+ DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+ return -E2BIG;
+ }
+@@ -2498,19 +2656,13 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ search_free:
+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ obj->size, alignment, 0);
+- if (free_space != NULL) {
++ if (free_space != NULL)
+ obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
+ alignment);
+- if (obj_priv->gtt_space != NULL)
+- obj_priv->gtt_offset = obj_priv->gtt_space->start;
+- }
+ if (obj_priv->gtt_space == NULL) {
+ /* If the gtt is empty and we're still having trouble
+ * fitting our object in, we're out of memory.
+ */
+-#if WATCH_LRU
+- DRM_INFO("%s: GTT full, evicting something\n", __func__);
+-#endif
+ ret = i915_gem_evict_something(dev, obj->size, alignment);
+ if (ret)
+ return ret;
+@@ -2518,10 +2670,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ goto search_free;
+ }
+
+-#if WATCH_BUF
+- DRM_INFO("Binding object of size %zd at 0x%08x\n",
+- obj->size, obj_priv->gtt_offset);
+-#endif
+ ret = i915_gem_object_get_pages(obj, gfpmask);
+ if (ret) {
+ drm_mm_put_block(obj_priv->gtt_space);
+@@ -2553,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ obj_priv->agp_mem = drm_agp_bind_pages(dev,
+ obj_priv->pages,
+ obj->size >> PAGE_SHIFT,
+- obj_priv->gtt_offset,
++ obj_priv->gtt_space->start,
+ obj_priv->agp_type);
+ if (obj_priv->agp_mem == NULL) {
+ i915_gem_object_put_pages(obj);
+@@ -2566,11 +2714,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+
+ goto search_free;
+ }
+- atomic_inc(&dev->gtt_count);
+- atomic_add(obj->size, &dev->gtt_memory);
+
+ /* keep track of bounds object by adding it to the inactive list */
+- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++ list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
++ i915_gem_info_add_gtt(dev_priv, obj->size);
+
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+@@ -2579,6 +2726,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+
++ obj_priv->gtt_offset = obj_priv->gtt_space->start;
+ trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+
+ return 0;
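The binding path above keeps its search_free retry loop: if no GTT node can be found, evict something and search again, failing only when eviction itself fails. A toy model of the same loop, with a counter in place of a real allocator (helpers are hypothetical):

#include <stdio.h>

static int capacity = 2, used = 2;	/* toy GTT: two slots, both busy */

static int alloc_space(void) { return used < capacity ? used++ : -1; }
static int evict_something(void) { if (used == 0) return -1; used--; return 0; }

static int bind_to_gtt(void)
{
search_free:
	if (alloc_space() < 0) {
		/* GTT full: evict something and run the search again. */
		if (evict_something())
			return -1;	/* nothing to evict: out of memory */
		goto search_free;
	}
	return 0;
}

int main(void)
{
	printf("bind: %d (used %d/%d)\n", bind_to_gtt(), used, capacity);
	return 0;
}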
+@@ -2603,25 +2751,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
+
+ /** Flushes any GPU write domain for the object if it's dirty. */
+ static int
+-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
++i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
++ bool pipelined)
+ {
+ struct drm_device *dev = obj->dev;
+ uint32_t old_write_domain;
+- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+ if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ return 0;
+
+ /* Queue the GPU write cache flushing we need. */
+ old_write_domain = obj->write_domain;
+- i915_gem_flush(dev, 0, obj->write_domain);
+- if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
+- return -ENOMEM;
++ i915_gem_flush_ring(dev, NULL,
++ to_intel_bo(obj)->ring,
++ 0, obj->write_domain);
++ BUG_ON(obj->write_domain);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
+- return 0;
++
++ if (pipelined)
++ return 0;
++
++ return i915_gem_object_wait_rendering(obj, true);
+ }
+
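The reworked flush above gains a pipelined flag: the flush is always queued, but the wait is skipped when the caller can order its own GPU work behind the flush. A sketch of that contract with stub types (not the real driver API):

#include <stdbool.h>
#include <stdio.h>

struct object { unsigned int write_domain; };

static void queue_flush(struct object *obj) { obj->write_domain = 0; }
static int wait_rendering(struct object *obj) { (void)obj; return 0; }

static int flush_gpu_write_domain(struct object *obj, bool pipelined)
{
	if (obj->write_domain == 0)
		return 0;

	queue_flush(obj);		/* the flush is always queued */
	if (pipelined)
		return 0;		/* caller orders work behind the flush */
	return wait_rendering(obj);	/* caller needs it completed now */
}

int main(void)
{
	struct object obj = { .write_domain = 1 };

	printf("%d\n", flush_gpu_write_domain(&obj, true));
	return 0;
}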
+ /** Flushes the GTT write domain for the object if it's dirty. */
+@@ -2665,26 +2818,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+ old_write_domain);
+ }
+
+-int
+-i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+-{
+- int ret = 0;
+-
+- switch (obj->write_domain) {
+- case I915_GEM_DOMAIN_GTT:
+- i915_gem_object_flush_gtt_write_domain(obj);
+- break;
+- case I915_GEM_DOMAIN_CPU:
+- i915_gem_object_flush_cpu_write_domain(obj);
+- break;
+- default:
+- ret = i915_gem_object_flush_gpu_write_domain(obj);
+- break;
+- }
+-
+- return ret;
+-}
+-
+ /**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+@@ -2702,32 +2835,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+ if (obj_priv->gtt_space == NULL)
+ return -EINVAL;
+
+- ret = i915_gem_object_flush_gpu_write_domain(obj);
++ ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ if (ret != 0)
+ return ret;
+
+- /* Wait on any GPU rendering and flushing to occur. */
+- ret = i915_gem_object_wait_rendering(obj);
+- if (ret != 0)
+- return ret;
++ i915_gem_object_flush_cpu_write_domain(obj);
++
++ if (write) {
++ ret = i915_gem_object_wait_rendering(obj, true);
++ if (ret)
++ return ret;
++ }
+
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
+- /* If we're writing through the GTT domain, then CPU and GPU caches
+- * will need to be invalidated at next use.
+- */
+- if (write)
+- obj->read_domains &= I915_GEM_DOMAIN_GTT;
+-
+- i915_gem_object_flush_cpu_write_domain(obj);
+-
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ if (write) {
++ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj_priv->dirty = 1;
+ }
+@@ -2744,51 +2873,36 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+ * wait, as in modesetting process we're not supposed to be interrupted.
+ */
+ int
+-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
++i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
++ bool pipelined)
+ {
+- struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+- uint32_t old_write_domain, old_read_domains;
++ uint32_t old_read_domains;
+ int ret;
+
+ /* Not valid to be called on unbound objects. */
+ if (obj_priv->gtt_space == NULL)
+ return -EINVAL;
+
+- ret = i915_gem_object_flush_gpu_write_domain(obj);
++ ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+ if (ret)
+ return ret;
+
+- /* Wait on any GPU rendering and flushing to occur. */
+- if (obj_priv->active) {
+-#if WATCH_BUF
+- DRM_INFO("%s: object %p wait for seqno %08x\n",
+- __func__, obj, obj_priv->last_rendering_seqno);
+-#endif
+- ret = i915_do_wait_request(dev,
+- obj_priv->last_rendering_seqno,
+- 0,
+- obj_priv->ring);
+- if (ret != 0)
++	/* Currently, we are always called from a non-interruptible context. */
++ if (!pipelined) {
++ ret = i915_gem_object_wait_rendering(obj, false);
++ if (ret)
+ return ret;
+ }
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+- old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+-
+- /* It should now be out of any other write domains, and we can update
+- * the domain values for our changes.
+- */
+- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+- obj->read_domains = I915_GEM_DOMAIN_GTT;
+- obj->write_domain = I915_GEM_DOMAIN_GTT;
+- obj_priv->dirty = 1;
++ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+- old_write_domain);
++ obj->write_domain);
+
+ return 0;
+ }
+@@ -2805,12 +2919,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+ uint32_t old_write_domain, old_read_domains;
+ int ret;
+
+- ret = i915_gem_object_flush_gpu_write_domain(obj);
+- if (ret)
+- return ret;
+-
+- /* Wait on any GPU rendering and flushing to occur. */
+- ret = i915_gem_object_wait_rendering(obj);
++ ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ if (ret != 0)
+ return ret;
+
+@@ -2821,6 +2930,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+ */
+ i915_gem_object_set_to_full_cpu_read_domain(obj);
+
++ if (write) {
++ ret = i915_gem_object_wait_rendering(obj, true);
++ if (ret)
++ return ret;
++ }
++
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
+@@ -2840,7 +2955,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+ * need to be invalidated at next use.
+ */
+ if (write) {
+- obj->read_domains &= I915_GEM_DOMAIN_CPU;
++ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+@@ -2963,26 +3078,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+ * drm_agp_chipset_flush
+ */
+ static void
+-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
++i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
++ struct intel_ring_buffer *ring)
+ {
+ struct drm_device *dev = obj->dev;
+- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ uint32_t invalidate_domains = 0;
+ uint32_t flush_domains = 0;
+ uint32_t old_read_domains;
+
+- BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+- BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
+-
+ intel_mark_busy(dev, obj);
+
+-#if WATCH_BUF
+- DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
+- __func__, obj,
+- obj->read_domains, obj->pending_read_domains,
+- obj->write_domain, obj->pending_write_domain);
+-#endif
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+@@ -2999,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+ * write domain
+ */
+ if (obj->write_domain &&
+- obj->write_domain != obj->pending_read_domains) {
++ (obj->write_domain != obj->pending_read_domains ||
++ obj_priv->ring != ring)) {
+ flush_domains |= obj->write_domain;
+ invalidate_domains |=
+ obj->pending_read_domains & ~obj->write_domain;
+@@ -3009,13 +3117,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
+- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
+-#if WATCH_BUF
+- DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
+- __func__, flush_domains, invalidate_domains);
+-#endif
++ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+- }
+
+ old_read_domains = obj->read_domains;
+
+@@ -3029,21 +3132,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+ obj->pending_write_domain = obj->write_domain;
+ obj->read_domains = obj->pending_read_domains;
+
+- if (flush_domains & I915_GEM_GPU_DOMAINS) {
+- if (obj_priv->ring == &dev_priv->render_ring)
+- dev_priv->flush_rings |= FLUSH_RENDER_RING;
+- else if (obj_priv->ring == &dev_priv->bsd_ring)
+- dev_priv->flush_rings |= FLUSH_BSD_RING;
+- }
+-
+ dev->invalidate_domains |= invalidate_domains;
+ dev->flush_domains |= flush_domains;
+-#if WATCH_BUF
+- DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
+- __func__,
+- obj->read_domains, obj->write_domain,
+- dev->invalidate_domains, dev->flush_domains);
+-#endif
++ if (flush_domains & I915_GEM_GPU_DOMAINS)
++ dev_priv->mm.flush_rings |= obj_priv->ring->id;
++ if (invalidate_domains & I915_GEM_GPU_DOMAINS)
++ dev_priv->mm.flush_rings |= ring->id;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+@@ -3106,12 +3200,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ if (offset == 0 && size == obj->size)
+ return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+- ret = i915_gem_object_flush_gpu_write_domain(obj);
+- if (ret)
+- return ret;
+-
+- /* Wait on any GPU rendering and flushing to occur. */
+- ret = i915_gem_object_wait_rendering(obj);
++ ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ if (ret != 0)
+ return ret;
+ i915_gem_object_flush_gtt_write_domain(obj);
+@@ -3164,66 +3253,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ * Pin an object to the GTT and evaluate the relocations landing in it.
+ */
+ static int
+-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+- struct drm_file *file_priv,
+- struct drm_i915_gem_exec_object2 *entry,
+- struct drm_i915_gem_relocation_entry *relocs)
++i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
++ struct drm_file *file_priv,
++ struct drm_i915_gem_exec_object2 *entry)
+ {
+- struct drm_device *dev = obj->dev;
++ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+- int i, ret;
+- void __iomem *reloc_page;
+- bool need_fence;
+-
+- need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+- obj_priv->tiling_mode != I915_TILING_NONE;
+-
+- /* Check fence reg constraints and rebind if necessary */
+- if (need_fence &&
+- !i915_gem_object_fence_offset_ok(obj,
+- obj_priv->tiling_mode)) {
+- ret = i915_gem_object_unbind(obj);
+- if (ret)
+- return ret;
+- }
++ struct drm_i915_gem_relocation_entry __user *user_relocs;
++ struct drm_gem_object *target_obj = NULL;
++ uint32_t target_handle = 0;
++ int i, ret = 0;
+
+- /* Choose the GTT offset for our buffer and put it there. */
+- ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+- if (ret)
+- return ret;
++ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
++ for (i = 0; i < entry->relocation_count; i++) {
++ struct drm_i915_gem_relocation_entry reloc;
++ uint32_t target_offset;
+
+- /*
+- * Pre-965 chips need a fence register set up in order to
+- * properly handle blits to/from tiled surfaces.
+- */
+- if (need_fence) {
+- ret = i915_gem_object_get_fence_reg(obj);
+- if (ret != 0) {
+- i915_gem_object_unpin(obj);
+- return ret;
++ if (__copy_from_user_inatomic(&reloc,
++ user_relocs+i,
++ sizeof(reloc))) {
++ ret = -EFAULT;
++ break;
+ }
+- }
+
+- entry->offset = obj_priv->gtt_offset;
++ if (reloc.target_handle != target_handle) {
++ drm_gem_object_unreference(target_obj);
+
+- /* Apply the relocations, using the GTT aperture to avoid cache
+- * flushing requirements.
+- */
+- for (i = 0; i < entry->relocation_count; i++) {
+- struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
+- struct drm_gem_object *target_obj;
+- struct drm_i915_gem_object *target_obj_priv;
+- uint32_t reloc_val, reloc_offset;
+- uint32_t __iomem *reloc_entry;
+-
+- target_obj = drm_gem_object_lookup(obj->dev, file_priv,
+- reloc->target_handle);
+- if (target_obj == NULL) {
+- i915_gem_object_unpin(obj);
+- return -ENOENT;
++ target_obj = drm_gem_object_lookup(dev, file_priv,
++ reloc.target_handle);
++ if (target_obj == NULL) {
++ ret = -ENOENT;
++ break;
++ }
++
++ target_handle = reloc.target_handle;
+ }
+- target_obj_priv = to_intel_bo(target_obj);
++ target_offset = to_intel_bo(target_obj)->gtt_offset;
+
+ #if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+@@ -3231,268 +3296,313 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+- (int) reloc->offset,
+- (int) reloc->target_handle,
+- (int) reloc->read_domains,
+- (int) reloc->write_domain,
+- (int) target_obj_priv->gtt_offset,
+- (int) reloc->presumed_offset,
+- reloc->delta);
++ (int) reloc.offset,
++ (int) reloc.target_handle,
++ (int) reloc.read_domains,
++ (int) reloc.write_domain,
++ (int) target_offset,
++ (int) reloc.presumed_offset,
++ reloc.delta);
+ #endif
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+- if (target_obj_priv->gtt_space == NULL) {
++ if (target_offset == 0) {
+ DRM_ERROR("No GTT space found for object %d\n",
+- reloc->target_handle);
+- drm_gem_object_unreference(target_obj);
+- i915_gem_object_unpin(obj);
+- return -EINVAL;
++ reloc.target_handle);
++ ret = -EINVAL;
++ break;
+ }
+
+ /* Validate that the target is in a valid r/w GPU domain */
+- if (reloc->write_domain & (reloc->write_domain - 1)) {
++ if (reloc.write_domain & (reloc.write_domain - 1)) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+- obj, reloc->target_handle,
+- (int) reloc->offset,
+- reloc->read_domains,
+- reloc->write_domain);
+- drm_gem_object_unreference(target_obj);
+- i915_gem_object_unpin(obj);
+- return -EINVAL;
++ obj, reloc.target_handle,
++ (int) reloc.offset,
++ reloc.read_domains,
++ reloc.write_domain);
++ ret = -EINVAL;
++ break;
+ }
+- if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+- reloc->read_domains & I915_GEM_DOMAIN_CPU) {
++ if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
++ reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+- obj, reloc->target_handle,
+- (int) reloc->offset,
+- reloc->read_domains,
+- reloc->write_domain);
+- drm_gem_object_unreference(target_obj);
+- i915_gem_object_unpin(obj);
+- return -EINVAL;
++ obj, reloc.target_handle,
++ (int) reloc.offset,
++ reloc.read_domains,
++ reloc.write_domain);
++ ret = -EINVAL;
++ break;
+ }
+- if (reloc->write_domain && target_obj->pending_write_domain &&
+- reloc->write_domain != target_obj->pending_write_domain) {
++ if (reloc.write_domain && target_obj->pending_write_domain &&
++ reloc.write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+- obj, reloc->target_handle,
+- (int) reloc->offset,
+- reloc->write_domain,
++ obj, reloc.target_handle,
++ (int) reloc.offset,
++ reloc.write_domain,
+ target_obj->pending_write_domain);
+- drm_gem_object_unreference(target_obj);
+- i915_gem_object_unpin(obj);
+- return -EINVAL;
++ ret = -EINVAL;
++ break;
+ }
+
+- target_obj->pending_read_domains |= reloc->read_domains;
+- target_obj->pending_write_domain |= reloc->write_domain;
++ target_obj->pending_read_domains |= reloc.read_domains;
++ target_obj->pending_write_domain |= reloc.write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+- if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
+- drm_gem_object_unreference(target_obj);
++ if (target_offset == reloc.presumed_offset)
+ continue;
+- }
+
+ /* Check that the relocation address is valid... */
+- if (reloc->offset > obj->size - 4) {
++ if (reloc.offset > obj->base.size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+- obj, reloc->target_handle,
+- (int) reloc->offset, (int) obj->size);
+- drm_gem_object_unreference(target_obj);
+- i915_gem_object_unpin(obj);
+- return -EINVAL;
++ obj, reloc.target_handle,
++ (int) reloc.offset, (int) obj->base.size);
++ ret = -EINVAL;
++ break;
+ }
+- if (reloc->offset & 3) {
++ if (reloc.offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+- obj, reloc->target_handle,
+- (int) reloc->offset);
+- drm_gem_object_unreference(target_obj);
+- i915_gem_object_unpin(obj);
+- return -EINVAL;
++ obj, reloc.target_handle,
++ (int) reloc.offset);
++ ret = -EINVAL;
++ break;
+ }
+
+ /* and points to somewhere within the target object. */
+- if (reloc->delta >= target_obj->size) {
++ if (reloc.delta >= target_obj->size) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+- obj, reloc->target_handle,
+- (int) reloc->delta, (int) target_obj->size);
+- drm_gem_object_unreference(target_obj);
+- i915_gem_object_unpin(obj);
+- return -EINVAL;
++ obj, reloc.target_handle,
++ (int) reloc.delta, (int) target_obj->size);
++ ret = -EINVAL;
++ break;
+ }
+
+- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+- if (ret != 0) {
+- drm_gem_object_unreference(target_obj);
+- i915_gem_object_unpin(obj);
+- return -EINVAL;
+- }
++ reloc.delta += target_offset;
++ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
++ uint32_t page_offset = reloc.offset & ~PAGE_MASK;
++ char *vaddr;
+
+- /* Map the page containing the relocation we're going to
+- * perform.
+- */
+- reloc_offset = obj_priv->gtt_offset + reloc->offset;
+- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+- (reloc_offset &
+- ~(PAGE_SIZE - 1)),
+- KM_USER0);
+- reloc_entry = (uint32_t __iomem *)(reloc_page +
+- (reloc_offset & (PAGE_SIZE - 1)));
+- reloc_val = target_obj_priv->gtt_offset + reloc->delta;
+-
+-#if WATCH_BUF
+- DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
+- obj, (unsigned int) reloc->offset,
+- readl(reloc_entry), reloc_val);
+-#endif
+- writel(reloc_val, reloc_entry);
+- io_mapping_unmap_atomic(reloc_page, KM_USER0);
++ vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
++ *(uint32_t *)(vaddr + page_offset) = reloc.delta;
++ kunmap_atomic(vaddr, KM_USER0);
++ } else {
++ uint32_t __iomem *reloc_entry;
++ void __iomem *reloc_page;
+
+- /* The updated presumed offset for this entry will be
+- * copied back out to the user.
+- */
+- reloc->presumed_offset = target_obj_priv->gtt_offset;
++ ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
++ if (ret)
++ break;
++
++ /* Map the page containing the relocation we're going to perform. */
++ reloc.offset += obj->gtt_offset;
++ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
++ reloc.offset & PAGE_MASK,
++ KM_USER0);
++ reloc_entry = (uint32_t __iomem *)
++ (reloc_page + (reloc.offset & ~PAGE_MASK));
++ iowrite32(reloc.delta, reloc_entry);
++ io_mapping_unmap_atomic(reloc_page, KM_USER0);
++ }
+
+- drm_gem_object_unreference(target_obj);
++ /* and update the user's relocation entry */
++ reloc.presumed_offset = target_offset;
++ if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
++ &reloc.presumed_offset,
++ sizeof(reloc.presumed_offset))) {
++ ret = -EFAULT;
++ break;
++ }
+ }
+
+-#if WATCH_BUF
+- if (0)
+- i915_gem_dump_object(obj, 128, __func__, ~0);
+-#endif
+- return 0;
++ drm_gem_object_unreference(target_obj);
++ return ret;
+ }
+
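The rewritten relocation loop above copies each entry in from userspace, validates it, patches the batch with target_offset + delta, and writes the presumed offset back so the next execbuffer can skip the rewrite. A standalone sketch of applying one relocation, with plain memory standing in for the CPU-map and GTT-map write paths:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct reloc { uint32_t offset, delta, presumed_offset; };

static int apply_reloc(uint8_t *batch, uint32_t size,
		       struct reloc *r, uint32_t target_offset)
{
	uint32_t value = target_offset + r->delta;

	if (r->presumed_offset == target_offset)
		return 0;			/* already correct */
	if ((r->offset & 3) || r->offset > size - 4)
		return -1;			/* misaligned or out of bounds */

	memcpy(batch + r->offset, &value, sizeof(value));
	r->presumed_offset = target_offset;	/* reported back to userspace */
	return 0;
}

int main(void)
{
	uint8_t batch[64] = { 0 };
	struct reloc r = { .offset = 8, .delta = 0x10 };

	printf("%d\n", apply_reloc(batch, sizeof(batch), &r, 0x1000));
	return 0;
}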
+-/* Throttle our rendering by waiting until the ring has completed our requests
+- * emitted over 20 msec ago.
+- *
+- * Note that if we were to use the current jiffies each time around the loop,
+- * we wouldn't escape the function with any frames outstanding if the time to
+- * render a frame was over 20ms.
+- *
+- * This should get us reasonable parallelism between CPU and GPU but also
+- * relatively low latency when blocking on a particular request to finish.
+- */
+ static int
+-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
++i915_gem_execbuffer_pin(struct drm_device *dev,
++ struct drm_file *file,
++ struct drm_gem_object **object_list,
++ struct drm_i915_gem_exec_object2 *exec_list,
++ int count)
+ {
+- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+- int ret = 0;
+- unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int ret, i, retry;
+
+- mutex_lock(&dev->struct_mutex);
+- while (!list_empty(&i915_file_priv->mm.request_list)) {
+- struct drm_i915_gem_request *request;
++ /* attempt to pin all of the buffers into the GTT */
++ for (retry = 0; retry < 2; retry++) {
++ ret = 0;
++ for (i = 0; i < count; i++) {
++ struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
++			struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
++ bool need_fence =
++ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
++ obj->tiling_mode != I915_TILING_NONE;
++
++ /* Check fence reg constraints and rebind if necessary */
++ if (need_fence &&
++ !i915_gem_object_fence_offset_ok(&obj->base,
++ obj->tiling_mode)) {
++ ret = i915_gem_object_unbind(&obj->base);
++ if (ret)
++ break;
++ }
+
+- request = list_first_entry(&i915_file_priv->mm.request_list,
+- struct drm_i915_gem_request,
+- client_list);
++ ret = i915_gem_object_pin(&obj->base, entry->alignment);
++ if (ret)
++ break;
+
+- if (time_after_eq(request->emitted_jiffies, recent_enough))
+- break;
++ /*
++ * Pre-965 chips need a fence register set up in order
++ * to properly handle blits to/from tiled surfaces.
++ */
++ if (need_fence) {
++ ret = i915_gem_object_get_fence_reg(&obj->base, true);
++ if (ret) {
++ i915_gem_object_unpin(&obj->base);
++ break;
++ }
++
++ dev_priv->fence_regs[obj->fence_reg].gpu = true;
++ }
++
++ entry->offset = obj->gtt_offset;
++ }
+
+- ret = i915_wait_request(dev, request->seqno, request->ring);
+- if (ret != 0)
++ while (i--)
++ i915_gem_object_unpin(object_list[i]);
++
++ if (ret == 0)
+ break;
++
++ if (ret != -ENOSPC || retry)
++ return ret;
++
++ ret = i915_gem_evict_everything(dev);
++ if (ret)
++ return ret;
+ }
+- mutex_unlock(&dev->struct_mutex);
+
+- return ret;
++ return 0;
+ }
+
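i915_gem_execbuffer_pin above tries to pin every buffer, unpins whatever it managed after each attempt, and on -ENOSPC evicts everything and retries exactly once. A toy version of that two-pass structure, with a counter standing in for the GTT (helper names are hypothetical):

#include <errno.h>
#include <stdio.h>

static int gtt_free = 1;	/* toy GTT with room for one object */

static int pin_one(void) { if (!gtt_free) return -ENOSPC; gtt_free--; return 0; }
static void unpin_one(void) { gtt_free++; }
static void evict_everything(void) { gtt_free = 2; }

static int pin_all(int count)
{
	int i, ret = 0, retry;

	for (retry = 0; retry < 2; retry++) {
		ret = 0;
		for (i = 0; i < count; i++) {
			ret = pin_one();
			if (ret)
				break;
		}

		while (i--)		/* pins only reserve space here */
			unpin_one();

		if (ret == 0)
			return 0;
		if (ret != -ENOSPC || retry)
			return ret;	/* hard error, or already retried */

		evict_everything();
	}
	return ret;
}

int main(void)
{
	printf("pin_all(2) = %d\n", pin_all(2));
	return 0;
}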
+ static int
+-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
+- uint32_t buffer_count,
+- struct drm_i915_gem_relocation_entry **relocs)
++i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
++ struct drm_file *file,
++ struct intel_ring_buffer *ring,
++ struct drm_gem_object **objects,
++ int count)
+ {
+- uint32_t reloc_count = 0, reloc_index = 0, i;
+- int ret;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int ret, i;
+
+- *relocs = NULL;
+- for (i = 0; i < buffer_count; i++) {
+- if (reloc_count + exec_list[i].relocation_count < reloc_count)
+- return -EINVAL;
+- reloc_count += exec_list[i].relocation_count;
+- }
++ /* Zero the global flush/invalidate flags. These
++ * will be modified as new domains are computed
++ * for each object
++ */
++ dev->invalidate_domains = 0;
++ dev->flush_domains = 0;
++ dev_priv->mm.flush_rings = 0;
++ for (i = 0; i < count; i++)
++ i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+- *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
+- if (*relocs == NULL) {
+- DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
+- return -ENOMEM;
++ if (dev->invalidate_domains | dev->flush_domains) {
++#if WATCH_EXEC
++ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
++ __func__,
++ dev->invalidate_domains,
++ dev->flush_domains);
++#endif
++ i915_gem_flush(dev, file,
++ dev->invalidate_domains,
++ dev->flush_domains,
++ dev_priv->mm.flush_rings);
+ }
+
+- for (i = 0; i < buffer_count; i++) {
+- struct drm_i915_gem_relocation_entry __user *user_relocs;
+-
+- user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+-
+- ret = copy_from_user(&(*relocs)[reloc_index],
+- user_relocs,
+- exec_list[i].relocation_count *
+- sizeof(**relocs));
+- if (ret != 0) {
+- drm_free_large(*relocs);
+- *relocs = NULL;
+- return -EFAULT;
++ for (i = 0; i < count; i++) {
++ struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
++ /* XXX replace with semaphores */
++ if (obj->ring && ring != obj->ring) {
++ ret = i915_gem_object_wait_rendering(&obj->base, true);
++ if (ret)
++ return ret;
+ }
+-
+- reloc_index += exec_list[i].relocation_count;
+ }
+
+ return 0;
+ }
+
++/* Throttle our rendering by waiting until the ring has completed our requests
++ * emitted over 20 msec ago.
++ *
++ * Note that if we were to use the current jiffies each time around the loop,
++ * we wouldn't escape the function with any frames outstanding if the time to
++ * render a frame was over 20ms.
++ *
++ * This should get us reasonable parallelism between CPU and GPU but also
++ * relatively low latency when blocking on a particular request to finish.
++ */
+ static int
+-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
+- uint32_t buffer_count,
+- struct drm_i915_gem_relocation_entry *relocs)
++i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+ {
+- uint32_t reloc_count = 0, i;
+- int ret = 0;
+-
+- if (relocs == NULL)
+- return 0;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct drm_i915_file_private *file_priv = file->driver_priv;
++ unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
++ struct drm_i915_gem_request *request;
++ struct intel_ring_buffer *ring = NULL;
++ u32 seqno = 0;
++ int ret;
+
+- for (i = 0; i < buffer_count; i++) {
+- struct drm_i915_gem_relocation_entry __user *user_relocs;
+- int unwritten;
++ spin_lock(&file_priv->mm.lock);
++ list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
++ if (time_after_eq(request->emitted_jiffies, recent_enough))
++ break;
+
+- user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
++ ring = request->ring;
++ seqno = request->seqno;
++ }
++ spin_unlock(&file_priv->mm.lock);
+
+- unwritten = copy_to_user(user_relocs,
+- &relocs[reloc_count],
+- exec_list[i].relocation_count *
+- sizeof(*relocs));
++ if (seqno == 0)
++ return 0;
+
+- if (unwritten) {
+- ret = -EFAULT;
+- goto err;
+- }
++ ret = 0;
++ if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
++		/* Wait for the seqno to pass without holding any locks, so
++		 * we avoid causing extra latency for others. This is safe
++		 * as the irq generation is designed to be run atomically
++		 * and so is lockless.
++		 */
++ ring->user_irq_get(dev, ring);
++ ret = wait_event_interruptible(ring->irq_queue,
++ i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
++ || atomic_read(&dev_priv->mm.wedged));
++ ring->user_irq_put(dev, ring);
+
+- reloc_count += exec_list[i].relocation_count;
++ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
++ ret = -EIO;
+ }
+
+-err:
+- drm_free_large(relocs);
++ if (ret == 0)
++ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+
+ return ret;
+ }
+
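The relocated throttle above walks the client's request list, oldest first, to find the newest request emitted more than 20 ms ago, then waits for that seqno. A sketch of just the selection step, with plain integers modelling jiffies and seqnos:

#include <stdio.h>

struct request { unsigned long emitted; unsigned int seqno; };

static unsigned int throttle_target(const struct request *reqs, int n,
				    unsigned long now, unsigned long window)
{
	unsigned long recent_enough = now - window;
	unsigned int seqno = 0;
	int i;

	/* Requests are in emission order; stop at the first recent one. */
	for (i = 0; i < n; i++) {
		if (reqs[i].emitted >= recent_enough)
			break;
		seqno = reqs[i].seqno;
	}
	return seqno;	/* 0 means nothing is old enough to wait for */
}

int main(void)
{
	struct request reqs[] = { { 100, 1 }, { 110, 2 }, { 125, 3 } };

	/* now 130, window 20: only the request emitted at 100 qualifies. */
	printf("wait for seqno %u\n", throttle_target(reqs, 3, 130, 20));
	return 0;
}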
+ static int
+-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
+- uint64_t exec_offset)
++i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
++ uint64_t exec_offset)
+ {
+ uint32_t exec_start, exec_len;
+
+@@ -3509,44 +3619,32 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
+ }
+
+ static int
+-i915_gem_wait_for_pending_flip(struct drm_device *dev,
+- struct drm_gem_object **object_list,
+- int count)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- struct drm_i915_gem_object *obj_priv;
+- DEFINE_WAIT(wait);
+- int i, ret = 0;
++validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
++ int count)
++{
++ int i;
+
+- for (;;) {
+- prepare_to_wait(&dev_priv->pending_flip_queue,
+- &wait, TASK_INTERRUPTIBLE);
+- for (i = 0; i < count; i++) {
+- obj_priv = to_intel_bo(object_list[i]);
+- if (atomic_read(&obj_priv->pending_flip) > 0)
+- break;
+- }
+- if (i == count)
+- break;
++ for (i = 0; i < count; i++) {
++ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
++ size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+
+- if (!signal_pending(current)) {
+- mutex_unlock(&dev->struct_mutex);
+- schedule();
+- mutex_lock(&dev->struct_mutex);
+- continue;
+- }
+- ret = -ERESTARTSYS;
+- break;
++ if (!access_ok(VERIFY_READ, ptr, length))
++ return -EFAULT;
++
++ /* we may also need to update the presumed offsets */
++ if (!access_ok(VERIFY_WRITE, ptr, length))
++ return -EFAULT;
++
++ if (fault_in_pages_readable(ptr, length))
++ return -EFAULT;
+ }
+- finish_wait(&dev_priv->pending_flip_queue, &wait);
+
+- return ret;
++ return 0;
+ }
+
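validate_exec_list above front-loads all user-pointer checks so the later atomic copies cannot fault halfway through mutating state. A standalone sketch with a fake user address range in place of access_ok() (constants and the entry size are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define USER_BASE 0x1000u
#define USER_SIZE 0x1000u

struct exec_entry { uint32_t relocs_ptr, relocation_count; };

static int range_ok(uint32_t ptr, uint32_t len)
{
	return ptr >= USER_BASE && len <= USER_SIZE &&
	       ptr - USER_BASE <= USER_SIZE - len;
}

static int validate_exec_list(const struct exec_entry *exec, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		uint32_t length = exec[i].relocation_count * 8; /* toy size */

		/* readable and writable: presumed offsets are written back */
		if (!range_ok(exec[i].relocs_ptr, length))
			return -14;	/* -EFAULT */
	}
	return 0;
}

int main(void)
{
	struct exec_entry ok = { USER_BASE, 4 }, bad = { 0, 4 };

	printf("%d %d\n", validate_exec_list(&ok, 1),
			  validate_exec_list(&bad, 1));
	return 0;
}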
+-
+-int
++static int
+ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+- struct drm_file *file_priv,
++ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec_list)
+ {
+@@ -3555,26 +3653,47 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_gem_object *batch_obj;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_clip_rect *cliprects = NULL;
+- struct drm_i915_gem_relocation_entry *relocs = NULL;
+- int ret = 0, ret2, i, pinned = 0;
++ struct drm_i915_gem_request *request = NULL;
++ int ret, i, flips;
+ uint64_t exec_offset;
+- uint32_t seqno, flush_domains, reloc_index;
+- int pin_tries, flips;
+
+ struct intel_ring_buffer *ring = NULL;
+
++ ret = i915_gem_check_is_wedged(dev);
++ if (ret)
++ return ret;
++
++ ret = validate_exec_list(exec_list, args->buffer_count);
++ if (ret)
++ return ret;
++
+ #if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+ #endif
+- if (args->flags & I915_EXEC_BSD) {
++ switch (args->flags & I915_EXEC_RING_MASK) {
++ case I915_EXEC_DEFAULT:
++ case I915_EXEC_RENDER:
++ ring = &dev_priv->render_ring;
++ break;
++ case I915_EXEC_BSD:
+ if (!HAS_BSD(dev)) {
+- DRM_ERROR("execbuf with wrong flag\n");
++ DRM_ERROR("execbuf with invalid ring (BSD)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->bsd_ring;
+- } else {
+- ring = &dev_priv->render_ring;
++ break;
++ case I915_EXEC_BLT:
++ if (!HAS_BLT(dev)) {
++ DRM_ERROR("execbuf with invalid ring (BLT)\n");
++ return -EINVAL;
++ }
++ ring = &dev_priv->blt_ring;
++ break;
++ default:
++ DRM_ERROR("execbuf with unknown ring: %d\n",
++ (int)(args->flags & I915_EXEC_RING_MASK));
++ return -EINVAL;
+ }
+
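The new switch above dispatches on the ring-select bits of the execbuffer flags and rejects rings the hardware lacks before any work is queued. A sketch of the same dispatch; the enum values here are illustrative, not the real ABI:

#include <stdio.h>

enum { EXEC_DEFAULT, EXEC_RENDER, EXEC_BSD, EXEC_BLT, RING_MASK = 0x7 };

static const char *pick_ring(unsigned int flags, int has_bsd, int has_blt)
{
	switch (flags & RING_MASK) {
	case EXEC_DEFAULT:
	case EXEC_RENDER:
		return "render";
	case EXEC_BSD:
		return has_bsd ? "bsd" : NULL;
	case EXEC_BLT:
		return has_blt ? "blt" : NULL;
	default:
		return NULL;	/* unknown ring: -EINVAL in the driver */
	}
}

int main(void)
{
	printf("%s\n", pick_ring(EXEC_BLT, 1, 1));
	printf("%s\n", pick_ring(EXEC_BSD, 0, 1) ? "bsd" : "rejected");
	return 0;
}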
+ if (args->buffer_count < 1) {
+@@ -3609,20 +3728,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ }
+ }
+
+- ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
+- &relocs);
+- if (ret != 0)
++ request = kzalloc(sizeof(*request), GFP_KERNEL);
++ if (request == NULL) {
++ ret = -ENOMEM;
+ goto pre_mutex_err;
++ }
+
+- mutex_lock(&dev->struct_mutex);
+-
+- i915_verify_inactive(dev, __FILE__, __LINE__);
+-
+- if (atomic_read(&dev_priv->mm.wedged)) {
+- mutex_unlock(&dev->struct_mutex);
+- ret = -EIO;
++ ret = i915_mutex_lock_interruptible(dev);
++ if (ret)
+ goto pre_mutex_err;
+- }
+
+ if (dev_priv->mm.suspended) {
+ mutex_unlock(&dev->struct_mutex);
+@@ -3631,9 +3745,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ }
+
+ /* Look up object handles */
+- flips = 0;
+ for (i = 0; i < args->buffer_count; i++) {
+- object_list[i] = drm_gem_object_lookup(dev, file_priv,
++ object_list[i] = drm_gem_object_lookup(dev, file,
+ exec_list[i].handle);
+ if (object_list[i] == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+@@ -3654,75 +3767,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ goto err;
+ }
+ obj_priv->in_execbuffer = true;
+- flips += atomic_read(&obj_priv->pending_flip);
+- }
+-
+- if (flips > 0) {
+- ret = i915_gem_wait_for_pending_flip(dev, object_list,
+- args->buffer_count);
+- if (ret)
+- goto err;
+ }
+
+- /* Pin and relocate */
+- for (pin_tries = 0; ; pin_tries++) {
+- ret = 0;
+- reloc_index = 0;
+-
+- for (i = 0; i < args->buffer_count; i++) {
+- object_list[i]->pending_read_domains = 0;
+- object_list[i]->pending_write_domain = 0;
+- ret = i915_gem_object_pin_and_relocate(object_list[i],
+- file_priv,
+- &exec_list[i],
+- &relocs[reloc_index]);
+- if (ret)
+- break;
+- pinned = i + 1;
+- reloc_index += exec_list[i].relocation_count;
+- }
+- /* success */
+- if (ret == 0)
+- break;
+-
+- /* error other than GTT full, or we've already tried again */
+- if (ret != -ENOSPC || pin_tries >= 1) {
+- if (ret != -ERESTARTSYS) {
+- unsigned long long total_size = 0;
+- int num_fences = 0;
+- for (i = 0; i < args->buffer_count; i++) {
+- obj_priv = to_intel_bo(object_list[i]);
+-
+- total_size += object_list[i]->size;
+- num_fences +=
+- exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
+- obj_priv->tiling_mode != I915_TILING_NONE;
+- }
+- DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
+- pinned+1, args->buffer_count,
+- total_size, num_fences,
+- ret);
+- DRM_ERROR("%d objects [%d pinned], "
+- "%d object bytes [%d pinned], "
+- "%d/%d gtt bytes\n",
+- atomic_read(&dev->object_count),
+- atomic_read(&dev->pin_count),
+- atomic_read(&dev->object_memory),
+- atomic_read(&dev->pin_memory),
+- atomic_read(&dev->gtt_memory),
+- dev->gtt_total);
+- }
+- goto err;
+- }
+-
+- /* unpin all of our buffers */
+- for (i = 0; i < pinned; i++)
+- i915_gem_object_unpin(object_list[i]);
+- pinned = 0;
++ /* Move the objects en-masse into the GTT, evicting if necessary. */
++ ret = i915_gem_execbuffer_pin(dev, file,
++ object_list, exec_list,
++ args->buffer_count);
++ if (ret)
++ goto err;
+
+- /* evict everyone we can from the aperture */
+- ret = i915_gem_evict_everything(dev);
+- if (ret && ret != -ENOSPC)
++ /* The objects are in their final locations, apply the relocations. */
++ for (i = 0; i < args->buffer_count; i++) {
++ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
++ obj->base.pending_read_domains = 0;
++ obj->base.pending_write_domain = 0;
++ ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
++ if (ret)
+ goto err;
+ }
+
+@@ -3735,72 +3795,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ }
+ batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+- /* Sanity check the batch buffer, prior to moving objects */
+- exec_offset = exec_list[args->buffer_count - 1].offset;
+- ret = i915_gem_check_execbuffer (args, exec_offset);
++ /* Sanity check the batch buffer */
++ exec_offset = to_intel_bo(batch_obj)->gtt_offset;
++ ret = i915_gem_check_execbuffer(args, exec_offset);
+ if (ret != 0) {
+ DRM_ERROR("execbuf with invalid offset/length\n");
+ goto err;
+ }
+
+- i915_verify_inactive(dev, __FILE__, __LINE__);
+-
+- /* Zero the global flush/invalidate flags. These
+- * will be modified as new domains are computed
+- * for each object
+- */
+- dev->invalidate_domains = 0;
+- dev->flush_domains = 0;
+- dev_priv->flush_rings = 0;
+-
+- for (i = 0; i < args->buffer_count; i++) {
+- struct drm_gem_object *obj = object_list[i];
+-
+- /* Compute new gpu domains and update invalidate/flush */
+- i915_gem_object_set_to_gpu_domain(obj);
+- }
+-
+- i915_verify_inactive(dev, __FILE__, __LINE__);
+-
+- if (dev->invalidate_domains | dev->flush_domains) {
+-#if WATCH_EXEC
+- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+- __func__,
+- dev->invalidate_domains,
+- dev->flush_domains);
+-#endif
+- i915_gem_flush(dev,
+- dev->invalidate_domains,
+- dev->flush_domains);
+- if (dev_priv->flush_rings & FLUSH_RENDER_RING)
+- (void)i915_add_request(dev, file_priv,
+- dev->flush_domains,
+- &dev_priv->render_ring);
+- if (dev_priv->flush_rings & FLUSH_BSD_RING)
+- (void)i915_add_request(dev, file_priv,
+- dev->flush_domains,
+- &dev_priv->bsd_ring);
+- }
++ ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
++ object_list, args->buffer_count);
++ if (ret)
++ goto err;
+
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ uint32_t old_write_domain = obj->write_domain;
+-
+ obj->write_domain = obj->pending_write_domain;
+- if (obj->write_domain)
+- list_move_tail(&obj_priv->gpu_write_list,
+- &dev_priv->mm.gpu_write_list);
+- else
+- list_del_init(&obj_priv->gpu_write_list);
+-
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
+ }
+
+- i915_verify_inactive(dev, __FILE__, __LINE__);
+-
+ #if WATCH_COHERENCY
+ for (i = 0; i < args->buffer_count; i++) {
+ i915_gem_object_check_coherency(object_list[i],
+@@ -3815,9 +3831,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ ~0);
+ #endif
+
++ /* Check for any pending flips. As we only maintain a flip queue depth
++ * of 1, we can simply insert a WAIT for the next display flip prior
++ * to executing the batch and avoid stalling the CPU.
++ */
++ flips = 0;
++ for (i = 0; i < args->buffer_count; i++) {
++ if (object_list[i]->write_domain)
++ flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
++ }
++ if (flips) {
++ int plane, flip_mask;
++
++ for (plane = 0; flips >> plane; plane++) {
++ if (((flips >> plane) & 1) == 0)
++ continue;
++
++ if (plane)
++ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
++ else
++ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
++
++ intel_ring_begin(dev, ring, 2);
++ intel_ring_emit(dev, ring,
++ MI_WAIT_FOR_EVENT | flip_mask);
++ intel_ring_emit(dev, ring, MI_NOOP);
++ intel_ring_advance(dev, ring);
++ }
++ }
++
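The flip handling just above replaces the old CPU-side wait with ring commands: one WAIT per plane that has a flip pending, so the batch starts only after the flip completes. A sketch of the plane-mask loop, emitting into an array instead of a real ring (command values are stand-ins):

#include <stdio.h>

#define WAIT_PLANE_A (1u << 0)
#define WAIT_PLANE_B (1u << 1)

static int emit_flip_waits(unsigned int flips, unsigned int *cmds, int max)
{
	int plane, n = 0;

	/* Scan the set bits; each one is a plane with a pending flip. */
	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;
		if (n == max)
			return -1;
		cmds[n++] = plane ? WAIT_PLANE_B : WAIT_PLANE_A;
	}
	return n;
}

int main(void)
{
	unsigned int cmds[4];
	int n = emit_flip_waits(0x3, cmds, 4);	/* both planes pending */

	printf("emitted %d waits\n", n);
	return 0;
}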
+ /* Exec the batchbuffer */
+ ret = ring->dispatch_gem_execbuffer(dev, ring, args,
+- cliprects, exec_offset);
++ cliprects, exec_offset);
+ if (ret) {
+ DRM_ERROR("dispatch failed %d\n", ret);
+ goto err;
+@@ -3827,38 +3872,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires
+ */
+- flush_domains = i915_retire_commands(dev, ring);
+-
+- i915_verify_inactive(dev, __FILE__, __LINE__);
++ i915_retire_commands(dev, ring);
+
+- /*
+- * Get a seqno representing the execution of the current buffer,
+- * which we can wait on. We would like to mitigate these interrupts,
+- * likely by only creating seqnos occasionally (so that we have
+- * *some* interrupts representing completion of buffers that we can
+- * wait on when trying to clear up gtt space).
+- */
+- seqno = i915_add_request(dev, file_priv, flush_domains, ring);
+- BUG_ON(seqno == 0);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+- obj_priv = to_intel_bo(obj);
+
+- i915_gem_object_move_to_active(obj, seqno, ring);
+-#if WATCH_LRU
+- DRM_INFO("%s: move to exec list %p\n", __func__, obj);
+-#endif
++ i915_gem_object_move_to_active(obj, ring);
++ if (obj->write_domain)
++ list_move_tail(&to_intel_bo(obj)->gpu_write_list,
++ &ring->gpu_write_list);
+ }
+-#if WATCH_LRU
+- i915_dump_lru(dev, __func__);
+-#endif
+
+- i915_verify_inactive(dev, __FILE__, __LINE__);
++ i915_add_request(dev, file, request, ring);
++ request = NULL;
+
+ err:
+- for (i = 0; i < pinned; i++)
+- i915_gem_object_unpin(object_list[i]);
+-
+ for (i = 0; i < args->buffer_count; i++) {
+ if (object_list[i]) {
+ obj_priv = to_intel_bo(object_list[i]);
+@@ -3870,22 +3898,9 @@ err:
+ mutex_unlock(&dev->struct_mutex);
+
+ pre_mutex_err:
+- /* Copy the updated relocations out regardless of current error
+- * state. Failure to update the relocs would mean that the next
+- * time userland calls execbuf, it would do so with presumed offset
+- * state that didn't match the actual object state.
+- */
+- ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+- relocs);
+- if (ret2 != 0) {
+- DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+-
+- if (ret == 0)
+- ret = ret2;
+- }
+-
+ drm_free_large(object_list);
+ kfree(cliprects);
++ kfree(request);
+
+ return ret;
+ }
+@@ -3942,7 +3957,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+- if (!IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen < 4)
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+@@ -4039,12 +4054,12 @@ int
+ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+ {
+ struct drm_device *dev = obj->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ int ret;
+
+ BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+-
+- i915_verify_inactive(dev, __FILE__, __LINE__);
++ WARN_ON(i915_verify_lists(dev));
+
+ if (obj_priv->gtt_space != NULL) {
+ if (alignment == 0)
+@@ -4072,14 +4087,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+ * remove it from the inactive list
+ */
+ if (obj_priv->pin_count == 1) {
+- atomic_inc(&dev->pin_count);
+- atomic_add(obj->size, &dev->pin_memory);
+- if (!obj_priv->active &&
+- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+- list_del_init(&obj_priv->list);
++ i915_gem_info_add_pin(dev_priv, obj->size);
++ if (!obj_priv->active)
++ list_move_tail(&obj_priv->mm_list,
++ &dev_priv->mm.pinned_list);
+ }
+- i915_verify_inactive(dev, __FILE__, __LINE__);
+
++ WARN_ON(i915_verify_lists(dev));
+ return 0;
+ }
+
+@@ -4090,7 +4104,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+- i915_verify_inactive(dev, __FILE__, __LINE__);
++ WARN_ON(i915_verify_lists(dev));
+ obj_priv->pin_count--;
+ BUG_ON(obj_priv->pin_count < 0);
+ BUG_ON(obj_priv->gtt_space == NULL);
+@@ -4100,14 +4114,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
+ * the inactive list
+ */
+ if (obj_priv->pin_count == 0) {
+- if (!obj_priv->active &&
+- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+- list_move_tail(&obj_priv->list,
++ if (!obj_priv->active)
++ list_move_tail(&obj_priv->mm_list,
+ &dev_priv->mm.inactive_list);
+- atomic_dec(&dev->pin_count);
+- atomic_sub(obj->size, &dev->pin_memory);
++ i915_gem_info_remove_pin(dev_priv, obj->size);
+ }
+- i915_verify_inactive(dev, __FILE__, __LINE__);
++ WARN_ON(i915_verify_lists(dev));
+ }
+
+ int
+@@ -4119,41 +4131,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+- mutex_lock(&dev->struct_mutex);
++ ret = i915_mutex_lock_interruptible(dev);
++ if (ret)
++ return ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+- DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
+- args->handle);
+- mutex_unlock(&dev->struct_mutex);
+- return -ENOENT;
++ ret = -ENOENT;
++ goto unlock;
+ }
+ obj_priv = to_intel_bo(obj);
+
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to pin a purgeable buffer\n");
+- drm_gem_object_unreference(obj);
+- mutex_unlock(&dev->struct_mutex);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+
+ if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+ DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+ args->handle);
+- drm_gem_object_unreference(obj);
+- mutex_unlock(&dev->struct_mutex);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+
+ obj_priv->user_pin_count++;
+ obj_priv->pin_filp = file_priv;
+ if (obj_priv->user_pin_count == 1) {
+ ret = i915_gem_object_pin(obj, args->alignment);
+- if (ret != 0) {
+- drm_gem_object_unreference(obj);
+- mutex_unlock(&dev->struct_mutex);
+- return ret;
+- }
++ if (ret)
++ goto out;
+ }
+
+ /* XXX - flush the CPU caches for pinned objects
+@@ -4161,10 +4168,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ */
+ i915_gem_object_flush_cpu_write_domain(obj);
+ args->offset = obj_priv->gtt_offset;
++out:
+ drm_gem_object_unreference(obj);
++unlock:
+ mutex_unlock(&dev->struct_mutex);
+-
+- return 0;
++ return ret;
+ }
+
+ int
+@@ -4174,24 +4182,24 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_i915_gem_pin *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
++ int ret;
+
+- mutex_lock(&dev->struct_mutex);
++ ret = i915_mutex_lock_interruptible(dev);
++ if (ret)
++ return ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+- DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
+- args->handle);
+- mutex_unlock(&dev->struct_mutex);
+- return -ENOENT;
++ ret = -ENOENT;
++ goto unlock;
+ }
+-
+ obj_priv = to_intel_bo(obj);
++
+ if (obj_priv->pin_filp != file_priv) {
+ DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+ args->handle);
+- drm_gem_object_unreference(obj);
+- mutex_unlock(&dev->struct_mutex);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+ obj_priv->user_pin_count--;
+ if (obj_priv->user_pin_count == 0) {
+@@ -4199,9 +4207,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ i915_gem_object_unpin(obj);
+ }
+
++out:
+ drm_gem_object_unreference(obj);
++unlock:
+ mutex_unlock(&dev->struct_mutex);
+- return 0;
++ return ret;
+ }
+
+ int
+@@ -4211,22 +4221,24 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_i915_gem_busy *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
++ int ret;
++
++ ret = i915_mutex_lock_interruptible(dev);
++ if (ret)
++ return ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+- DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
+- args->handle);
+- return -ENOENT;
++ ret = -ENOENT;
++ goto unlock;
+ }
+-
+- mutex_lock(&dev->struct_mutex);
++ obj_priv = to_intel_bo(obj);
+
+ /* Count all active objects as busy, even if they are currently not used
+ * by the gpu. Users of this interface expect objects to eventually
+ * become non-busy without any further actions, therefore emit any
+ * necessary flushes here.
+ */
+- obj_priv = to_intel_bo(obj);
+ args->busy = obj_priv->active;
+ if (args->busy) {
+ /* Unconditionally flush objects, even when the gpu still uses this
+@@ -4234,10 +4246,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ * use this buffer rather sooner than later, so issuing the required
+ * flush earlier is beneficial.
+ */
+- if (obj->write_domain) {
+- i915_gem_flush(dev, 0, obj->write_domain);
+- (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
+- }
++ if (obj->write_domain & I915_GEM_GPU_DOMAINS)
++ i915_gem_flush_ring(dev, file_priv,
++ obj_priv->ring,
++ 0, obj->write_domain);
+
+ /* Update the active list for the hardware's current position.
+ * Otherwise this only updates on a delayed timer or when irqs
+@@ -4250,8 +4262,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ }
+
+ drm_gem_object_unreference(obj);
++unlock:
+ mutex_unlock(&dev->struct_mutex);
+- return 0;
++ return ret;
+ }
+
+ int
+@@ -4268,6 +4281,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_i915_gem_madvise *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
++ int ret;
+
+ switch (args->madv) {
+ case I915_MADV_DONTNEED:
+@@ -4277,22 +4291,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
++ ret = i915_mutex_lock_interruptible(dev);
++ if (ret)
++ return ret;
++
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+- DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
+- args->handle);
+- return -ENOENT;
++ ret = -ENOENT;
++ goto unlock;
+ }
+-
+- mutex_lock(&dev->struct_mutex);
+ obj_priv = to_intel_bo(obj);
+
+ if (obj_priv->pin_count) {
+- drm_gem_object_unreference(obj);
+- mutex_unlock(&dev->struct_mutex);
+-
+- DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+
+ if (obj_priv->madv != __I915_MADV_PURGED)
+@@ -4305,15 +4317,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+
+ args->retained = obj_priv->madv != __I915_MADV_PURGED;
+
++out:
+ drm_gem_object_unreference(obj);
++unlock:
+ mutex_unlock(&dev->struct_mutex);
+-
+- return 0;
++ return ret;
+ }
+
+ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
+ {
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+@@ -4325,18 +4339,19 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ return NULL;
+ }
+
++ i915_gem_info_add_obj(dev_priv, size);
++
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+ obj->agp_type = AGP_USER_MEMORY;
+ obj->base.driver_private = NULL;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+- INIT_LIST_HEAD(&obj->list);
++ INIT_LIST_HEAD(&obj->mm_list);
++ INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->gpu_write_list);
+ obj->madv = I915_MADV_WILLNEED;
+
+- trace_i915_gem_object_create(&obj->base);
+-
+ return &obj->base;
+ }
+
+@@ -4356,7 +4371,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret == -ERESTARTSYS) {
+- list_move(&obj_priv->list,
++ list_move(&obj_priv->mm_list,
+ &dev_priv->mm.deferred_free_list);
+ return;
+ }
+@@ -4365,6 +4380,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+ i915_gem_free_mmap_offset(obj);
+
+ drm_gem_object_release(obj);
++ i915_gem_info_remove_obj(dev_priv, obj->size);
+
+ kfree(obj_priv->page_cpu_valid);
+ kfree(obj_priv->bit_17);
+@@ -4395,10 +4411,7 @@ i915_gem_idle(struct drm_device *dev)
+
+ mutex_lock(&dev->struct_mutex);
+
+- if (dev_priv->mm.suspended ||
+- (dev_priv->render_ring.gem_object == NULL) ||
+- (HAS_BSD(dev) &&
+- dev_priv->bsd_ring.gem_object == NULL)) {
++ if (dev_priv->mm.suspended) {
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+@@ -4423,7 +4436,7 @@ i915_gem_idle(struct drm_device *dev)
+ * And not confound mm.suspended!
+ */
+ dev_priv->mm.suspended = 1;
+- del_timer(&dev_priv->hangcheck_timer);
++ del_timer_sync(&dev_priv->hangcheck_timer);
+
+ i915_kernel_lost_context(dev);
+ i915_gem_cleanup_ringbuffer(dev);
+@@ -4503,36 +4516,34 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+- dev_priv->render_ring = render_ring;
+-
+- if (!I915_NEED_GFX_HWS(dev)) {
+- dev_priv->render_ring.status_page.page_addr
+- = dev_priv->status_page_dmah->vaddr;
+- memset(dev_priv->render_ring.status_page.page_addr,
+- 0, PAGE_SIZE);
+- }
+-
+ if (HAS_PIPE_CONTROL(dev)) {
+ ret = i915_gem_init_pipe_control(dev);
+ if (ret)
+ return ret;
+ }
+
+- ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
++ ret = intel_init_render_ring_buffer(dev);
+ if (ret)
+ goto cleanup_pipe_control;
+
+ if (HAS_BSD(dev)) {
+- dev_priv->bsd_ring = bsd_ring;
+- ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
++ ret = intel_init_bsd_ring_buffer(dev);
+ if (ret)
+ goto cleanup_render_ring;
+ }
+
++ if (HAS_BLT(dev)) {
++ ret = intel_init_blt_ring_buffer(dev);
++ if (ret)
++ goto cleanup_bsd_ring;
++ }
++
+ dev_priv->next_seqno = 1;
+
+ return 0;
+
++cleanup_bsd_ring:
++ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ cleanup_render_ring:
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+ cleanup_pipe_control:
+@@ -4547,8 +4558,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+- if (HAS_BSD(dev))
+- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
++ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
++ intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
+ }
+@@ -4577,15 +4588,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ return ret;
+ }
+
+- spin_lock(&dev_priv->mm.active_list_lock);
++ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+- BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
+- spin_unlock(&dev_priv->mm.active_list_lock);
+-
++ BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
++ BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+- BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
++ BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
++ BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
+ mutex_unlock(&dev->struct_mutex);
+
+ ret = drm_irq_install(dev);
+@@ -4627,28 +4638,34 @@ i915_gem_lastclose(struct drm_device *dev)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+ }
+
++static void
++init_ring_lists(struct intel_ring_buffer *ring)
++{
++ INIT_LIST_HEAD(&ring->active_list);
++ INIT_LIST_HEAD(&ring->request_list);
++ INIT_LIST_HEAD(&ring->gpu_write_list);
++}
++
+ void
+ i915_gem_load(struct drm_device *dev)
+ {
+ int i;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+- spin_lock_init(&dev_priv->mm.active_list_lock);
++ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+- INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
++ INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
+- INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+- INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+- if (HAS_BSD(dev)) {
+- INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+- INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+- }
++ init_ring_lists(&dev_priv->render_ring);
++ init_ring_lists(&dev_priv->bsd_ring);
++ init_ring_lists(&dev_priv->blt_ring);
+ for (i = 0; i < 16; i++)
+ INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
+ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ i915_gem_retire_work_handler);
++ init_completion(&dev_priv->error_completion);
+ spin_lock(&shrink_list_lock);
+ list_add(&dev_priv->mm.shrink_list, &shrink_list);
+ spin_unlock(&shrink_list_lock);
+@@ -4667,21 +4684,30 @@ i915_gem_load(struct drm_device *dev)
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->fence_reg_start = 3;
+
+- if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
++ if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dev_priv->num_fence_regs = 16;
+ else
+ dev_priv->num_fence_regs = 8;
+
+ /* Initialize fence registers to zero */
+- if (IS_I965G(dev)) {
++ switch (INTEL_INFO(dev)->gen) {
++ case 6:
++ for (i = 0; i < 16; i++)
++ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
++ break;
++ case 5:
++ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+- } else {
+- for (i = 0; i < 8; i++)
+- I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
++ break;
++ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
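++		/* fall through: all gen3 parts also clear the low eight registers */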
++ case 2:
++ for (i = 0; i < 8; i++)
++ I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
++ break;
+ }
+ i915_gem_detect_bit_6_swizzle(dev);
+ init_waitqueue_head(&dev_priv->pending_flip_queue);
+@@ -4691,8 +4717,8 @@ i915_gem_load(struct drm_device *dev)
+ * Create a physically contiguous memory object for this object
+ * e.g. for cursor + overlay regs
+ */
+-int i915_gem_init_phys_object(struct drm_device *dev,
+- int id, int size, int align)
++static int i915_gem_init_phys_object(struct drm_device *dev,
++ int id, int size, int align)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_phys_object *phys_obj;
+@@ -4724,7 +4750,7 @@ kfree_obj:
+ return ret;
+ }
+
+-void i915_gem_free_phys_object(struct drm_device *dev, int id)
++static void i915_gem_free_phys_object(struct drm_device *dev, int id)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_phys_object *phys_obj;
+@@ -4853,34 +4879,48 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+- void *obj_addr;
+- int ret;
+- char __user *user_data;
++ void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
++ char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
+
+- user_data = (char __user *) (uintptr_t) args->data_ptr;
+- obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
++ DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
+
+- DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
+- ret = copy_from_user(obj_addr, user_data, args->size);
+- if (ret)
+- return -EFAULT;
++ if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
++ unsigned long unwritten;
++
++ /* The physical object once assigned is fixed for the lifetime
++ * of the obj, so we can safely drop the lock and continue
++ * to access vaddr.
++ */
++ mutex_unlock(&dev->struct_mutex);
++ unwritten = copy_from_user(vaddr, user_data, args->size);
++ mutex_lock(&dev->struct_mutex);
++ if (unwritten)
++ return -EFAULT;
++ }
+
+ drm_agp_chipset_flush(dev);
+ return 0;
+ }
+
+-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
++void i915_gem_release(struct drm_device *dev, struct drm_file *file)
+ {
+- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ /* Clean up our request list when the client is going away, so that
+ * later retire_requests won't dereference our soon-to-be-gone
+ * file_priv.
+ */
+- mutex_lock(&dev->struct_mutex);
+- while (!list_empty(&i915_file_priv->mm.request_list))
+- list_del_init(i915_file_priv->mm.request_list.next);
+- mutex_unlock(&dev->struct_mutex);
++ spin_lock(&file_priv->mm.lock);
++ while (!list_empty(&file_priv->mm.request_list)) {
++ struct drm_i915_gem_request *request;
++
++ request = list_first_entry(&file_priv->mm.request_list,
++ struct drm_i915_gem_request,
++ client_list);
++ list_del(&request->client_list);
++ request->file_priv = NULL;
++ }
++ spin_unlock(&file_priv->mm.lock);
+ }
+
+ static int
+@@ -4889,12 +4929,8 @@ i915_gpu_is_active(struct drm_device *dev)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int lists_empty;
+
+- spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+- list_empty(&dev_priv->render_ring.active_list);
+- if (HAS_BSD(dev))
+- lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
+- spin_unlock(&dev_priv->mm.active_list_lock);
++ list_empty(&dev_priv->mm.active_list);
+
+ return !lists_empty;
+ }
+@@ -4916,7 +4952,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+ if (mutex_trylock(&dev->struct_mutex)) {
+ list_for_each_entry(obj_priv,
+ &dev_priv->mm.inactive_list,
+- list)
++ mm_list)
+ cnt++;
+ mutex_unlock(&dev->struct_mutex);
+ }
+@@ -4942,7 +4978,7 @@ rescan:
+
+ list_for_each_entry_safe(obj_priv, next_obj,
+ &dev_priv->mm.inactive_list,
+- list) {
++ mm_list) {
+ if (i915_gem_object_is_purgeable(obj_priv)) {
+ i915_gem_object_unbind(&obj_priv->base);
+ if (--nr_to_scan <= 0)
+@@ -4971,7 +5007,7 @@ rescan:
+
+ list_for_each_entry_safe(obj_priv, next_obj,
+ &dev_priv->mm.inactive_list,
+- list) {
++ mm_list) {
+ if (nr_to_scan > 0) {
+ i915_gem_object_unbind(&obj_priv->base);
+ nr_to_scan--;
+diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
+index 80f380b..48644b8 100644
+--- a/drivers/gpu/drm/i915/i915_gem_debug.c
++++ b/drivers/gpu/drm/i915/i915_gem_debug.c
+@@ -30,29 +30,112 @@
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+
+-#if WATCH_INACTIVE
+-void
+-i915_verify_inactive(struct drm_device *dev, char *file, int line)
++#if WATCH_LISTS
++int
++i915_verify_lists(struct drm_device *dev)
+ {
++ static int warned;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- struct drm_gem_object *obj;
+- struct drm_i915_gem_object *obj_priv;
+-
+- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+- obj = &obj_priv->base;
+- if (obj_priv->pin_count || obj_priv->active ||
+- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+- I915_GEM_DOMAIN_GTT)))
+- DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
++ struct drm_i915_gem_object *obj;
++ int err = 0;
++
++ if (warned)
++ return 0;
++
++ list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
++ if (obj->base.dev != dev ||
++ !atomic_read(&obj->base.refcount.refcount)) {
++ DRM_ERROR("freed render active %p\n", obj);
++ err++;
++ break;
++ } else if (!obj->active ||
++ (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
++ DRM_ERROR("invalid render active %p (a %d r %x)\n",
++ obj,
++ obj->active,
++ obj->base.read_domains);
++ err++;
++ } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
++ DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
++ obj,
++ obj->base.write_domain,
++ !list_empty(&obj->gpu_write_list));
++ err++;
++ }
++ }
++
++ list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
++ if (obj->base.dev != dev ||
++ !atomic_read(&obj->base.refcount.refcount)) {
++ DRM_ERROR("freed flushing %p\n", obj);
++ err++;
++ break;
++ } else if (!obj->active ||
++ (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
++ list_empty(&obj->gpu_write_list)) {
++ DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
+ obj,
+- obj_priv->pin_count, obj_priv->active,
+- obj->write_domain, file, line);
++ obj->active,
++ obj->base.write_domain,
++ !list_empty(&obj->gpu_write_list));
++ err++;
++ }
++ }
++
++ list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
++ if (obj->base.dev != dev ||
++ !atomic_read(&obj->base.refcount.refcount)) {
++ DRM_ERROR("freed gpu write %p\n", obj);
++ err++;
++ break;
++ } else if (!obj->active ||
++ (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
++ DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
++ obj,
++ obj->active,
++ obj->base.write_domain);
++ err++;
++ }
++ }
++
++ list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
++ if (obj->base.dev != dev ||
++ !atomic_read(&obj->base.refcount.refcount)) {
++ DRM_ERROR("freed inactive %p\n", obj);
++ err++;
++ break;
++ } else if (obj->pin_count || obj->active ||
++ (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
++ DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
++ obj,
++ obj->pin_count, obj->active,
++ obj->base.write_domain);
++ err++;
++ }
+ }
++
++ list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
++ if (obj->base.dev != dev ||
++ !atomic_read(&obj->base.refcount.refcount)) {
++ DRM_ERROR("freed pinned %p\n", obj);
++ err++;
++ break;
++ } else if (!obj->pin_count || obj->active ||
++ (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
++ DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
++ obj,
++ obj->pin_count, obj->active,
++ obj->base.write_domain);
++ err++;
++ }
++ }
++
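++ /* remember the result so the broken lists are only reported once */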
++ return warned = err;
+ }
+ #endif /* WATCH_LISTS */
+
+
+-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
++#if WATCH_EXEC | WATCH_PWRITE
+ static void
+ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
+ uint32_t bias, uint32_t mark)
+@@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ }
+ #endif
+
+-#if WATCH_LRU
+-void
+-i915_dump_lru(struct drm_device *dev, const char *where)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- struct drm_i915_gem_object *obj_priv;
+-
+- DRM_INFO("active list %s {\n", where);
+- spin_lock(&dev_priv->mm.active_list_lock);
+- list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+- list)
+- {
+- DRM_INFO(" %p: %08x\n", obj_priv,
+- obj_priv->last_rendering_seqno);
+- }
+- spin_unlock(&dev_priv->mm.active_list_lock);
+- DRM_INFO("}\n");
+- DRM_INFO("flushing list %s {\n", where);
+- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+- list)
+- {
+- DRM_INFO(" %p: %08x\n", obj_priv,
+- obj_priv->last_rendering_seqno);
+- }
+- DRM_INFO("}\n");
+- DRM_INFO("inactive %s {\n", where);
+- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+- DRM_INFO(" %p: %08x\n", obj_priv,
+- obj_priv->last_rendering_seqno);
+- }
+- DRM_INFO("}\n");
+-}
+-#endif
+-
+-
+ #if WATCH_COHERENCY
+ void
+ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
+index 5c428fa..d8ae7d1 100644
+--- a/drivers/gpu/drm/i915/i915_gem_evict.c
++++ b/drivers/gpu/drm/i915/i915_gem_evict.c
+@@ -31,49 +31,6 @@
+ #include "i915_drv.h"
+ #include "i915_drm.h"
+
+-static struct drm_i915_gem_object *
+-i915_gem_next_active_object(struct drm_device *dev,
+- struct list_head **render_iter,
+- struct list_head **bsd_iter)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
+-
+- if (*render_iter != &dev_priv->render_ring.active_list)
+- render_obj = list_entry(*render_iter,
+- struct drm_i915_gem_object,
+- list);
+-
+- if (HAS_BSD(dev)) {
+- if (*bsd_iter != &dev_priv->bsd_ring.active_list)
+- bsd_obj = list_entry(*bsd_iter,
+- struct drm_i915_gem_object,
+- list);
+-
+- if (render_obj == NULL) {
+- *bsd_iter = (*bsd_iter)->next;
+- return bsd_obj;
+- }
+-
+- if (bsd_obj == NULL) {
+- *render_iter = (*render_iter)->next;
+- return render_obj;
+- }
+-
+- /* XXX can we handle seqno wrapping? */
+- if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
+- *render_iter = (*render_iter)->next;
+- return render_obj;
+- } else {
+- *bsd_iter = (*bsd_iter)->next;
+- return bsd_obj;
+- }
+- } else {
+- *render_iter = (*render_iter)->next;
+- return render_obj;
+- }
+-}
+-
+ static bool
+ mark_free(struct drm_i915_gem_object *obj_priv,
+ struct list_head *unwind)
+@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
+ return drm_mm_scan_add_block(obj_priv->gtt_space);
+ }
+
+-#define i915_for_each_active_object(OBJ, R, B) \
+- *(R) = dev_priv->render_ring.active_list.next; \
+- *(B) = dev_priv->bsd_ring.active_list.next; \
+- while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
+-
+ int
+ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head eviction_list, unwind_list;
+ struct drm_i915_gem_object *obj_priv;
+- struct list_head *render_iter, *bsd_iter;
+ int ret = 0;
+
+ i915_gem_retire_requests(dev);
+@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
+ drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+
+ /* First see if there is a large enough contiguous idle region... */
+- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
+ if (mark_free(obj_priv, &unwind_list))
+ goto found;
+ }
+
+ /* Now merge in the soon-to-be-expired objects... */
+- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ /* Does the object require an outstanding flush? */
+ if (obj_priv->base.write_domain || obj_priv->pin_count)
+ continue;
+@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
+ }
+
+ /* Finally add anything with a pending flush (in order of retirement) */
+- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
++ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
+ if (obj_priv->pin_count)
+ continue;
+
+ if (mark_free(obj_priv, &unwind_list))
+ goto found;
+ }
+- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ if (!obj_priv->base.write_domain || obj_priv->pin_count)
+ continue;
+
+@@ -212,14 +163,9 @@ i915_gem_evict_everything(struct drm_device *dev)
+ int ret;
+ bool lists_empty;
+
+- spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+- list_empty(&dev_priv->render_ring.active_list) &&
+- (!HAS_BSD(dev)
+- || list_empty(&dev_priv->bsd_ring.active_list)));
+- spin_unlock(&dev_priv->mm.active_list_lock);
+-
++ list_empty(&dev_priv->mm.active_list));
+ if (lists_empty)
+ return -ENOSPC;
+
+@@ -234,13 +180,9 @@ i915_gem_evict_everything(struct drm_device *dev)
+ if (ret)
+ return ret;
+
+- spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+- list_empty(&dev_priv->render_ring.active_list) &&
+- (!HAS_BSD(dev)
+- || list_empty(&dev_priv->bsd_ring.active_list)));
+- spin_unlock(&dev_priv->mm.active_list_lock);
++ list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!lists_empty);
+
+ return 0;
+@@ -258,7 +200,7 @@ i915_gem_evict_inactive(struct drm_device *dev)
+
+ obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+- list)->base;
++ mm_list)->base;
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret != 0) {
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 710eca7..af352de 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -92,13 +92,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+- if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
++ if (IS_GEN5(dev) || IS_GEN6(dev)) {
+ /* On Ironlake and Sandy Bridge, the GPU uses the same
+ * swizzling setup regardless of the DRAM configuration.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+- } else if (!IS_I9XX(dev)) {
++ } else if (IS_GEN2(dev)) {
+ /* As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+ if (tiling_mode == I915_TILING_NONE)
+ return true;
+
+- if (!IS_I9XX(dev) ||
++ if (IS_GEN2(dev) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* check maximum stride & object size */
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ /* i965 stores the end address of the gtt mapping in the fence
+ * reg, so don't bother to check the size */
+ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
+ return false;
+- } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
++ } else {
+ if (stride > 8192)
+ return false;
+
+@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+ }
+
+ /* 965+ just needs multiples of tile width */
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ if (stride & (tile_width - 1))
+ return false;
+ return true;
+@@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
+ if (tiling_mode == I915_TILING_NONE)
+ return true;
+
+- if (!IS_I965G(dev)) {
+- if (obj_priv->gtt_offset & (obj->size - 1))
++ if (INTEL_INFO(dev)->gen >= 4)
++ return true;
++
++ if (obj_priv->gtt_offset & (obj->size - 1))
++ return false;
++
++ if (IS_GEN3(dev)) {
++ if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
++ return false;
++ } else {
++ if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+ return false;
+- if (IS_I9XX(dev)) {
+- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+- return false;
+- } else {
+- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+- return false;
+- }
+ }
+
+ return true;
+@@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+- int ret = 0;
++ int ret;
++
++ ret = i915_gem_check_is_wedged(dev);
++ if (ret)
++ return ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+@@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
+ if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
+ ret = i915_gem_object_unbind(obj);
+ else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+- ret = i915_gem_object_put_fence_reg(obj);
++ ret = i915_gem_object_put_fence_reg(obj, true);
+ else
+ i915_gem_release_mmap(obj);
+
+@@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+-static int
++static void
+ i915_gem_swizzle_page(struct page *page)
+ {
++ char temp[64];
+ char *vaddr;
+ int i;
+- char temp[64];
+
+ vaddr = kmap(page);
+- if (vaddr == NULL)
+- return -ENOMEM;
+
+ for (i = 0; i < PAGE_SIZE; i += 128) {
+ memcpy(temp, &vaddr[i], 64);
+@@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page)
+ }
+
+ kunmap(page);
+-
+- return 0;
+ }
+
+ void
+@@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+ char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+ if ((new_bit_17 & 0x1) !=
+ (test_bit(i, obj_priv->bit_17) != 0)) {
+- int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
+- if (ret != 0) {
+- DRM_ERROR("Failed to swizzle page\n");
+- return;
+- }
++ i915_gem_swizzle_page(obj_priv->pages[i]);
+ set_page_dirty(obj_priv->pages[i]);
+ }
+ }
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 744225e..237b8bd 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ }
+
+ /* For display hotplug interrupt */
+-void
++static void
+ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ {
+ if ((dev_priv->irq_mask_reg & mask) != 0) {
+@@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev)
+ else {
+ i915_enable_pipestat(dev_priv, 1,
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
+- if (IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen >= 4)
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
+ }
+@@ -191,12 +191,7 @@ static int
+ i915_pipe_enabled(struct drm_device *dev, int pipe)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+- unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
+-
+- if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
+- return 1;
+-
+- return 0;
++ return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+ }
+
+ /* Called from drm generic code, passed a 'crtc', which
+@@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long high_frame;
+ unsigned long low_frame;
+- u32 high1, high2, low, count;
+-
+- high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+- low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
++ u32 high1, high2, low;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+@@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+ return 0;
+ }
+
++ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
++ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
++
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ do {
+- high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+- PIPE_FRAME_HIGH_SHIFT);
+- low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+- PIPE_FRAME_LOW_SHIFT);
+- high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+- PIPE_FRAME_HIGH_SHIFT);
++ high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
++ low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
++ high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ } while (high1 != high2);
+
+- count = (high1 << 8) | low;
+-
+- return count;
++ high1 >>= PIPE_FRAME_HIGH_SHIFT;
++ low >>= PIPE_FRAME_LOW_SHIFT;
++ return (high1 << 8) | low;
+ }
+
+ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+@@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
+ hotplug_work);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+- struct drm_encoder *encoder;
+-
+- if (mode_config->num_encoder) {
+- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+-
+- if (intel_encoder->hot_plug)
+- (*intel_encoder->hot_plug) (intel_encoder);
+- }
+- }
++ struct intel_encoder *encoder;
++
++ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
++ if (encoder->hot_plug)
++ encoder->hot_plug(encoder);
++
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+ }
+@@ -305,13 +293,30 @@ static void i915_handle_rps_change(struct drm_device *dev)
+ return;
+ }
+
+-irqreturn_t ironlake_irq_handler(struct drm_device *dev)
++static void notify_ring(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 seqno = ring->get_seqno(dev, ring);
++ ring->irq_gem_seqno = seqno;
++ trace_i915_gem_request_complete(dev, seqno);
++ wake_up_all(&ring->irq_queue);
++ dev_priv->hangcheck_count = 0;
++ mod_timer(&dev_priv->hangcheck_timer,
++ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
++}
++
++static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int ret = IRQ_NONE;
+ u32 de_iir, gt_iir, de_ier, pch_iir;
++ u32 hotplug_mask;
+ struct drm_i915_master_private *master_priv;
+- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
++ u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
++
++ if (IS_GEN6(dev))
++ bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+@@ -325,6 +330,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ goto done;
+
++ if (HAS_PCH_CPT(dev))
++ hotplug_mask = SDE_HOTPLUG_MASK_CPT;
++ else
++ hotplug_mask = SDE_HOTPLUG_MASK;
++
+ ret = IRQ_HANDLED;
+
+ if (dev->primary->master) {
+@@ -334,29 +344,24 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+ READ_BREADCRUMB(dev_priv);
+ }
+
+- if (gt_iir & GT_PIPE_NOTIFY) {
+- u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+- render_ring->irq_gem_seqno = seqno;
+- trace_i915_gem_request_complete(dev, seqno);
+- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+- dev_priv->hangcheck_count = 0;
+- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+- }
+- if (gt_iir & GT_BSD_USER_INTERRUPT)
+- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+-
++ if (gt_iir & GT_PIPE_NOTIFY)
++ notify_ring(dev, &dev_priv->render_ring);
++ if (gt_iir & bsd_usr_interrupt)
++ notify_ring(dev, &dev_priv->bsd_ring);
++ if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
++ notify_ring(dev, &dev_priv->blt_ring);
+
+ if (de_iir & DE_GSE)
+- ironlake_opregion_gse_intr(dev);
++ intel_opregion_gse_intr(dev);
+
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+- intel_finish_page_flip(dev, 0);
++ intel_finish_page_flip_plane(dev, 0);
+ }
+
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+- intel_finish_page_flip(dev, 1);
++ intel_finish_page_flip_plane(dev, 1);
+ }
+
+ if (de_iir & DE_PIPEA_VBLANK)
+@@ -366,10 +371,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+ drm_handle_vblank(dev, 1);
+
+ /* check event from PCH */
+- if ((de_iir & DE_PCH_EVENT) &&
+- (pch_iir & SDE_HOTPLUG_MASK)) {
++ if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+- }
+
+ if (de_iir & DE_PCU_EVENT) {
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+@@ -404,23 +407,20 @@ static void i915_error_work_func(struct work_struct *work)
+ char *reset_event[] = { "RESET=1", NULL };
+ char *reset_done_event[] = { "ERROR=0", NULL };
+
+- DRM_DEBUG_DRIVER("generating error event\n");
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+
+ if (atomic_read(&dev_priv->mm.wedged)) {
+- if (IS_I965G(dev)) {
+- DRM_DEBUG_DRIVER("resetting chip\n");
+- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+- if (!i965_reset(dev, GDRST_RENDER)) {
+- atomic_set(&dev_priv->mm.wedged, 0);
+- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+- }
+- } else {
+- DRM_DEBUG_DRIVER("reboot required\n");
++ DRM_DEBUG_DRIVER("resetting chip\n");
++ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
++ if (!i915_reset(dev, GRDOM_RENDER)) {
++ atomic_set(&dev_priv->mm.wedged, 0);
++ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+ }
++ complete_all(&dev_priv->error_completion);
+ }
+ }
+
++#ifdef CONFIG_DEBUG_FS
+ static struct drm_i915_error_object *
+ i915_error_object_create(struct drm_device *dev,
+ struct drm_gem_object *src)
+@@ -511,7 +511,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+
+ if (IS_I830(dev) || IS_845G(dev))
+ cmd = MI_BATCH_BUFFER;
+- else if (IS_I965G(dev))
++ else if (INTEL_INFO(dev)->gen >= 4)
+ cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ else
+@@ -584,13 +584,16 @@ static void i915_capture_error_state(struct drm_device *dev)
+ return;
+ }
+
+- error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
++ DRM_DEBUG_DRIVER("generating error event\n");
++
++ error->seqno =
++ dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+ error->pipeastat = I915_READ(PIPEASTAT);
+ error->pipebstat = I915_READ(PIPEBSTAT);
+ error->instpm = I915_READ(INSTPM);
+- if (!IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen < 4) {
+ error->ipeir = I915_READ(IPEIR);
+ error->ipehr = I915_READ(IPEHR);
+ error->instdone = I915_READ(INSTDONE);
+@@ -612,9 +615,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ batchbuffer[0] = NULL;
+ batchbuffer[1] = NULL;
+ count = 0;
+- list_for_each_entry(obj_priv,
+- &dev_priv->render_ring.active_list, list) {
+-
++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+ if (batchbuffer[0] == NULL &&
+@@ -631,7 +632,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ }
+ /* Scan the other lists for completeness for those bizarre errors. */
+ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
+- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
++ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+ if (batchbuffer[0] == NULL &&
+@@ -649,7 +650,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ }
+ }
+ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
+- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+ if (batchbuffer[0] == NULL &&
+@@ -668,7 +669,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ }
+
+ /* We need to copy these to an anonymous buffer as the simplest
+- * method to avoid being overwritten by userpace.
++ * method to avoid being overwritten by userspace.
+ */
+ error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
+ if (batchbuffer[1] != batchbuffer[0])
+@@ -690,8 +691,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+
+ if (error->active_bo) {
+ int i = 0;
+- list_for_each_entry(obj_priv,
+- &dev_priv->render_ring.active_list, list) {
++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+
+ error->active_bo[i].size = obj->size;
+@@ -744,6 +744,9 @@ void i915_destroy_error_state(struct drm_device *dev)
+ if (error)
+ i915_error_state_free(dev, error);
+ }
++#else
++#define i915_capture_error_state(x)
++#endif
+
+ static void i915_report_and_clear_eir(struct drm_device *dev)
+ {
+@@ -785,7 +788,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
+ }
+ }
+
+- if (IS_I9XX(dev)) {
++ if (!IS_GEN2(dev)) {
+ if (eir & I915_ERROR_PAGE_TABLE) {
+ u32 pgtbl_err = I915_READ(PGTBL_ER);
+ printk(KERN_ERR "page table error\n");
+@@ -811,7 +814,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
+ printk(KERN_ERR "instruction error\n");
+ printk(KERN_ERR " INSTPM: 0x%08x\n",
+ I915_READ(INSTPM));
+- if (!IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen < 4) {
+ u32 ipeir = I915_READ(IPEIR);
+
+ printk(KERN_ERR " IPEIR: 0x%08x\n",
+@@ -876,12 +879,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
+ i915_report_and_clear_eir(dev);
+
+ if (wedged) {
++ INIT_COMPLETION(dev_priv->error_completion);
+ atomic_set(&dev_priv->mm.wedged, 1);
+
+ /*
+ * Wakeup waiting processes so they don't hang
+ */
+- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
++ wake_up_all(&dev_priv->render_ring.irq_queue);
++ if (HAS_BSD(dev))
++ wake_up_all(&dev_priv->bsd_ring.irq_queue);
++ if (HAS_BLT(dev))
++ wake_up_all(&dev_priv->blt_ring.irq_queue);
+ }
+
+ queue_work(dev_priv->wq, &dev_priv->error_work);
+@@ -912,7 +920,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+
+ /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+ obj_priv = to_intel_bo(work->pending_flip_obj);
+- if(IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+ stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+ } else {
+@@ -942,7 +950,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ unsigned long irqflags;
+ int irq_received;
+ int ret = IRQ_NONE;
+- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+
+ atomic_inc(&dev_priv->irq_received);
+
+@@ -951,7 +958,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+
+ iir = I915_READ(IIR);
+
+- if (IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen >= 4)
+ vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
+ else
+ vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
+@@ -1019,18 +1026,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ READ_BREADCRUMB(dev_priv);
+ }
+
+- if (iir & I915_USER_INTERRUPT) {
+- u32 seqno =
+- render_ring->get_gem_seqno(dev, render_ring);
+- render_ring->irq_gem_seqno = seqno;
+- trace_i915_gem_request_complete(dev, seqno);
+- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+- dev_priv->hangcheck_count = 0;
+- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+- }
+-
++ if (iir & I915_USER_INTERRUPT)
++ notify_ring(dev, &dev_priv->render_ring);
+ if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
+- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
++ notify_ring(dev, &dev_priv->bsd_ring);
+
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 0);
+@@ -1065,7 +1064,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
+ (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
+ (iir & I915_ASLE_INTERRUPT))
+- opregion_asle_intr(dev);
++ intel_opregion_asle_intr(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+@@ -1207,18 +1206,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+- u32 pipeconf;
+
+- pipeconf = I915_READ(pipeconf_reg);
+- if (!(pipeconf & PIPEACONF_ENABLE))
++ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+- else if (IS_I965G(dev))
++ else if (INTEL_INFO(dev)->gen >= 4)
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ else
+@@ -1252,7 +1248,7 @@ void i915_enable_interrupt (struct drm_device *dev)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PCH_SPLIT(dev))
+- opregion_enable_asle(dev);
++ intel_opregion_enable_asle(dev);
+ dev_priv->irq_enabled = 1;
+ }
+
+@@ -1311,7 +1307,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
+-struct drm_i915_gem_request *
++static struct drm_i915_gem_request *
+ i915_get_tail_request(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -1331,11 +1327,7 @@ void i915_hangcheck_elapsed(unsigned long data)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t acthd, instdone, instdone1;
+
+- /* No reset support on this chip yet. */
+- if (IS_GEN6(dev))
+- return;
+-
+- if (!IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen < 4) {
+ acthd = I915_READ(ACTHD);
+ instdone = I915_READ(INSTDONE);
+ instdone1 = 0;
+@@ -1347,9 +1339,8 @@ void i915_hangcheck_elapsed(unsigned long data)
+
+ /* If all work is done then ACTHD clearly hasn't advanced. */
+ if (list_empty(&dev_priv->render_ring.request_list) ||
+- i915_seqno_passed(i915_get_gem_seqno(dev,
+- &dev_priv->render_ring),
+- i915_get_tail_request(dev)->seqno)) {
++ i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
++ i915_get_tail_request(dev)->seqno)) {
+ bool missed_wakeup = false;
+
+ dev_priv->hangcheck_count = 0;
+@@ -1357,13 +1348,19 @@ void i915_hangcheck_elapsed(unsigned long data)
+ /* Issue a wake-up to catch stuck h/w. */
+ if (dev_priv->render_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->render_ring.irq_queue)) {
+- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
++ wake_up_all(&dev_priv->render_ring.irq_queue);
+ missed_wakeup = true;
+ }
+
+ if (dev_priv->bsd_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
+- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
++ wake_up_all(&dev_priv->bsd_ring.irq_queue);
++ missed_wakeup = true;
++ }
++
++ if (dev_priv->blt_ring.waiting_gem_seqno &&
++ waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
++ wake_up_all(&dev_priv->blt_ring.irq_queue);
+ missed_wakeup = true;
+ }
+
+@@ -1377,6 +1374,21 @@ void i915_hangcheck_elapsed(unsigned long data)
+ dev_priv->last_instdone1 == instdone1) {
+ if (dev_priv->hangcheck_count++ > 1) {
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
++
++ if (!IS_GEN2(dev)) {
++ /* Is the chip hanging on a WAIT_FOR_EVENT?
++ * If so we can simply poke the RB_WAIT bit
++ * and break the hang. This should work on
++ * all but the second generation chipsets.
++ */
++ u32 tmp = I915_READ(PRB0_CTL);
++ if (tmp & RING_WAIT) {
++ I915_WRITE(PRB0_CTL, tmp);
++ POSTING_READ(PRB0_CTL);
++ goto out;
++ }
++ }
++
+ i915_handle_error(dev, true);
+ return;
+ }
+@@ -1388,8 +1400,10 @@ void i915_hangcheck_elapsed(unsigned long data)
+ dev_priv->last_instdone1 = instdone1;
+ }
+
++out:
+ /* Reset timer in case the chip hangs without another request being added */
+- mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
++ mod_timer(&dev_priv->hangcheck_timer,
++ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
+
+ /* drm_dma.h hooks
+@@ -1424,8 +1438,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+ u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
+- u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+- SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
++ u32 hotplug_mask;
+
+ dev_priv->irq_mask_reg = ~display_mask;
+ dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+@@ -1436,20 +1449,35 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
+ I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
+ (void) I915_READ(DEIER);
+
+- /* Gen6 only needs render pipe_control now */
+- if (IS_GEN6(dev))
+- render_mask = GT_PIPE_NOTIFY;
++ if (IS_GEN6(dev)) {
++ render_mask =
++ GT_PIPE_NOTIFY |
++ GT_GEN6_BSD_USER_INTERRUPT |
++ GT_BLT_USER_INTERRUPT;
++ }
+
+ dev_priv->gt_irq_mask_reg = ~render_mask;
+ dev_priv->gt_irq_enable_reg = render_mask;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+- if (IS_GEN6(dev))
++ if (IS_GEN6(dev)) {
+ I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
++ I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
++ I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
++ }
++
+ I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
+ (void) I915_READ(GTIER);
+
++ if (HAS_PCH_CPT(dev)) {
++ hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
++ SDE_PORTC_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT)
++ } else {
++ hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
++ SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
++ }
++
+ dev_priv->pch_irq_mask_reg = ~hotplug_mask;
+ dev_priv->pch_irq_enable_reg = hotplug_mask;
+
+@@ -1506,9 +1534,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
+ u32 error_mask;
+
+ DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+-
+ if (HAS_BSD(dev))
+ DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
++ if (HAS_BLT(dev))
++ DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
+
+ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+@@ -1578,7 +1607,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
+
+- opregion_enable_asle(dev);
++ intel_opregion_enable_asle(dev);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4f5e155..25ed911 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -25,52 +25,16 @@
+ #ifndef _I915_REG_H_
+ #define _I915_REG_H_
+
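++/* select the pipe-indexed register: pipe 0 yields (a), pipe 1 yields (b) */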
++#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
++
+ /*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
++ * This is all handled in the intel-gtt.ko module. i915.ko only
++ * cares about the vga bit for the vga arbiter.
+ */
+ #define INTEL_GMCH_CTRL 0x52
+ #define INTEL_GMCH_VGA_DISABLE (1 << 1)
+-#define INTEL_GMCH_ENABLED 0x4
+-#define INTEL_GMCH_MEM_MASK 0x1
+-#define INTEL_GMCH_MEM_64M 0x1
+-#define INTEL_GMCH_MEM_128M 0
+-
+-#define INTEL_GMCH_GMS_MASK (0xf << 4)
+-#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
+-#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+-#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+-#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+-#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+-#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+-
+-#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
+-#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
+-#define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4)
+-#define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4)
+-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+-
+-#define SNB_GMCH_CTRL 0x50
+-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+
+ /* PCI config space */
+
+@@ -106,10 +70,13 @@
+ #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
+ #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
+ #define LBB 0xf4
+-#define GDRST 0xc0
+-#define GDRST_FULL (0<<2)
+-#define GDRST_RENDER (1<<2)
+-#define GDRST_MEDIA (3<<2)
++
++/* Graphics reset regs */
++#define I965_GDRST 0xc0 /* PCI config register */
++#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
++#define GRDOM_FULL (0<<2)
++#define GRDOM_RENDER (1<<2)
++#define GRDOM_MEDIA (3<<2)
+
+ /* VGA stuff */
+
+@@ -192,11 +159,11 @@
+ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+ #define MI_STORE_DWORD_INDEX_SHIFT 2
+ #define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
++#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
+ #define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
+ #define MI_BATCH_NON_SECURE (1)
+ #define MI_BATCH_NON_SECURE_I965 (1<<8)
+ #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+-
+ /*
+ * 3D instructions used by the kernel
+ */
+@@ -249,6 +216,16 @@
+ #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+ #define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
+
++
++/*
++ * Reset registers
++ */
++#define DEBUG_RESET_I830 0x6070
++#define DEBUG_RESET_FULL (1<<7)
++#define DEBUG_RESET_RENDER (1<<8)
++#define DEBUG_RESET_DISPLAY (1<<9)
++
++
+ /*
+ * Fence registers
+ */
+@@ -283,6 +260,17 @@
+ #define PRB0_HEAD 0x02034
+ #define PRB0_START 0x02038
+ #define PRB0_CTL 0x0203c
++#define RENDER_RING_BASE 0x02000
++#define BSD_RING_BASE 0x04000
++#define GEN6_BSD_RING_BASE 0x12000
++#define BLT_RING_BASE 0x22000
++#define RING_TAIL(base) ((base)+0x30)
++#define RING_HEAD(base) ((base)+0x34)
++#define RING_START(base) ((base)+0x38)
++#define RING_CTL(base) ((base)+0x3c)
++#define RING_HWS_PGA(base) ((base)+0x80)
++#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
++#define RING_ACTHD(base) ((base)+0x74)
+ #define TAIL_ADDR 0x001FFFF8
+ #define HEAD_WRAP_COUNT 0xFFE00000
+ #define HEAD_WRAP_ONE 0x00200000
+@@ -295,6 +283,8 @@
+ #define RING_VALID_MASK 0x00000001
+ #define RING_VALID 0x00000001
+ #define RING_INVALID 0x00000000
++#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
++#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
+ #define PRB1_TAIL 0x02040 /* 915+ only */
+ #define PRB1_HEAD 0x02044 /* 915+ only */
+ #define PRB1_START 0x02048 /* 915+ only */
+@@ -306,7 +296,6 @@
+ #define INSTDONE1 0x0207c /* 965+ only */
+ #define ACTHD_I965 0x02074
+ #define HWS_PGA 0x02080
+-#define HWS_PGA_GEN6 0x04080
+ #define HWS_ADDRESS_MASK 0xfffff000
+ #define HWS_START_ADDRESS_SHIFT 4
+ #define PWRCTXA 0x2088 /* 965GM+ only */
+@@ -464,17 +453,17 @@
+ #define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
+ #define GEN6_BLITTER_SYNC_STATUS (1 << 24)
+ #define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
+-/*
+- * BSD (bit stream decoder instruction and interrupt control register defines
+- * (G4X and Ironlake only)
+- */
+
+-#define BSD_RING_TAIL 0x04030
+-#define BSD_RING_HEAD 0x04034
+-#define BSD_RING_START 0x04038
+-#define BSD_RING_CTL 0x0403c
+-#define BSD_RING_ACTHD 0x04074
+-#define BSD_HWS_PGA 0x04080
++#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
++#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
++#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
++#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
++#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
++
++#define GEN6_BSD_IMR 0x120a8
++#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12)
++
++#define GEN6_BSD_RNCID 0x12198
+
+ /*
+ * Framebuffer compression (915+ only)
+@@ -579,12 +568,51 @@
+ # define GPIO_DATA_VAL_IN (1 << 12)
+ # define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+-#define GMBUS0 0x5100
+-#define GMBUS1 0x5104
+-#define GMBUS2 0x5108
+-#define GMBUS3 0x510c
+-#define GMBUS4 0x5110
+-#define GMBUS5 0x5120
++#define GMBUS0 0x5100 /* clock/port select */
++#define GMBUS_RATE_100KHZ (0<<8)
++#define GMBUS_RATE_50KHZ (1<<8)
++#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
++#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
++#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
++#define GMBUS_PORT_DISABLED 0
++#define GMBUS_PORT_SSC 1
++#define GMBUS_PORT_VGADDC 2
++#define GMBUS_PORT_PANEL 3
++#define GMBUS_PORT_DPC 4 /* HDMIC */
++#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
++ /* 6 reserved */
++#define GMBUS_PORT_DPD 7 /* HDMID */
++#define GMBUS_NUM_PORTS 8
++#define GMBUS1 0x5104 /* command/status */
++#define GMBUS_SW_CLR_INT (1<<31)
++#define GMBUS_SW_RDY (1<<30)
++#define GMBUS_ENT (1<<29) /* enable timeout */
++#define GMBUS_CYCLE_NONE (0<<25)
++#define GMBUS_CYCLE_WAIT (1<<25)
++#define GMBUS_CYCLE_INDEX (2<<25)
++#define GMBUS_CYCLE_STOP (4<<25)
++#define GMBUS_BYTE_COUNT_SHIFT 16
++#define GMBUS_SLAVE_INDEX_SHIFT 8
++#define GMBUS_SLAVE_ADDR_SHIFT 1
++#define GMBUS_SLAVE_READ (1<<0)
++#define GMBUS_SLAVE_WRITE (0<<0)
++#define GMBUS2 0x5108 /* status */
++#define GMBUS_INUSE (1<<15)
++#define GMBUS_HW_WAIT_PHASE (1<<14)
++#define GMBUS_STALL_TIMEOUT (1<<13)
++#define GMBUS_INT (1<<12)
++#define GMBUS_HW_RDY (1<<11)
++#define GMBUS_SATOER (1<<10)
++#define GMBUS_ACTIVE (1<<9)
++#define GMBUS3 0x510c /* data buffer bytes 3-0 */
++#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
++#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
++#define GMBUS_NAK_EN (1<<3)
++#define GMBUS_IDLE_EN (1<<2)
++#define GMBUS_HW_WAIT_EN (1<<1)
++#define GMBUS_HW_RDY_EN (1<<0)
++#define GMBUS5 0x5120 /* byte index */
++#define GMBUS_2BYTE_INDEX_EN (1<<31)
+
+ /*
+ * Clock control & power management
+@@ -603,6 +631,7 @@
+ #define VGA1_PD_P1_MASK (0x1f << 8)
+ #define DPLL_A 0x06014
+ #define DPLL_B 0x06018
++#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
+ #define DPLL_VCO_ENABLE (1 << 31)
+ #define DPLL_DVO_HIGH_SPEED (1 << 30)
+ #define DPLL_SYNCLOCK_ENABLE (1 << 29)
+@@ -633,31 +662,6 @@
+ #define LVDS 0x61180
+ #define LVDS_ON (1<<31)
+
+-#define ADPA 0x61100
+-#define ADPA_DPMS_MASK (~(3<<10))
+-#define ADPA_DPMS_ON (0<<10)
+-#define ADPA_DPMS_SUSPEND (1<<10)
+-#define ADPA_DPMS_STANDBY (2<<10)
+-#define ADPA_DPMS_OFF (3<<10)
+-
+-#define RING_TAIL 0x00
+-#define TAIL_ADDR 0x001FFFF8
+-#define RING_HEAD 0x04
+-#define HEAD_WRAP_COUNT 0xFFE00000
+-#define HEAD_WRAP_ONE 0x00200000
+-#define HEAD_ADDR 0x001FFFFC
+-#define RING_START 0x08
+-#define START_ADDR 0xFFFFF000
+-#define RING_LEN 0x0C
+-#define RING_NR_PAGES 0x001FF000
+-#define RING_REPORT_MASK 0x00000006
+-#define RING_REPORT_64K 0x00000002
+-#define RING_REPORT_128K 0x00000004
+-#define RING_NO_REPORT 0x00000000
+-#define RING_VALID_MASK 0x00000001
+-#define RING_VALID 0x00000001
+-#define RING_INVALID 0x00000000
+-
+ /* Scratch pad debug 0 reg:
+ */
+ #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+@@ -736,10 +740,13 @@
+ #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+ #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+ #define DPLL_B_MD 0x06020 /* 965+ only */
++#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
+ #define FPA0 0x06040
+ #define FPA1 0x06044
+ #define FPB0 0x06048
+ #define FPB1 0x0604c
++#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
++#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
+ #define FP_N_DIV_MASK 0x003f0000
+ #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
+ #define FP_N_DIV_SHIFT 16
+@@ -760,6 +767,7 @@
+ #define DPLLA_TEST_M_BYPASS (1 << 2)
+ #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+ #define D_STATE 0x6104
++#define DSTATE_GFX_RESET_I830 (1<<6)
+ #define DSTATE_PLL_D3_OFF (1<<3)
+ #define DSTATE_GFX_CLOCK_GATING (1<<1)
+ #define DSTATE_DOT_CLOCK_GATING (1<<0)
+@@ -926,6 +934,8 @@
+ #define CLKCFG_MEM_800 (3 << 4)
+ #define CLKCFG_MEM_MASK (7 << 4)
+
++#define TSC1 0x11001
++#define TSE (1<<0)
+ #define TR1 0x11006
+ #define TSFS 0x11020
+ #define TSFS_SLOPE_MASK 0x0000ff00
+@@ -1070,6 +1080,8 @@
+ #define MEMSTAT_SRC_CTL_STDBY 3
+ #define RCPREVBSYTUPAVG 0x113b8
+ #define RCPREVBSYTDNAVG 0x113bc
++#define PMMISC 0x11214
++#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
+ #define SDEW 0x1124c
+ #define CSIEW0 0x11250
+ #define CSIEW1 0x11254
+@@ -1150,6 +1162,15 @@
+ #define PIPEBSRC 0x6101c
+ #define BCLRPAT_B 0x61020
+
++#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
++#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
++#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
++#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
++#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
++#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
++#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
++#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
++
+ /* VGA port control */
+ #define ADPA 0x61100
+ #define ADPA_DAC_ENABLE (1<<31)
+@@ -1173,6 +1194,7 @@
+ #define ADPA_DPMS_STANDBY (2<<10)
+ #define ADPA_DPMS_OFF (3<<10)
+
++
+ /* Hotplug control (945+ only) */
+ #define PORT_HOTPLUG_EN 0x61110
+ #define HDMIB_HOTPLUG_INT_EN (1 << 29)
+@@ -1331,6 +1353,22 @@
+ #define LVDS_B0B3_POWER_DOWN (0 << 2)
+ #define LVDS_B0B3_POWER_UP (3 << 2)
+
++/* Video Data Island Packet control */
++#define VIDEO_DIP_DATA 0x61178
++#define VIDEO_DIP_CTL 0x61170
++#define VIDEO_DIP_ENABLE (1 << 31)
++#define VIDEO_DIP_PORT_B (1 << 29)
++#define VIDEO_DIP_PORT_C (2 << 29)
++#define VIDEO_DIP_ENABLE_AVI (1 << 21)
++#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
++#define VIDEO_DIP_ENABLE_SPD (8 << 21)
++#define VIDEO_DIP_SELECT_AVI (0 << 19)
++#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
++#define VIDEO_DIP_SELECT_SPD (3 << 19)
++#define VIDEO_DIP_FREQ_ONCE (0 << 16)
++#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
++#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
++
+ /* Panel power sequencing */
+ #define PP_STATUS 0x61200
+ #define PP_ON (1 << 31)
+@@ -1346,6 +1384,9 @@
+ #define PP_SEQUENCE_ON (1 << 28)
+ #define PP_SEQUENCE_OFF (2 << 28)
+ #define PP_SEQUENCE_MASK 0x30000000
++#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
++#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
++#define PP_SEQUENCE_STATE_MASK 0x0000000f
+ #define PP_CONTROL 0x61204
+ #define POWER_TARGET_ON (1 << 0)
+ #define PP_ON_DELAYS 0x61208
+@@ -1481,6 +1522,7 @@
+ # define TV_TEST_MODE_MASK (7 << 0)
+
+ #define TV_DAC 0x68004
++# define TV_DAC_SAVE 0x00ffff00
+ /**
+ * Reports that DAC state change logic has reported change (RO).
+ *
+@@ -2075,29 +2117,35 @@
+
+ /* Display & cursor control */
+
+-/* dithering flag on Ironlake */
+-#define PIPE_ENABLE_DITHER (1 << 4)
+-#define PIPE_DITHER_TYPE_MASK (3 << 2)
+-#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
+-#define PIPE_DITHER_TYPE_ST01 (1 << 2)
+ /* Pipe A */
+ #define PIPEADSL 0x70000
+-#define DSL_LINEMASK 0x00000fff
++#define DSL_LINEMASK 0x00000fff
+ #define PIPEACONF 0x70008
+-#define PIPEACONF_ENABLE (1<<31)
+-#define PIPEACONF_DISABLE 0
+-#define PIPEACONF_DOUBLE_WIDE (1<<30)
++#define PIPECONF_ENABLE (1<<31)
++#define PIPECONF_DISABLE 0
++#define PIPECONF_DOUBLE_WIDE (1<<30)
+ #define I965_PIPECONF_ACTIVE (1<<30)
+-#define PIPEACONF_SINGLE_WIDE 0
+-#define PIPEACONF_PIPE_UNLOCKED 0
+-#define PIPEACONF_PIPE_LOCKED (1<<25)
+-#define PIPEACONF_PALETTE 0
+-#define PIPEACONF_GAMMA (1<<24)
++#define PIPECONF_SINGLE_WIDE 0
++#define PIPECONF_PIPE_UNLOCKED 0
++#define PIPECONF_PIPE_LOCKED (1<<25)
++#define PIPECONF_PALETTE 0
++#define PIPECONF_GAMMA (1<<24)
+ #define PIPECONF_FORCE_BORDER (1<<25)
+ #define PIPECONF_PROGRESSIVE (0 << 21)
+ #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+ #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+ #define PIPECONF_CXSR_DOWNCLOCK (1<<16)
++#define PIPECONF_BPP_MASK (0x000000e0)
++#define PIPECONF_BPP_8 (0<<5)
++#define PIPECONF_BPP_10 (1<<5)
++#define PIPECONF_BPP_6 (2<<5)
++#define PIPECONF_BPP_12 (3<<5)
++#define PIPECONF_DITHER_EN (1<<4)
++#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
++#define PIPECONF_DITHER_TYPE_SP (0<<2)
++#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
++#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
++#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
+ #define PIPEASTAT 0x70024
+ #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
+ #define PIPE_CRC_ERROR_ENABLE (1UL<<29)
+@@ -2128,12 +2176,15 @@
+ #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
+ #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
+ #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
+-#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
++#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
+ #define PIPE_8BPC (0 << 5)
+ #define PIPE_10BPC (1 << 5)
+ #define PIPE_6BPC (2 << 5)
+ #define PIPE_12BPC (3 << 5)
+
++#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
++#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
++
+ #define DSPARB 0x70030
+ #define DSPARB_CSTART_MASK (0x7f << 7)
+ #define DSPARB_CSTART_SHIFT 7
+@@ -2206,8 +2257,8 @@
+ #define WM1_LP_SR_EN (1<<31)
+ #define WM1_LP_LATENCY_SHIFT 24
+ #define WM1_LP_LATENCY_MASK (0x7f<<24)
+-#define WM1_LP_FBC_LP1_MASK (0xf<<20)
+-#define WM1_LP_FBC_LP1_SHIFT 20
++#define WM1_LP_FBC_MASK (0xf<<20)
++#define WM1_LP_FBC_SHIFT 20
+ #define WM1_LP_SR_MASK (0x1ff<<8)
+ #define WM1_LP_SR_SHIFT 8
+ #define WM1_LP_CURSOR_MASK (0x3f)
+@@ -2333,6 +2384,14 @@
+ #define DSPASURF 0x7019C /* 965+ only */
+ #define DSPATILEOFF 0x701A4 /* 965+ only */
+
++#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
++#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
++#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
++#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
++#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
++#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
++#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
++
+ /* VBIOS flags */
+ #define SWF00 0x71410
+ #define SWF01 0x71414
+@@ -2397,6 +2456,7 @@
+ #define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
+
+ #define FDI_PLL_BIOS_0 0x46000
++#define FDI_PLL_FB_CLOCK_MASK 0xff
+ #define FDI_PLL_BIOS_1 0x46004
+ #define FDI_PLL_BIOS_2 0x46008
+ #define DISPLAY_PORT_PLL_BIOS_0 0x4600c
+@@ -2420,46 +2480,47 @@
+ #define PIPEA_DATA_M1 0x60030
+ #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+ #define TU_SIZE_MASK 0x7e000000
+-#define PIPEA_DATA_M1_OFFSET 0
++#define PIPE_DATA_M1_OFFSET 0
+ #define PIPEA_DATA_N1 0x60034
+-#define PIPEA_DATA_N1_OFFSET 0
++#define PIPE_DATA_N1_OFFSET 0
+
+ #define PIPEA_DATA_M2 0x60038
+-#define PIPEA_DATA_M2_OFFSET 0
++#define PIPE_DATA_M2_OFFSET 0
+ #define PIPEA_DATA_N2 0x6003c
+-#define PIPEA_DATA_N2_OFFSET 0
++#define PIPE_DATA_N2_OFFSET 0
+
+ #define PIPEA_LINK_M1 0x60040
+-#define PIPEA_LINK_M1_OFFSET 0
++#define PIPE_LINK_M1_OFFSET 0
+ #define PIPEA_LINK_N1 0x60044
+-#define PIPEA_LINK_N1_OFFSET 0
++#define PIPE_LINK_N1_OFFSET 0
+
+ #define PIPEA_LINK_M2 0x60048
+-#define PIPEA_LINK_M2_OFFSET 0
++#define PIPE_LINK_M2_OFFSET 0
+ #define PIPEA_LINK_N2 0x6004c
+-#define PIPEA_LINK_N2_OFFSET 0
++#define PIPE_LINK_N2_OFFSET 0
+
+ /* PIPEB timing regs have the same layout, starting at 0x61000 */
+
+ #define PIPEB_DATA_M1 0x61030
+-#define PIPEB_DATA_M1_OFFSET 0
+ #define PIPEB_DATA_N1 0x61034
+-#define PIPEB_DATA_N1_OFFSET 0
+
+ #define PIPEB_DATA_M2 0x61038
+-#define PIPEB_DATA_M2_OFFSET 0
+ #define PIPEB_DATA_N2 0x6103c
+-#define PIPEB_DATA_N2_OFFSET 0
+
+ #define PIPEB_LINK_M1 0x61040
+-#define PIPEB_LINK_M1_OFFSET 0
+ #define PIPEB_LINK_N1 0x61044
+-#define PIPEB_LINK_N1_OFFSET 0
+
+ #define PIPEB_LINK_M2 0x61048
+-#define PIPEB_LINK_M2_OFFSET 0
+ #define PIPEB_LINK_N2 0x6104c
+-#define PIPEB_LINK_N2_OFFSET 0
++
++#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
++#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
++#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
++#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
++#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
++#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
++#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
++#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
+
+ /* CPU panel fitter */
+ #define PFA_CTL_1 0x68080
+@@ -2516,7 +2577,8 @@
+ #define GT_SYNC_STATUS (1 << 2)
+ #define GT_USER_INTERRUPT (1 << 0)
+ #define GT_BSD_USER_INTERRUPT (1 << 5)
+-
++#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
++#define GT_BLT_USER_INTERRUPT (1 << 22)
+
+ #define GTISR 0x44010
+ #define GTIMR 0x44014
+@@ -2551,6 +2613,10 @@
+ #define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+ #define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+ #define SDE_PORTB_HOTPLUG_CPT (1 << 21)
++#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
++ SDE_PORTD_HOTPLUG_CPT | \
++ SDE_PORTC_HOTPLUG_CPT | \
++ SDE_PORTB_HOTPLUG_CPT)
+
+ #define SDEISR 0xc4000
+ #define SDEIMR 0xc4004
+@@ -2600,11 +2666,14 @@
+
+ #define PCH_DPLL_A 0xc6014
+ #define PCH_DPLL_B 0xc6018
++#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
+
+ #define PCH_FPA0 0xc6040
+ #define PCH_FPA1 0xc6044
+ #define PCH_FPB0 0xc6048
+ #define PCH_FPB1 0xc604c
++#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
++#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
+
+ #define PCH_DPLL_TEST 0xc606c
+
+@@ -2690,6 +2759,13 @@
+ #define TRANS_VBLANK_B 0xe1010
+ #define TRANS_VSYNC_B 0xe1014
+
++#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
++#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
++#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
++#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
++#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
++#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
++
+ #define TRANSB_DATA_M1 0xe1030
+ #define TRANSB_DATA_N1 0xe1034
+ #define TRANSB_DATA_M2 0xe1038
+@@ -2701,6 +2777,7 @@
+
+ #define TRANSACONF 0xf0008
+ #define TRANSBCONF 0xf1008
++#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
+ #define TRANS_DISABLE (0<<31)
+ #define TRANS_ENABLE (1<<31)
+ #define TRANS_STATE_MASK (1<<30)
+@@ -2721,10 +2798,15 @@
+ #define FDI_RXA_CHICKEN 0xc200c
+ #define FDI_RXB_CHICKEN 0xc2010
+ #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
++#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
++
++#define SOUTH_DSPCLK_GATE_D 0xc2020
++#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+
+ /* CPU: FDI_TX */
+ #define FDI_TXA_CTL 0x60100
+ #define FDI_TXB_CTL 0x61100
++#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
+ #define FDI_TX_DISABLE (0<<31)
+ #define FDI_TX_ENABLE (1<<31)
+ #define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
+@@ -2766,8 +2848,8 @@
+ /* FDI_RX, FDI_X is hard-wired to Transcoder_X */
+ #define FDI_RXA_CTL 0xf000c
+ #define FDI_RXB_CTL 0xf100c
++#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
+ #define FDI_RX_ENABLE (1<<31)
+-#define FDI_RX_DISABLE (0<<31)
+ /* train, dp width same as FDI_TX */
+ #define FDI_DP_PORT_WIDTH_X8 (7<<19)
+ #define FDI_8BPC (0<<16)
+@@ -2782,8 +2864,7 @@
+ #define FDI_FS_ERR_REPORT_ENABLE (1<<9)
+ #define FDI_FE_ERR_REPORT_ENABLE (1<<8)
+ #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
+-#define FDI_SEL_RAWCLK (0<<4)
+-#define FDI_SEL_PCDCLK (1<<4)
++#define FDI_PCDCLK (1<<4)
+ /* CPT */
+ #define FDI_AUTO_TRAINING (1<<10)
+ #define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
+@@ -2798,6 +2879,9 @@
+ #define FDI_RXA_TUSIZE2 0xf0038
+ #define FDI_RXB_TUSIZE1 0xf1030
+ #define FDI_RXB_TUSIZE2 0xf1038
++#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
++#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
++#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
+
+ /* FDI_RX interrupt register format */
+ #define FDI_RX_INTER_LANE_ALIGN (1<<10)
+@@ -2816,6 +2900,8 @@
+ #define FDI_RXA_IMR 0xf0018
+ #define FDI_RXB_IIR 0xf1014
+ #define FDI_RXB_IMR 0xf1018
++#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
++#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
+
+ #define FDI_PLL_CTL_1 0xfe000
+ #define FDI_PLL_CTL_2 0xfe004
+@@ -2935,6 +3021,7 @@
+ #define TRANS_DP_CTL_A 0xe0300
+ #define TRANS_DP_CTL_B 0xe1300
+ #define TRANS_DP_CTL_C 0xe2300
++#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
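++/* Transcoders A/B/C sit at a fixed 0x1000 stride, so the two-register
++ * _PIPE() helper cannot be used here. */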
+ #define TRANS_DP_OUTPUT_ENABLE (1<<31)
+ #define TRANS_DP_PORT_SEL_B (0<<29)
+ #define TRANS_DP_PORT_SEL_C (1<<29)
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index 31f0858..454c064 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ dev_priv->saveFPA1 = I915_READ(FPA1);
+ dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+ }
+- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
++ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+ dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+ dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+@@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+ dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+ dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+ dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+ }
+@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ dev_priv->saveFPB1 = I915_READ(FPB1);
+ dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+ }
+- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
++ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+ dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+ dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+@@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+ dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+ dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+- if (IS_I965GM(dev) || IS_GM45(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+ dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+ }
+@@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ POSTING_READ(DPLL_A_MD);
+ }
+@@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+ I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
+ I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+ I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ }
+@@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ POSTING_READ(DPLL_B_MD);
+ }
+@@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+ I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
+ I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+ I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ }
+@@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev)
+ dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+- if (!IS_I9XX(dev))
++ if (IS_GEN2(dev))
+ dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
+ /* CRT state */
+@@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev)
+ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+- if (IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ dev_priv->saveLVDS = I915_READ(LVDS);
+@@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev)
+ I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+- if (!IS_I9XX(dev))
++ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
+ /* CRT state */
+@@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev)
+ I915_WRITE(ADPA, dev_priv->saveADPA);
+
+ /* LVDS state */
+- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
++ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+
+ if (HAS_PCH_SPLIT(dev)) {
+@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
+ /* Clock gating state */
+ intel_init_clock_gating(dev);
+
+- if (HAS_PCH_SPLIT(dev))
++ if (HAS_PCH_SPLIT(dev)) {
+ ironlake_enable_drps(dev);
++ intel_init_emon(dev);
++ }
+
+ /* Cache mode state */
+ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+@@ -878,9 +880,7 @@ int i915_restore_state(struct drm_device *dev)
+ for (i = 0; i < 3; i++)
+ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+
+- /* I2C state */
+- intel_i2c_reset_gmbus(dev);
++ intel_i2c_reset(dev);
+
+ return 0;
+ }
+-
+diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
+new file mode 100644
+index 0000000..65c88f9
+--- /dev/null
++++ b/drivers/gpu/drm/i915/intel_acpi.c
+@@ -0,0 +1,286 @@
++/*
++ * Intel ACPI functions
++ *
++ * _DSM related code stolen from nouveau_acpi.c.
++ */
++#include <linux/pci.h>
++#include <linux/acpi.h>
++#include <linux/vga_switcheroo.h>
++#include <acpi/acpi_drivers.h>
++
++#include "drmP.h"
++
++#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
++
++#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
++#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
++
++static struct intel_dsm_priv {
++ acpi_handle dhandle;
++} intel_dsm_priv;
++
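++/*
++ * _DSM UUID 7ed873d3-c2d0-4e4f-a854-0f1317b01c2c as an ACPI buffer:
++ * the first three UUID fields are stored little-endian per the ACPI
++ * spec, hence the byte order below.
++ */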
++static const u8 intel_dsm_guid[] = {
++ 0xd3, 0x73, 0xd8, 0x7e,
++ 0xd0, 0xc2,
++ 0x4f, 0x4e,
++ 0xa8, 0x54,
++ 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
++};
++
++static int intel_dsm(acpi_handle handle, int func, int arg)
++{
++ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
++ struct acpi_object_list input;
++ union acpi_object params[4];
++ union acpi_object *obj;
++ u32 result;
++ int ret = 0;
++
++ input.count = 4;
++ input.pointer = params;
++ params[0].type = ACPI_TYPE_BUFFER;
++ params[0].buffer.length = sizeof(intel_dsm_guid);
++ params[0].buffer.pointer = (char *)intel_dsm_guid;
++ params[1].type = ACPI_TYPE_INTEGER;
++ params[1].integer.value = INTEL_DSM_REVISION_ID;
++ params[2].type = ACPI_TYPE_INTEGER;
++ params[2].integer.value = func;
++ params[3].type = ACPI_TYPE_INTEGER;
++ params[3].integer.value = arg;
++
++ ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
++ if (ret) {
++ DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
++ return ret;
++ }
++
++ obj = (union acpi_object *)output.pointer;
++
++ result = 0;
++ switch (obj->type) {
++ case ACPI_TYPE_INTEGER:
++ result = obj->integer.value;
++ break;
++
++ case ACPI_TYPE_BUFFER:
++ if (obj->buffer.length == 4) {
++ result = (obj->buffer.pointer[0] |
++ (obj->buffer.pointer[1] << 8) |
++ (obj->buffer.pointer[2] << 16) |
++ (obj->buffer.pointer[3] << 24));
++ break;
++ }
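++ /* fall through */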
++ default:
++ ret = -EINVAL;
++ break;
++ }
++ if (result == 0x80000002)
++ ret = -ENODEV;
++
++ kfree(output.pointer);
++ return ret;
++}
++
++static char *intel_dsm_port_name(u8 id)
++{
++ switch (id) {
++ case 0:
++ return "Reserved";
++ case 1:
++ return "Analog VGA";
++ case 2:
++ return "LVDS";
++ case 3:
++ return "Reserved";
++ case 4:
++ return "HDMI/DVI_B";
++ case 5:
++ return "HDMI/DVI_C";
++ case 6:
++ return "HDMI/DVI_D";
++ case 7:
++ return "DisplayPort_A";
++ case 8:
++ return "DisplayPort_B";
++ case 9:
++ return "DisplayPort_C";
++ case 0xa:
++ return "DisplayPort_D";
++ case 0xb:
++ case 0xc:
++ case 0xd:
++ return "Reserved";
++ case 0xe:
++ return "WiDi";
++ default:
++ return "bad type";
++ }
++}
++
++static char *intel_dsm_mux_type(u8 type)
++{
++ switch (type) {
++ case 0:
++ return "unknown";
++ case 1:
++ return "No MUX, iGPU only";
++ case 2:
++ return "No MUX, dGPU only";
++ case 3:
++ return "MUXed between iGPU and dGPU";
++ default:
++ return "bad type";
++ }
++}
++
++static void intel_dsm_platform_mux_info(void)
++{
++ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
++ struct acpi_object_list input;
++ union acpi_object params[4];
++ union acpi_object *pkg;
++ int i, ret;
++
++ input.count = 4;
++ input.pointer = params;
++ params[0].type = ACPI_TYPE_BUFFER;
++ params[0].buffer.length = sizeof(intel_dsm_guid);
++ params[0].buffer.pointer = (char *)intel_dsm_guid;
++ params[1].type = ACPI_TYPE_INTEGER;
++ params[1].integer.value = INTEL_DSM_REVISION_ID;
++ params[2].type = ACPI_TYPE_INTEGER;
++ params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
++ params[3].type = ACPI_TYPE_INTEGER;
++ params[3].integer.value = 0;
++
++ ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
++ &output);
++ if (ret) {
++ DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
++ goto out;
++ }
++
++ pkg = (union acpi_object *)output.pointer;
++
++ if (pkg->type == ACPI_TYPE_PACKAGE) {
++ union acpi_object *connector_count = &pkg->package.elements[0];
++ DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
++ (unsigned long long)connector_count->integer.value);
++ for (i = 1; i < pkg->package.count; i++) {
++ union acpi_object *obj = &pkg->package.elements[i];
++ union acpi_object *connector_id =
++ &obj->package.elements[0];
++ union acpi_object *info = &obj->package.elements[1];
++ DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
++ (unsigned long long)connector_id->integer.value);
++ DRM_DEBUG_DRIVER(" port id: %s\n",
++ intel_dsm_port_name(info->buffer.pointer[0]));
++ DRM_DEBUG_DRIVER(" display mux info: %s\n",
++ intel_dsm_mux_type(info->buffer.pointer[1]));
++ DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
++ intel_dsm_mux_type(info->buffer.pointer[2]));
++ DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
++ intel_dsm_mux_type(info->buffer.pointer[3]));
++ }
++ } else {
++ DRM_ERROR("MUX INFO call failed\n");
++ }
++
++out:
++ kfree(output.pointer);
++}
++
++static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
++{
++ return 0;
++}
++
++static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
++ enum vga_switcheroo_state state)
++{
++ return 0;
++}
++
++static int intel_dsm_init(void)
++{
++ return 0;
++}
++
++static int intel_dsm_get_client_id(struct pci_dev *pdev)
++{
++ if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
++ return VGA_SWITCHEROO_IGD;
++ else
++ return VGA_SWITCHEROO_DIS;
++}
++
++static struct vga_switcheroo_handler intel_dsm_handler = {
++ .switchto = intel_dsm_switchto,
++ .power_state = intel_dsm_power_state,
++ .init = intel_dsm_init,
++ .get_client_id = intel_dsm_get_client_id,
++};
++
++static bool intel_dsm_pci_probe(struct pci_dev *pdev)
++{
++ acpi_handle dhandle, intel_handle;
++ acpi_status status;
++ int ret;
++
++ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
++ if (!dhandle)
++ return false;
++
++ status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
++ if (ACPI_FAILURE(status)) {
++ DRM_DEBUG_KMS("no _DSM method for intel device\n");
++ return false;
++ }
++
++ ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
++ if (ret < 0) {
++ DRM_ERROR("failed to get supported _DSM functions\n");
++ return false;
++ }
++
++ intel_dsm_priv.dhandle = dhandle;
++
++ intel_dsm_platform_mux_info();
++ return true;
++}
++
++static bool intel_dsm_detect(void)
++{
++ char acpi_method_name[255] = { 0 };
++ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
++ struct pci_dev *pdev = NULL;
++ bool has_dsm = false;
++ int vga_count = 0;
++
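++ /* Hybrid graphics: expect exactly two VGA-class devices, with the
++ * Intel IGD among them answering our _DSM (checked below). */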
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
++ vga_count++;
++ has_dsm |= intel_dsm_pci_probe(pdev);
++ }
++
++ if (vga_count == 2 && has_dsm) {
++ acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
++ DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
++ acpi_method_name);
++ return true;
++ }
++
++ return false;
++}
++
++void intel_register_dsm_handler(void)
++{
++ if (!intel_dsm_detect())
++ return;
++
++ vga_switcheroo_register_handler(&intel_dsm_handler);
++}
++
++void intel_unregister_dsm_handler(void)
++{
++ vga_switcheroo_unregister_handler();
++}
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 96f75d7..b0b1200 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -24,6 +24,7 @@
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
++#include <drm/drm_dp_helper.h>
+ #include "drmP.h"
+ #include "drm.h"
+ #include "i915_drm.h"
+@@ -129,10 +130,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ int i, temp_downclock;
+ struct drm_display_mode *temp_mode;
+
+- /* Defaults if we can't find VBT info */
+- dev_priv->lvds_dither = 0;
+- dev_priv->lvds_vbt = 0;
+-
+ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return;
+@@ -140,6 +137,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ dev_priv->lvds_dither = lvds_options->pixel_dither;
+ if (lvds_options->panel_type == 0xff)
+ return;
++
+ panel_type = lvds_options->panel_type;
+
+ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+@@ -169,6 +167,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ ((unsigned char *)entry + dvo_timing_offset);
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
++ if (!panel_fixed_mode)
++ return;
+
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+
+@@ -230,8 +230,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+
+- dev_priv->sdvo_lvds_vbt_mode = NULL;
+-
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+@@ -260,10 +258,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
+ struct drm_device *dev = dev_priv->dev;
+ struct bdb_general_features *general;
+
+- /* Set sensible defaults in case we can't find the general block */
+- dev_priv->int_tv_support = 1;
+- dev_priv->int_crt_support = 1;
+-
+ general = find_section(bdb, BDB_GENERAL_FEATURES);
+ if (general) {
+ dev_priv->int_tv_support = general->int_tv_support;
+@@ -271,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
+ dev_priv->lvds_use_ssc = general->enable_ssc;
+
+ if (dev_priv->lvds_use_ssc) {
+- if (IS_I85X(dev_priv->dev))
++ if (IS_I85X(dev))
+ dev_priv->lvds_ssc_freq =
+ general->ssc_freq ? 66 : 48;
+- else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
++ else if (IS_GEN5(dev) || IS_GEN6(dev))
+ dev_priv->lvds_ssc_freq =
+ general->ssc_freq ? 100 : 120;
+ else
+@@ -289,14 +283,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+ {
+ struct bdb_general_definitions *general;
+- const int crt_bus_map_table[] = {
+- GPIOB,
+- GPIOA,
+- GPIOC,
+- GPIOD,
+- GPIOE,
+- GPIOF,
+- };
+
+ general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (general) {
+@@ -304,10 +290,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
+ if (block_size >= sizeof(*general)) {
+ int bus_pin = general->crt_ddc_gmbus_pin;
+ DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+- if ((bus_pin >= 1) && (bus_pin <= 6)) {
+- dev_priv->crt_ddc_bus =
+- crt_bus_map_table[bus_pin-1];
+- }
++ if (bus_pin >= 1 && bus_pin <= 6)
++ dev_priv->crt_ddc_pin = bus_pin;
+ } else {
+ DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
+ block_size);
+@@ -317,7 +301,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
+
+ static void
+ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+- struct bdb_header *bdb)
++ struct bdb_header *bdb)
+ {
+ struct sdvo_device_mapping *p_mapping;
+ struct bdb_general_definitions *p_defs;
+@@ -327,7 +311,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+- DRM_DEBUG_KMS("No general definition block is found\n");
++ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
+ return;
+ }
+ /* judge whether the size of child device meets the requirements.
+@@ -377,7 +361,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+ p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
++ p_mapping->i2c_pin = p_child->i2c_pin;
++ p_mapping->i2c_speed = p_child->i2c_speed;
+ p_mapping->initialized = 1;
++ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
++ p_mapping->dvo_port,
++ p_mapping->slave_addr,
++ p_mapping->dvo_wiring,
++ p_mapping->ddc_pin,
++ p_mapping->i2c_pin,
++ p_mapping->i2c_speed);
+ } else {
+ DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+ "two SDVO device.\n");
+@@ -409,14 +402,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
+ if (!driver)
+ return;
+
+- if (driver && SUPPORTS_EDP(dev) &&
+- driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
+- dev_priv->edp_support = 1;
+- } else {
+- dev_priv->edp_support = 0;
+- }
++ if (SUPPORTS_EDP(dev) &&
++ driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
++ dev_priv->edp.support = 1;
+
+- if (driver && driver->dual_frequency)
++ if (driver->dual_frequency)
+ dev_priv->render_reclock_avail = true;
+ }
+
+@@ -424,27 +414,78 @@ static void
+ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+ {
+ struct bdb_edp *edp;
++ struct edp_power_seq *edp_pps;
++ struct edp_link_params *edp_link_params;
+
+ edp = find_section(bdb, BDB_EDP);
+ if (!edp) {
+- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
++ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
+- "supported, assume 18bpp panel color "
+- "depth.\n");
+- dev_priv->edp_bpp = 18;
++ "supported, assume %dbpp panel color "
++ "depth.\n",
++ dev_priv->edp.bpp);
+ }
+ return;
+ }
+
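++ /* color_depth packs one 2-bit depth code per panel; shift by
++ * panel_type * 2 to pick out ours. */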
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+- dev_priv->edp_bpp = 18;
++ dev_priv->edp.bpp = 18;
+ break;
+ case EDP_24BPP:
+- dev_priv->edp_bpp = 24;
++ dev_priv->edp.bpp = 24;
+ break;
+ case EDP_30BPP:
+- dev_priv->edp_bpp = 30;
++ dev_priv->edp.bpp = 30;
++ break;
++ }
++
++ /* Get the eDP sequencing and link info */
++ edp_pps = &edp->power_seqs[panel_type];
++ edp_link_params = &edp->link_params[panel_type];
++
++ dev_priv->edp.pps = *edp_pps;
++
++ dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
++ DP_LINK_BW_1_62;
++ switch (edp_link_params->lanes) {
++ case 0:
++ dev_priv->edp.lanes = 1;
++ break;
++ case 1:
++ dev_priv->edp.lanes = 2;
++ break;
++ case 3:
++ default:
++ dev_priv->edp.lanes = 4;
++ break;
++ }
++ switch (edp_link_params->preemphasis) {
++ case 0:
++ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
++ break;
++ case 1:
++ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
++ break;
++ case 2:
++ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
++ break;
++ case 3:
++ dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
++ break;
++ }
++ switch (edp_link_params->vswing) {
++ case 0:
++ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
++ break;
++ case 1:
++ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
++ break;
++ case 2:
++ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
++ break;
++ case 3:
++ dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+ break;
+ }
+ }
+@@ -460,7 +501,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+- DRM_DEBUG_KMS("No general definition block is found\n");
++ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ return;
+ }
+ /* judge whether the size of child device meets the requirements.
+@@ -513,50 +554,83 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
+ }
+ return;
+ }
++
++static void
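++/*
++ * Sensible defaults for everything the VBT may override; these replace
++ * the ad-hoc defaults that the individual parse_*() functions used to
++ * set (removed in the hunks above).
++ */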
++init_vbt_defaults(struct drm_i915_private *dev_priv)
++{
++ dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
++
++ /* LFP panel data */
++ dev_priv->lvds_dither = 1;
++ dev_priv->lvds_vbt = 0;
++
++ /* SDVO panel data */
++ dev_priv->sdvo_lvds_vbt_mode = NULL;
++
++ /* general features */
++ dev_priv->int_tv_support = 1;
++ dev_priv->int_crt_support = 1;
++ dev_priv->lvds_use_ssc = 0;
++
++ /* eDP data */
++ dev_priv->edp.bpp = 18;
++}
++
+ /**
+- * intel_init_bios - initialize VBIOS settings & find VBT
++ * intel_parse_bios - find VBT and initialize settings from the BIOS
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
+ * to appropriate values.
+ *
+- * VBT existence is a sanity check that is relied on by other i830_bios.c code.
+- * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+- * feed an updated VBT back through that, compared to what we'll fetch using
+- * this method of groping around in the BIOS data.
+- *
+ * Returns 0 on success, nonzero on failure.
+ */
+ bool
+-intel_init_bios(struct drm_device *dev)
++intel_parse_bios(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+- struct vbt_header *vbt = NULL;
+- struct bdb_header *bdb;
+- u8 __iomem *bios;
+- size_t size;
+- int i;
+-
+- bios = pci_map_rom(pdev, &size);
+- if (!bios)
+- return -1;
+-
+- /* Scour memory looking for the VBT signature */
+- for (i = 0; i + 4 < size; i++) {
+- if (!memcmp(bios + i, "$VBT", 4)) {
+- vbt = (struct vbt_header *)(bios + i);
+- break;
+- }
++ struct bdb_header *bdb = NULL;
++ u8 __iomem *bios = NULL;
++
++ init_vbt_defaults(dev_priv);
++
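++ /* Prefer the VBT handed to us via the ACPI OpRegion; fall back to
++ * scanning the PCI ROM for the "$VBT" signature only if it is
++ * missing or invalid. */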
++ /* XXX Should this validation be moved to intel_opregion.c? */
++ if (dev_priv->opregion.vbt) {
++ struct vbt_header *vbt = dev_priv->opregion.vbt;
++ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
++ DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
++ vbt->signature);
++ bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
++ } else
++ dev_priv->opregion.vbt = NULL;
+ }
+
+- if (!vbt) {
+- DRM_ERROR("VBT signature missing\n");
+- pci_unmap_rom(pdev, bios);
+- return -1;
+- }
++ if (bdb == NULL) {
++ struct vbt_header *vbt = NULL;
++ size_t size;
++ int i;
+
+- bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
++ bios = pci_map_rom(pdev, &size);
++ if (!bios)
++ return -1;
++
++ /* Scour memory looking for the VBT signature */
++ for (i = 0; i + 4 < size; i++) {
++ if (!memcmp(bios + i, "$VBT", 4)) {
++ vbt = (struct vbt_header *)(bios + i);
++ break;
++ }
++ }
++
++ if (!vbt) {
++ DRM_ERROR("VBT signature missing\n");
++ pci_unmap_rom(pdev, bios);
++ return -1;
++ }
++
++ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
++ }
+
+ /* Grab useful general definitions */
+ parse_general_features(dev_priv, bdb);
+@@ -568,7 +642,25 @@ intel_init_bios(struct drm_device *dev)
+ parse_driver_features(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
+
+- pci_unmap_rom(pdev, bios);
++ if (bios)
++ pci_unmap_rom(pdev, bios);
+
+ return 0;
+ }
++
++/* Ensure that vital registers have been initialised, even if the BIOS
++ * is absent or just failing to do its job.
++ */
++void intel_setup_bios(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ /* Set the Panel Power On/Off timings if uninitialized. */
++ if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
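++ /* These magic values are consistent with two 16-bit delay fields
++ * in units of 100us: 0x0190 = 400 -> 40ms, 0x07d0 = 2000 -> 200ms,
++ * 0x015e = 350 -> 35ms. */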
++ /* Set T2 to 40ms and T5 to 200ms */
++ I915_WRITE(PP_ON_DELAYS, 0x019007d0);
++
++ /* Set T3 to 35ms and Tx to 200ms */
++ I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
++ }
++}
+diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
+index 4c18514..5f8e4ed 100644
+--- a/drivers/gpu/drm/i915/intel_bios.h
++++ b/drivers/gpu/drm/i915/intel_bios.h
+@@ -197,7 +197,8 @@ struct bdb_general_features {
+ struct child_device_config {
+ u16 handle;
+ u16 device_type;
+- u8 device_id[10]; /* See DEVICE_TYPE_* above */
++ u8 i2c_speed;
++ u8 rsvd[9];
+ u16 addin_offset;
+ u8 dvo_port; /* See Device_PORT_* above */
+ u8 i2c_pin;
+@@ -466,7 +467,8 @@ struct bdb_edp {
+ struct edp_link_params link_params[16];
+ } __attribute__ ((packed));
+
+-bool intel_init_bios(struct drm_device *dev);
++void intel_setup_bios(struct drm_device *dev);
++bool intel_parse_bios(struct drm_device *dev);
+
+ /*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index 197d4f3..c55c770 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+
+- if (!IS_I9XX(dev))
++ if (IS_GEN2(dev))
+ max_clock = 350000;
+ else
+ max_clock = 400000;
+@@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
+ * Disable separate mode multiplier used when cloning SDVO to CRT
+ * XXX this needs to be adjusted when we really are cloning
+ */
+- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ dpll_md = I915_READ(dpll_md_reg);
+ I915_WRITE(dpll_md_reg,
+ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+@@ -187,11 +187,12 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+ I915_WRITE(PCH_ADPA, adpa);
+
+ if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+- 1000, 1))
++ 1000))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+ if (turn_off_dac) {
+- I915_WRITE(PCH_ADPA, temp);
++ /* Make sure hotplug is enabled */
++ I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
+ (void)I915_READ(PCH_ADPA);
+ }
+
+@@ -244,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+ /* wait for FORCE_DETECT to go off */
+ if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT) == 0,
+- 1000, 1))
++ 1000))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+ }
+
+@@ -261,21 +262,47 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+ return ret;
+ }
+
++static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
++{
++ u8 buf;
++ struct i2c_msg msgs[] = {
++ {
++ .addr = 0xA0,
++ .flags = 0,
++ .len = 1,
++ .buf = &buf,
++ },
++ };
++ /* DDC monitor detect: Does it ACK a write to 0xA0? */
++ return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
++}
++
+ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
+ {
+- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
++ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
++ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+
+ /* CRT should always be at 0, but check anyway */
+ if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
+ return false;
+
+- return intel_ddc_probe(intel_encoder);
++ if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
++ DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
++ return true;
++ }
++
++ if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
++ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
++ return true;
++ }
++
++ return false;
+ }
+
+ static enum drm_connector_status
+ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+ {
+- struct drm_encoder *encoder = &intel_encoder->enc;
++ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+@@ -295,6 +322,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
+ uint8_t st00;
+ enum drm_connector_status status;
+
++ DRM_DEBUG_KMS("starting load-detect on CRT\n");
++
+ if (pipe == 0) {
+ bclrpat_reg = BCLRPAT_A;
+ vtotal_reg = VTOTAL_A;
+@@ -324,9 +353,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
+ /* Set the border color to purple. */
+ I915_WRITE(bclrpat_reg, 0x500050);
+
+- if (IS_I9XX(dev)) {
++ if (!IS_GEN2(dev)) {
+ uint32_t pipeconf = I915_READ(pipeconf_reg);
+ I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
++ POSTING_READ(pipeconf_reg);
+ /* Wait for next Vblank to substitute
+ * border color for Color info */
+ intel_wait_for_vblank(dev, pipe);
+@@ -404,34 +434,37 @@ static enum drm_connector_status
+ intel_crt_detect(struct drm_connector *connector, bool force)
+ {
+ struct drm_device *dev = connector->dev;
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
++ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_crtc *crtc;
+ int dpms_mode;
+ enum drm_connector_status status;
+
+- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+- if (intel_crt_detect_hotplug(connector))
++ if (I915_HAS_HOTPLUG(dev)) {
++ if (intel_crt_detect_hotplug(connector)) {
++ DRM_DEBUG_KMS("CRT detected via hotplug\n");
+ return connector_status_connected;
+- else
++ } else
+ return connector_status_disconnected;
+ }
+
+- if (intel_crt_detect_ddc(encoder))
++ if (intel_crt_detect_ddc(&encoder->base))
+ return connector_status_connected;
+
+ if (!force)
+ return connector->status;
+
+ /* for pre-945g platforms use load detect */
+- if (encoder->crtc && encoder->crtc->enabled) {
+- status = intel_crt_load_detect(encoder->crtc, intel_encoder);
++ if (encoder->base.crtc && encoder->base.crtc->enabled) {
++ status = intel_crt_load_detect(encoder->base.crtc, encoder);
+ } else {
+- crtc = intel_get_load_detect_pipe(intel_encoder, connector,
++ crtc = intel_get_load_detect_pipe(encoder, connector,
+ NULL, &dpms_mode);
+ if (crtc) {
+- status = intel_crt_load_detect(crtc, intel_encoder);
+- intel_release_load_detect_pipe(intel_encoder,
++ if (intel_crt_detect_ddc(&encoder->base))
++ status = connector_status_connected;
++ else
++ status = intel_crt_load_detect(crtc, encoder);
++ intel_release_load_detect_pipe(encoder,
+ connector, dpms_mode);
+ } else
+ status = connector_status_unknown;
+@@ -449,32 +482,18 @@ static void intel_crt_destroy(struct drm_connector *connector)
+
+ static int intel_crt_get_modes(struct drm_connector *connector)
+ {
+- int ret;
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+- struct i2c_adapter *ddc_bus;
+ struct drm_device *dev = connector->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int ret;
+
+-
+- ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
++ ret = intel_ddc_get_modes(connector,
++ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ if (ret || !IS_G4X(dev))
+- goto end;
++ return ret;
+
+ /* Try to probe digital port for output in DVI-I -> VGA mode. */
+- ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
+-
+- if (!ddc_bus) {
+- dev_printk(KERN_ERR, &connector->dev->pdev->dev,
+- "DDC bus registration failed for CRTDDC_D.\n");
+- goto end;
+- }
+- /* Try to get modes by GPIOD port */
+- ret = intel_ddc_get_modes(connector, ddc_bus);
+- intel_i2c_destroy(ddc_bus);
+-
+-end:
+- return ret;
+-
++ return intel_ddc_get_modes(connector,
++ &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
+ }
+
+ static int intel_crt_set_property(struct drm_connector *connector,
+@@ -507,7 +526,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+ .mode_valid = intel_crt_mode_valid,
+ .get_modes = intel_crt_get_modes,
+- .best_encoder = intel_attached_encoder,
++ .best_encoder = intel_best_encoder,
+ };
+
+ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+@@ -520,7 +539,6 @@ void intel_crt_init(struct drm_device *dev)
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 i2c_reg;
+
+ intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
+ if (!intel_encoder)
+@@ -536,27 +554,10 @@ void intel_crt_init(struct drm_device *dev)
+ drm_connector_init(dev, &intel_connector->base,
+ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+- drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
++ drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+
+- drm_mode_connector_attach_encoder(&intel_connector->base,
+- &intel_encoder->enc);
+-
+- /* Set up the DDC bus. */
+- if (HAS_PCH_SPLIT(dev))
+- i2c_reg = PCH_GPIOA;
+- else {
+- i2c_reg = GPIOA;
+- /* Use VBT information for CRT DDC if available */
+- if (dev_priv->crt_ddc_bus != 0)
+- i2c_reg = dev_priv->crt_ddc_bus;
+- }
+- intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
+- if (!intel_encoder->ddc_bus) {
+- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+- "failed.\n");
+- return;
+- }
++ intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+ intel_encoder->type = INTEL_OUTPUT_ANALOG;
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+@@ -566,7 +567,7 @@ void intel_crt_init(struct drm_device *dev)
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+- drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
++ drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
+ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 9792285..0cece04 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -43,8 +43,8 @@
+
+ bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
+ static void intel_update_watermarks(struct drm_device *dev);
+-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
+-static void intel_crtc_update_cursor(struct drm_crtc *crtc);
++static void intel_increase_pllclock(struct drm_crtc *crtc);
++static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+
+ typedef struct {
+ /* given values */
+@@ -342,6 +342,16 @@ static bool
+ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
+
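++/*
++ * FDI link frequency in units of 100 MHz: Ironlake (gen5) reads back
++ * the feedback divider the BIOS programmed; later generations run the
++ * link at a fixed 2.7 GHz.
++ */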
++static inline u32 /* units of 100MHz */
++intel_fdi_link_freq(struct drm_device *dev)
++{
++ if (IS_GEN5(dev)) {
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
++ } else
++ return 27;
++}
++
+ static const intel_limit_t intel_limits_i8xx_dvo = {
+ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
+ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
+@@ -701,16 +711,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+ limit = intel_ironlake_limit(crtc);
+ else if (IS_G4X(dev)) {
+ limit = intel_g4x_limit(crtc);
+- } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
+- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+- limit = &intel_limits_i9xx_lvds;
+- else
+- limit = &intel_limits_i9xx_sdvo;
+ } else if (IS_PINEVIEW(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_pineview_lvds;
+ else
+ limit = &intel_limits_pineview_sdvo;
++ } else if (!IS_GEN2(dev)) {
++ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++ limit = &intel_limits_i9xx_lvds;
++ else
++ limit = &intel_limits_i9xx_sdvo;
+ } else {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i8xx_lvds;
+@@ -744,20 +754,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
+ /**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+-bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
++bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+ {
+- struct drm_device *dev = crtc->dev;
+- struct drm_mode_config *mode_config = &dev->mode_config;
+- struct drm_encoder *l_entry;
++ struct drm_device *dev = crtc->dev;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct intel_encoder *encoder;
+
+- list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
+- if (l_entry && l_entry->crtc == crtc) {
+- struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
+- if (intel_encoder->type == type)
+- return true;
+- }
+- }
+- return false;
++ list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
++ if (encoder->base.crtc == crtc && encoder->type == type)
++ return true;
++
++ return false;
+ }
+
+ #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
+@@ -928,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+
+- /* return directly when it is eDP */
+- if (HAS_eDP)
+- return true;
+-
+ if (target < 200000) {
+ clock.n = 1;
+ clock.p1 = 2;
+@@ -955,26 +958,26 @@ static bool
+ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
+ {
+- intel_clock_t clock;
+- if (target < 200000) {
+- clock.p1 = 2;
+- clock.p2 = 10;
+- clock.n = 2;
+- clock.m1 = 23;
+- clock.m2 = 8;
+- } else {
+- clock.p1 = 1;
+- clock.p2 = 10;
+- clock.n = 1;
+- clock.m1 = 14;
+- clock.m2 = 2;
+- }
+- clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
+- clock.p = (clock.p1 * clock.p2);
+- clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+- clock.vco = 0;
+- memcpy(best_clock, &clock, sizeof(intel_clock_t));
+- return true;
++ intel_clock_t clock;
++ if (target < 200000) {
++ clock.p1 = 2;
++ clock.p2 = 10;
++ clock.n = 2;
++ clock.m1 = 23;
++ clock.m2 = 8;
++ } else {
++ clock.p1 = 1;
++ clock.p2 = 10;
++ clock.n = 1;
++ clock.m1 = 14;
++ clock.m2 = 2;
++ }
++ clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
++ clock.p = (clock.p1 * clock.p2);
++ clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
++ clock.vco = 0;
++ memcpy(best_clock, &clock, sizeof(intel_clock_t));
++ return true;
+ }
+
+ /**
+@@ -1007,9 +1010,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
+ I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
+ /* Wait for vblank interrupt bit to set */
+- if (wait_for((I915_READ(pipestat_reg) &
+- PIPE_VBLANK_INTERRUPT_STATUS),
+- 50, 0))
++ if (wait_for(I915_READ(pipestat_reg) &
++ PIPE_VBLANK_INTERRUPT_STATUS,
++ 50))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+ }
+
+@@ -1028,36 +1031,35 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
+ * Otherwise:
+ * wait for the display line value to settle (it usually
+ * ends up stopping at the start of the next frame).
+- *
++ *
+ */
+-static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
++void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+- int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
++ int reg = PIPECONF(pipe);
+
+ /* Wait for the Pipe State to go off */
+- if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
+- 100, 0))
++ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
++ 100))
+ DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ } else {
+ u32 last_line;
+- int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
++ int reg = PIPEDSL(pipe);
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ /* Wait for the display line to settle */
+ do {
+- last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
++ last_line = I915_READ(reg) & DSL_LINEMASK;
+ mdelay(5);
+- } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
++ } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
+ time_after(timeout, jiffies));
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ }
+ }
+
+-/* Parameters have changed, update FBC info */
+ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ {
+ struct drm_device *dev = crtc->dev;
+@@ -1069,6 +1071,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
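++ /* Nothing to do if FBC is already enabled with identical
++ * parameters; otherwise turn it off before reprogramming. */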
++ if (fb->pitch == dev_priv->cfb_pitch &&
++ obj_priv->fence_reg == dev_priv->cfb_fence &&
++ intel_crtc->plane == dev_priv->cfb_plane &&
++ I915_READ(FBC_CONTROL) & FBC_CTL_EN)
++ return;
++
++ i8xx_disable_fbc(dev);
++
+ dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+
+ if (fb->pitch < dev_priv->cfb_pitch)
+@@ -1102,7 +1112,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+- dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
++ dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+ }
+
+ void i8xx_disable_fbc(struct drm_device *dev)
+@@ -1110,19 +1120,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+- if (!I915_HAS_FBC(dev))
+- return;
+-
+- if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
+- return; /* Already off, just return */
+-
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
++ if ((fbc_ctl & FBC_CTL_EN) == 0)
++ return;
++
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
++ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+@@ -1145,14 +1152,27 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+- int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
+- DPFC_CTL_PLANEB);
++ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
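++ /* Leave FBC alone if it is already running with these exact
++ * parameters; otherwise disable it and wait for a vblank before
++ * reprogramming. */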
++ dpfc_ctl = I915_READ(DPFC_CONTROL);
++ if (dpfc_ctl & DPFC_CTL_EN) {
++ if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
++ dev_priv->cfb_fence == obj_priv->fence_reg &&
++ dev_priv->cfb_plane == intel_crtc->plane &&
++ dev_priv->cfb_y == crtc->y)
++ return;
++
++ I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
++ POSTING_READ(DPFC_CONTROL);
++ intel_wait_for_vblank(dev, intel_crtc->pipe);
++ }
++
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
++ dev_priv->cfb_y = crtc->y;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ if (obj_priv->tiling_mode != I915_TILING_NONE) {
+@@ -1162,7 +1182,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ }
+
+- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+@@ -1181,10 +1200,12 @@ void g4x_disable_fbc(struct drm_device *dev)
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+- dpfc_ctl &= ~DPFC_CTL_EN;
+- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
++ if (dpfc_ctl & DPFC_CTL_EN) {
++ dpfc_ctl &= ~DPFC_CTL_EN;
++ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+- DRM_DEBUG_KMS("disabled FBC\n");
++ DRM_DEBUG_KMS("disabled FBC\n");
++ }
+ }
+
+ static bool g4x_fbc_enabled(struct drm_device *dev)
+@@ -1202,16 +1223,30 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+- int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
+- DPFC_CTL_PLANEB;
++ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
++ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
++ if (dpfc_ctl & DPFC_CTL_EN) {
++ if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
++ dev_priv->cfb_fence == obj_priv->fence_reg &&
++ dev_priv->cfb_plane == intel_crtc->plane &&
++ dev_priv->cfb_offset == obj_priv->gtt_offset &&
++ dev_priv->cfb_y == crtc->y)
++ return;
++
++ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
++ POSTING_READ(ILK_DPFC_CONTROL);
++ intel_wait_for_vblank(dev, intel_crtc->pipe);
++ }
++
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
++ dev_priv->cfb_offset = obj_priv->gtt_offset;
++ dev_priv->cfb_y = crtc->y;
+
+- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= DPFC_RESERVED;
+ dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ if (obj_priv->tiling_mode != I915_TILING_NONE) {
+@@ -1221,15 +1256,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ }
+
+- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+ /* enable it... */
+- I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
+- DPFC_CTL_EN);
++ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+ }
+@@ -1241,10 +1274,12 @@ void ironlake_disable_fbc(struct drm_device *dev)
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+- dpfc_ctl &= ~DPFC_CTL_EN;
+- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
++ if (dpfc_ctl & DPFC_CTL_EN) {
++ dpfc_ctl &= ~DPFC_CTL_EN;
++ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+- DRM_DEBUG_KMS("disabled FBC\n");
++ DRM_DEBUG_KMS("disabled FBC\n");
++ }
+ }
+
+ static bool ironlake_fbc_enabled(struct drm_device *dev)
+@@ -1286,8 +1321,7 @@ void intel_disable_fbc(struct drm_device *dev)
+
+ /**
+ * intel_update_fbc - enable/disable FBC as needed
+- * @crtc: CRTC to point the compressor at
+- * @mode: mode in use
++ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+@@ -1304,18 +1338,14 @@ void intel_disable_fbc(struct drm_device *dev)
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+-static void intel_update_fbc(struct drm_crtc *crtc,
+- struct drm_display_mode *mode)
++static void intel_update_fbc(struct drm_device *dev)
+ {
+- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct drm_framebuffer *fb = crtc->fb;
++ struct drm_crtc *crtc = NULL, *tmp_crtc;
++ struct intel_crtc *intel_crtc;
++ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+- struct drm_crtc *tmp_crtc;
+- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+- int plane = intel_crtc->plane;
+- int crtcs_enabled = 0;
+
+ DRM_DEBUG_KMS("\n");
+
+@@ -1325,12 +1355,6 @@ static void intel_update_fbc(struct drm_crtc *crtc,
+ if (!I915_HAS_FBC(dev))
+ return;
+
+- if (!crtc->fb)
+- return;
+-
+- intel_fb = to_intel_framebuffer(fb);
+- obj_priv = to_intel_bo(intel_fb->obj);
+-
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+@@ -1341,35 +1365,47 @@ static void intel_update_fbc(struct drm_crtc *crtc,
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+- if (tmp_crtc->enabled)
+- crtcs_enabled++;
++ if (tmp_crtc->enabled) {
++ if (crtc) {
++ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
++ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
++ goto out_disable;
++ }
++ crtc = tmp_crtc;
++ }
+ }
+- DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
+- if (crtcs_enabled > 1) {
+- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
++
++ if (!crtc || crtc->fb == NULL) {
++ DRM_DEBUG_KMS("no output, disabling\n");
++ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ goto out_disable;
+ }
++
++ intel_crtc = to_intel_crtc(crtc);
++ fb = crtc->fb;
++ intel_fb = to_intel_framebuffer(fb);
++ obj_priv = to_intel_bo(intel_fb->obj);
++
+ if (intel_fb->obj->size > dev_priv->cfb_size) {
+ DRM_DEBUG_KMS("framebuffer too large, disabling "
+- "compression\n");
++ "compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+- if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+- (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
++ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
++ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+- "disabling\n");
++ "disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ goto out_disable;
+ }
+- if ((mode->hdisplay > 2048) ||
+- (mode->vdisplay > 1536)) {
++ if ((crtc->mode.hdisplay > 2048) ||
++ (crtc->mode.vdisplay > 1536)) {
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ goto out_disable;
+ }
+- if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
++ if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ goto out_disable;
+@@ -1384,18 +1420,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
+ if (in_dbg_master())
+ goto out_disable;
+
+- if (intel_fbc_enabled(dev)) {
+- /* We can re-enable it in this case, but need to update pitch */
+- if ((fb->pitch > dev_priv->cfb_pitch) ||
+- (obj_priv->fence_reg != dev_priv->cfb_fence) ||
+- (plane != dev_priv->cfb_plane))
+- intel_disable_fbc(dev);
+- }
+-
+- /* Now try to turn it back on if possible */
+- if (!intel_fbc_enabled(dev))
+- intel_enable_fbc(crtc, 500);
+-
++ intel_enable_fbc(crtc, 500);
+ return;
+
+ out_disable:
+@@ -1407,7 +1432,9 @@ out_disable:
+ }
+
+ int
+-intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
++intel_pin_and_fence_fb_obj(struct drm_device *dev,
++ struct drm_gem_object *obj,
++ bool pipelined)
+ {
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ u32 alignment;
+@@ -1417,7 +1444,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+ case I915_TILING_NONE:
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ alignment = 128 * 1024;
+- else if (IS_I965G(dev))
++ else if (INTEL_INFO(dev)->gen >= 4)
+ alignment = 4 * 1024;
+ else
+ alignment = 64 * 1024;
+@@ -1435,9 +1462,13 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+ }
+
+ ret = i915_gem_object_pin(obj, alignment);
+- if (ret != 0)
++ if (ret)
+ return ret;
+
++ ret = i915_gem_object_set_to_display_plane(obj, pipelined);
++ if (ret)
++ goto err_unpin;
++
+ /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always install
+@@ -1445,14 +1476,16 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+ */
+ if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+ obj_priv->tiling_mode != I915_TILING_NONE) {
+- ret = i915_gem_object_get_fence_reg(obj);
+- if (ret != 0) {
+- i915_gem_object_unpin(obj);
+- return ret;
+- }
++ ret = i915_gem_object_get_fence_reg(obj, false);
++ if (ret)
++ goto err_unpin;
+ }
+
+ return 0;
++
++err_unpin:
++ i915_gem_object_unpin(obj);
++ return ret;
+ }
+
+ /* Assume fb object is pinned & idle & fenced and just update base pointers */
+@@ -1468,12 +1501,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long Start, Offset;
+- int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
+- int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
+- int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
+- int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
+- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
++ u32 reg;
+
+ switch (plane) {
+ case 0:
+@@ -1488,7 +1517,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ obj = intel_fb->obj;
+ obj_priv = to_intel_bo(obj);
+
+- dspcntr = I915_READ(dspcntr_reg);
++ reg = DSPCNTR(plane);
++ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->bits_per_pixel) {
+@@ -1509,7 +1539,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ DRM_ERROR("Unknown color depth\n");
+ return -EINVAL;
+ }
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+@@ -1520,28 +1550,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ /* must disable */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+- I915_WRITE(dspcntr_reg, dspcntr);
++ I915_WRITE(reg, dspcntr);
+
+ Start = obj_priv->gtt_offset;
+ Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, fb->pitch);
+- I915_WRITE(dspstride, fb->pitch);
+- if (IS_I965G(dev)) {
+- I915_WRITE(dspsurf, Start);
+- I915_WRITE(dsptileoff, (y << 16) | x);
+- I915_WRITE(dspbase, Offset);
+- } else {
+- I915_WRITE(dspbase, Start + Offset);
+- }
+- POSTING_READ(dspbase);
+-
+- if (IS_I965G(dev) || plane == 0)
+- intel_update_fbc(crtc, &crtc->mode);
++ I915_WRITE(DSPSTRIDE(plane), fb->pitch);
++ if (INTEL_INFO(dev)->gen >= 4) {
++ I915_WRITE(DSPSURF(plane), Start);
++ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
++ I915_WRITE(DSPADDR(plane), Offset);
++ } else
++ I915_WRITE(DSPADDR(plane), Start + Offset);
++ POSTING_READ(reg);
+
+- intel_wait_for_vblank(dev, intel_crtc->pipe);
+- intel_increase_pllclock(crtc, true);
++ intel_update_fbc(dev);
++ intel_increase_pllclock(crtc);
+
+ return 0;
+ }
+@@ -1553,11 +1579,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+- struct intel_framebuffer *intel_fb;
+- struct drm_i915_gem_object *obj_priv;
+- struct drm_gem_object *obj;
+- int pipe = intel_crtc->pipe;
+- int plane = intel_crtc->plane;
+ int ret;
+
+ /* no fb bound */
+@@ -1566,45 +1587,41 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ return 0;
+ }
+
+- switch (plane) {
++ switch (intel_crtc->plane) {
+ case 0:
+ case 1:
+ break;
+ default:
+- DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+- intel_fb = to_intel_framebuffer(crtc->fb);
+- obj = intel_fb->obj;
+- obj_priv = to_intel_bo(obj);
+-
+ mutex_lock(&dev->struct_mutex);
+- ret = intel_pin_and_fence_fb_obj(dev, obj);
++ ret = intel_pin_and_fence_fb_obj(dev,
++ to_intel_framebuffer(crtc->fb)->obj,
++ false);
+ if (ret != 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+- ret = i915_gem_object_set_to_display_plane(obj);
+- if (ret != 0) {
+- i915_gem_object_unpin(obj);
+- mutex_unlock(&dev->struct_mutex);
+- return ret;
++ if (old_fb) {
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
++
++ wait_event(dev_priv->pending_flip_queue,
++ atomic_read(&obj_priv->pending_flip) == 0);
+ }
+
+ ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+ if (ret) {
+- i915_gem_object_unpin(obj);
++ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+- if (old_fb) {
+- intel_fb = to_intel_framebuffer(old_fb);
+- obj_priv = to_intel_bo(intel_fb->obj);
+- i915_gem_object_unpin(intel_fb->obj);
+- }
++ if (old_fb)
++ i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+
+ mutex_unlock(&dev->struct_mutex);
+
+@@ -1615,7 +1632,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ if (!master_priv->sarea_priv)
+ return 0;
+
+- if (pipe) {
++ if (intel_crtc->pipe) {
+ master_priv->sarea_priv->pipeB_x = x;
+ master_priv->sarea_priv->pipeB_y = y;
+ } else {
+@@ -1626,7 +1643,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ return 0;
+ }
+
+-static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
++static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -1659,9 +1676,41 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
+ }
+ I915_WRITE(DP_A, dpa_ctl);
+
++ POSTING_READ(DP_A);
+ udelay(500);
+ }
+
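++/*
++ * Once link training has completed, switch both the CPU FDI TX and the
++ * PCH FDI RX from the training pattern to the normal link (idle) pattern.
++ */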
++static void intel_fdi_normal_train(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++ int pipe = intel_crtc->pipe;
++ u32 reg, temp;
++
++ /* enable normal train */
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
++ temp &= ~FDI_LINK_TRAIN_NONE;
++ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
++ I915_WRITE(reg, temp);
++
++ reg = FDI_RX_CTL(pipe);
++ temp = I915_READ(reg);
++ if (HAS_PCH_CPT(dev)) {
++ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
++ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
++ } else {
++ temp &= ~FDI_LINK_TRAIN_NONE;
++ temp |= FDI_LINK_TRAIN_NONE;
++ }
++ I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
++
++ /* wait one idle pattern time */
++ POSTING_READ(reg);
++ udelay(1000);
++}
++
+ /* The FDI link training functions for ILK/Ibexpeak. */
+ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ {
+@@ -1669,84 +1718,88 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+- u32 temp, tries = 0;
++ u32 reg, temp, tries;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+ for train result */
+- temp = I915_READ(fdi_rx_imr_reg);
++ reg = FDI_RX_IMR(pipe);
++ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+- I915_WRITE(fdi_rx_imr_reg, temp);
+- I915_READ(fdi_rx_imr_reg);
++ I915_WRITE(reg, temp);
++ I915_READ(reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+- temp = I915_READ(fdi_tx_reg);
+- temp |= FDI_TX_ENABLE;
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+- I915_WRITE(fdi_tx_reg, temp);
+- I915_READ(fdi_tx_reg);
++ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+- temp = I915_READ(fdi_rx_reg);
++ reg = FDI_RX_CTL(pipe);
++ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+- I915_READ(fdi_rx_reg);
++ I915_WRITE(reg, temp | FDI_RX_ENABLE);
++
++ POSTING_READ(reg);
+ udelay(150);
+
++ /* Ironlake workaround, enable clock pointer after FDI enable */
++ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
++
++ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+- temp = I915_READ(fdi_rx_iir_reg);
++ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+- I915_WRITE(fdi_rx_iir_reg,
+- temp | FDI_RX_BIT_LOCK);
++ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+ }
+ if (tries == 5)
+- DRM_DEBUG_KMS("FDI train 1 fail!\n");
++ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+- temp = I915_READ(fdi_tx_reg);
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+- I915_WRITE(fdi_tx_reg, temp);
++ I915_WRITE(reg, temp);
+
+- temp = I915_READ(fdi_rx_reg);
++ reg = FDI_RX_CTL(pipe);
++ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+- I915_WRITE(fdi_rx_reg, temp);
+- udelay(150);
++ I915_WRITE(reg, temp);
+
+- tries = 0;
++ POSTING_READ(reg);
++ udelay(150);
+
++ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+- temp = I915_READ(fdi_rx_iir_reg);
++ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+- I915_WRITE(fdi_rx_iir_reg,
+- temp | FDI_RX_SYMBOL_LOCK);
++ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (tries == 5)
+- DRM_DEBUG_KMS("FDI train 2 fail!\n");
++ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done\n");
+ }
+
+-static int snb_b_fdi_train_param [] = {
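++/* Voltage-swing/pre-emphasis pairs tried in turn during SNB FDI training */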
++static const int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+@@ -1760,24 +1813,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+- u32 temp, i;
++ u32 reg, temp, i;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+ for train result */
+- temp = I915_READ(fdi_rx_imr_reg);
++ reg = FDI_RX_IMR(pipe);
++ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+- I915_WRITE(fdi_rx_imr_reg, temp);
+- I915_READ(fdi_rx_imr_reg);
++ I915_WRITE(reg, temp);
++
++ POSTING_READ(reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+- temp = I915_READ(fdi_tx_reg);
+- temp |= FDI_TX_ENABLE;
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+@@ -1785,10 +1836,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+- I915_WRITE(fdi_tx_reg, temp);
+- I915_READ(fdi_tx_reg);
++ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+- temp = I915_READ(fdi_rx_reg);
++ reg = FDI_RX_CTL(pipe);
++ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+@@ -1796,32 +1847,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+- I915_READ(fdi_rx_reg);
++ I915_WRITE(reg, temp | FDI_RX_ENABLE);
++
++ POSTING_READ(reg);
+ udelay(150);
+
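++	/* Step through the voltage/emphasis table until the PCH reports
++	 * bit lock */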
+ for (i = 0; i < 4; i++ ) {
+- temp = I915_READ(fdi_tx_reg);
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+- I915_WRITE(fdi_tx_reg, temp);
++ I915_WRITE(reg, temp);
++
++ POSTING_READ(reg);
+ udelay(500);
+
+- temp = I915_READ(fdi_rx_iir_reg);
++ reg = FDI_RX_IIR(pipe);
++ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK) {
+- I915_WRITE(fdi_rx_iir_reg,
+- temp | FDI_RX_BIT_LOCK);
++ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+- DRM_DEBUG_KMS("FDI train 1 fail!\n");
++ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+- temp = I915_READ(fdi_tx_reg);
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN6(dev)) {
+@@ -1829,9 +1885,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+- I915_WRITE(fdi_tx_reg, temp);
++ I915_WRITE(reg, temp);
+
+- temp = I915_READ(fdi_rx_reg);
++ reg = FDI_RX_CTL(pipe);
++ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+@@ -1839,535 +1896,596 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+- I915_WRITE(fdi_rx_reg, temp);
++ I915_WRITE(reg, temp);
++
++ POSTING_READ(reg);
+ udelay(150);
+
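++	/* Walk the voltage/emphasis table again, now waiting for symbol
++	 * lock */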
+ for (i = 0; i < 4; i++ ) {
+- temp = I915_READ(fdi_tx_reg);
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+- I915_WRITE(fdi_tx_reg, temp);
++ I915_WRITE(reg, temp);
++
++ POSTING_READ(reg);
+ udelay(500);
+
+- temp = I915_READ(fdi_rx_iir_reg);
++ reg = FDI_RX_IIR(pipe);
++ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+- I915_WRITE(fdi_rx_iir_reg,
+- temp | FDI_RX_SYMBOL_LOCK);
++ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+- DRM_DEBUG_KMS("FDI train 2 fail!\n");
++ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+ }
+
+-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
++static void ironlake_fdi_enable(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+- int plane = intel_crtc->plane;
+- int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
+- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+- int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
+- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+- int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
+- int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+- int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+- int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+- int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+- int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+- int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+- int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
+- int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
+- int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
+- int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
+- int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
+- int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+- int trans_dpll_sel = (pipe == 0) ? 0 : 1;
+- u32 temp;
+- u32 pipe_bpc;
+-
+- temp = I915_READ(pipeconf_reg);
+- pipe_bpc = temp & PIPE_BPC_MASK;
++ u32 reg, temp;
+
+- /* XXX: When our outputs are all unaware of DPMS modes other than off
+- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+- */
+- switch (mode) {
+- case DRM_MODE_DPMS_ON:
+- case DRM_MODE_DPMS_STANDBY:
+- case DRM_MODE_DPMS_SUSPEND:
+- DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
++ /* Write the TU size bits so error detection works */
++ I915_WRITE(FDI_RX_TUSIZE1(pipe),
++ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+- temp = I915_READ(PCH_LVDS);
+- if ((temp & LVDS_PORT_EN) == 0) {
+- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+- POSTING_READ(PCH_LVDS);
+- }
+- }
++ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
++ reg = FDI_RX_CTL(pipe);
++ temp = I915_READ(reg);
++ temp &= ~((0x7 << 19) | (0x7 << 16));
++ temp |= (intel_crtc->fdi_lanes - 1) << 19;
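++	/* keep the FDI RX bpc field in sync with the pipe's PIPECONF bpc */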
++ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
++ I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+
+- if (!HAS_eDP) {
++ POSTING_READ(reg);
++ udelay(200);
+
+- /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+- temp = I915_READ(fdi_rx_reg);
+- /*
+- * make the BPC in FDI Rx be consistent with that in
+- * pipeconf reg.
+- */
+- temp &= ~(0x7 << 16);
+- temp |= (pipe_bpc << 11);
+- temp &= ~(7 << 19);
+- temp |= (intel_crtc->fdi_lanes - 1) << 19;
+- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+- I915_READ(fdi_rx_reg);
+- udelay(200);
++ /* Switch from Rawclk to PCDclk */
++ temp = I915_READ(reg);
++ I915_WRITE(reg, temp | FDI_PCDCLK);
+
+- /* Switch from Rawclk to PCDclk */
+- temp = I915_READ(fdi_rx_reg);
+- I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+- I915_READ(fdi_rx_reg);
+- udelay(200);
++ POSTING_READ(reg);
++ udelay(200);
+
+- /* Enable CPU FDI TX PLL, always on for Ironlake */
+- temp = I915_READ(fdi_tx_reg);
+- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+- I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+- I915_READ(fdi_tx_reg);
+- udelay(100);
+- }
+- }
++ /* Enable CPU FDI TX PLL, always on for Ironlake */
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
++ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
++ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+
+- /* Enable panel fitting for LVDS */
+- if (dev_priv->pch_pf_size &&
+- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+- || HAS_eDP || intel_pch_has_edp(crtc))) {
+- /* Force use of hard-coded filter coefficients
+- * as some pre-programmed values are broken,
+- * e.g. x201.
+- */
+- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+- PF_ENABLE | PF_FILTER_MED_3x3);
+- I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+- dev_priv->pch_pf_pos);
+- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+- dev_priv->pch_pf_size);
+- }
++ POSTING_READ(reg);
++ udelay(100);
++ }
++}
+
+- /* Enable CPU pipe */
+- temp = I915_READ(pipeconf_reg);
+- if ((temp & PIPEACONF_ENABLE) == 0) {
+- I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+- I915_READ(pipeconf_reg);
+- udelay(100);
+- }
++static void intel_flush_display_plane(struct drm_device *dev,
++ int plane)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 reg = DSPADDR(plane);
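++	/* rewriting the base address latches any pending plane updates */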
++ I915_WRITE(reg, I915_READ(reg));
++}
+
+- /* configure and enable CPU plane */
+- temp = I915_READ(dspcntr_reg);
+- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+- I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+- /* Flush the plane changes */
+- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+- }
++/*
++ * When we disable a pipe, we need to clear any pending scanline wait events
++ * to avoid hanging the ring, which we assume we are waiting on.
++ */
++static void intel_clear_scanline_wait(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 tmp;
+
+- if (!HAS_eDP) {
+- /* For PCH output, training FDI link */
+- if (IS_GEN6(dev))
+- gen6_fdi_link_train(crtc);
+- else
+- ironlake_fdi_link_train(crtc);
++ if (IS_GEN2(dev))
++ /* Can't break the hang on i8xx */
++ return;
+
+- /* enable PCH DPLL */
+- temp = I915_READ(pch_dpll_reg);
+- if ((temp & DPLL_VCO_ENABLE) == 0) {
+- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+- I915_READ(pch_dpll_reg);
+- }
+- udelay(200);
++ tmp = I915_READ(PRB0_CTL);
++ if (tmp & RING_WAIT) {
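++		/* writing the wait bit back to CTL clears the wait condition */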
++ I915_WRITE(PRB0_CTL, tmp);
++ POSTING_READ(PRB0_CTL);
++ }
++}
+
+- if (HAS_PCH_CPT(dev)) {
+- /* Be sure PCH DPLL SEL is set */
+- temp = I915_READ(PCH_DPLL_SEL);
+- if (trans_dpll_sel == 0 &&
+- (temp & TRANSA_DPLL_ENABLE) == 0)
+- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+- else if (trans_dpll_sel == 1 &&
+- (temp & TRANSB_DPLL_ENABLE) == 0)
+- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+- I915_WRITE(PCH_DPLL_SEL, temp);
+- I915_READ(PCH_DPLL_SEL);
+- }
++static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
++{
++ struct drm_i915_gem_object *obj_priv;
++ struct drm_i915_private *dev_priv;
+
+- /* set transcoder timing */
+- I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
+- I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
+- I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
+-
+- I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
+- I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
+- I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
+-
+- /* enable normal train */
+- temp = I915_READ(fdi_tx_reg);
+- temp &= ~FDI_LINK_TRAIN_NONE;
+- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+- FDI_TX_ENHANCE_FRAME_ENABLE);
+- I915_READ(fdi_tx_reg);
+-
+- temp = I915_READ(fdi_rx_reg);
+- if (HAS_PCH_CPT(dev)) {
+- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+- temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+- } else {
+- temp &= ~FDI_LINK_TRAIN_NONE;
+- temp |= FDI_LINK_TRAIN_NONE;
+- }
+- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+- I915_READ(fdi_rx_reg);
++ if (crtc->fb == NULL)
++ return;
+
+- /* wait one idle pattern time */
+- udelay(100);
++ obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
++ dev_priv = crtc->dev->dev_private;
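++	/* sleep until all page flips queued against this fb have completed */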
++ wait_event(dev_priv->pending_flip_queue,
++ atomic_read(&obj_priv->pending_flip) == 0);
++}
+
+- /* For PCH DP, enable TRANS_DP_CTL */
+- if (HAS_PCH_CPT(dev) &&
+- intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+- int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+- int reg;
+-
+- reg = I915_READ(trans_dp_ctl);
+- reg &= ~(TRANS_DP_PORT_SEL_MASK |
+- TRANS_DP_SYNC_MASK);
+- reg |= (TRANS_DP_OUTPUT_ENABLE |
+- TRANS_DP_ENH_FRAMING);
+-
+- if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+- reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+- if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+- reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+-
+- switch (intel_trans_dp_port_sel(crtc)) {
+- case PCH_DP_B:
+- reg |= TRANS_DP_PORT_SEL_B;
+- break;
+- case PCH_DP_C:
+- reg |= TRANS_DP_PORT_SEL_C;
+- break;
+- case PCH_DP_D:
+- reg |= TRANS_DP_PORT_SEL_D;
+- break;
+- default:
+- DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+- reg |= TRANS_DP_PORT_SEL_B;
+- break;
+- }
++static void ironlake_crtc_enable(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++ int pipe = intel_crtc->pipe;
++ int plane = intel_crtc->plane;
++ u32 reg, temp;
+
+- I915_WRITE(trans_dp_ctl, reg);
+- POSTING_READ(trans_dp_ctl);
+- }
++ if (intel_crtc->active)
++ return;
+
+- /* enable PCH transcoder */
+- temp = I915_READ(transconf_reg);
+- /*
+- * make the BPC in transcoder be consistent with
+- * that in pipeconf reg.
+- */
+- temp &= ~PIPE_BPC_MASK;
+- temp |= pipe_bpc;
+- I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
+- I915_READ(transconf_reg);
++ intel_crtc->active = true;
++ intel_update_watermarks(dev);
+
+- if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
+- DRM_ERROR("failed to enable transcoder\n");
+- }
++ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
++ temp = I915_READ(PCH_LVDS);
++ if ((temp & LVDS_PORT_EN) == 0)
++ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
++ }
+
+- intel_crtc_load_lut(crtc);
++ ironlake_fdi_enable(crtc);
+
+- intel_update_fbc(crtc, &crtc->mode);
+- break;
++ /* Enable panel fitting for LVDS */
++ if (dev_priv->pch_pf_size &&
++ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
++ /* Force use of hard-coded filter coefficients
++ * as some pre-programmed values are broken,
++ * e.g. x201.
++ */
++ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
++ PF_ENABLE | PF_FILTER_MED_3x3);
++ I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
++ dev_priv->pch_pf_pos);
++ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
++ dev_priv->pch_pf_size);
++ }
++
++ /* Enable CPU pipe */
++ reg = PIPECONF(pipe);
++ temp = I915_READ(reg);
++ if ((temp & PIPECONF_ENABLE) == 0) {
++ I915_WRITE(reg, temp | PIPECONF_ENABLE);
++ POSTING_READ(reg);
++ intel_wait_for_vblank(dev, intel_crtc->pipe);
++ }
++
++ /* configure and enable CPU plane */
++ reg = DSPCNTR(plane);
++ temp = I915_READ(reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
++ intel_flush_display_plane(dev, plane);
++ }
++
++ /* For PCH output, training FDI link */
++ if (IS_GEN6(dev))
++ gen6_fdi_link_train(crtc);
++ else
++ ironlake_fdi_link_train(crtc);
++
++ /* enable PCH DPLL */
++ reg = PCH_DPLL(pipe);
++ temp = I915_READ(reg);
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
++ POSTING_READ(reg);
++ udelay(200);
++ }
+
+- case DRM_MODE_DPMS_OFF:
+- DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
++ if (HAS_PCH_CPT(dev)) {
++ /* Be sure PCH DPLL SEL is set */
++ temp = I915_READ(PCH_DPLL_SEL);
++ if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
++ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
++ else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
++ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
++ I915_WRITE(PCH_DPLL_SEL, temp);
++ }
++
++ /* set transcoder timing */
++ I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
++ I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
++ I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
++
++ I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
++ I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
++ I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
++
++ intel_fdi_normal_train(crtc);
++
++ /* For PCH DP, enable TRANS_DP_CTL */
++ if (HAS_PCH_CPT(dev) &&
++ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
++ reg = TRANS_DP_CTL(pipe);
++ temp = I915_READ(reg);
++ temp &= ~(TRANS_DP_PORT_SEL_MASK |
++ TRANS_DP_SYNC_MASK);
++ temp |= (TRANS_DP_OUTPUT_ENABLE |
++ TRANS_DP_ENH_FRAMING);
+
+- drm_vblank_off(dev, pipe);
+- /* Disable display plane */
+- temp = I915_READ(dspcntr_reg);
+- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+- I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+- /* Flush the plane changes */
+- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+- I915_READ(dspbase_reg);
++ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
++ temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
++ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
++ temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
++
++ switch (intel_trans_dp_port_sel(crtc)) {
++ case PCH_DP_B:
++ temp |= TRANS_DP_PORT_SEL_B;
++ break;
++ case PCH_DP_C:
++ temp |= TRANS_DP_PORT_SEL_C;
++ break;
++ case PCH_DP_D:
++ temp |= TRANS_DP_PORT_SEL_D;
++ break;
++ default:
++ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
++ temp |= TRANS_DP_PORT_SEL_B;
++ break;
+ }
+
+- if (dev_priv->cfb_plane == plane &&
+- dev_priv->display.disable_fbc)
+- dev_priv->display.disable_fbc(dev);
++ I915_WRITE(reg, temp);
++ }
+
+- /* disable cpu pipe, disable after all planes disabled */
+- temp = I915_READ(pipeconf_reg);
+- if ((temp & PIPEACONF_ENABLE) != 0) {
+- I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++ /* enable PCH transcoder */
++ reg = TRANSCONF(pipe);
++ temp = I915_READ(reg);
++ /*
++ * make the BPC in transcoder be consistent with
++ * that in pipeconf reg.
++ */
++ temp &= ~PIPE_BPC_MASK;
++ temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
++ I915_WRITE(reg, temp | TRANS_ENABLE);
++ if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
++ DRM_ERROR("failed to enable transcoder %d\n", pipe);
+
+- /* wait for cpu pipe off, pipe state */
+- if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
+- DRM_ERROR("failed to turn off cpu pipe\n");
+- } else
+- DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
++ intel_crtc_load_lut(crtc);
++ intel_update_fbc(dev);
++ intel_crtc_update_cursor(crtc, true);
++}
+
+- udelay(100);
++static void ironlake_crtc_disable(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++ int pipe = intel_crtc->pipe;
++ int plane = intel_crtc->plane;
++ u32 reg, temp;
+
+- /* Disable PF */
+- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
++ if (!intel_crtc->active)
++ return;
+
+- /* disable CPU FDI tx and PCH FDI rx */
+- temp = I915_READ(fdi_tx_reg);
+- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
+- I915_READ(fdi_tx_reg);
++ intel_crtc_wait_for_pending_flips(crtc);
++ drm_vblank_off(dev, pipe);
++ intel_crtc_update_cursor(crtc, false);
+
+- temp = I915_READ(fdi_rx_reg);
+- /* BPC in FDI rx is consistent with that in pipeconf */
+- temp &= ~(0x07 << 16);
+- temp |= (pipe_bpc << 11);
+- I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
+- I915_READ(fdi_rx_reg);
++ /* Disable display plane */
++ reg = DSPCNTR(plane);
++ temp = I915_READ(reg);
++ if (temp & DISPLAY_PLANE_ENABLE) {
++ I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
++ intel_flush_display_plane(dev, plane);
++ }
+
+- udelay(100);
++ if (dev_priv->cfb_plane == plane &&
++ dev_priv->display.disable_fbc)
++ dev_priv->display.disable_fbc(dev);
+
+- /* still set train pattern 1 */
+- temp = I915_READ(fdi_tx_reg);
++ /* disable cpu pipe, disable after all planes disabled */
++ reg = PIPECONF(pipe);
++ temp = I915_READ(reg);
++ if (temp & PIPECONF_ENABLE) {
++ I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
++ POSTING_READ(reg);
++ /* wait for cpu pipe off, pipe state */
++ intel_wait_for_pipe_off(dev, intel_crtc->pipe);
++ }
++
++ /* Disable PF */
++ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
++ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
++
++ /* disable CPU FDI tx and PCH FDI rx */
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
++ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
++ POSTING_READ(reg);
++
++ reg = FDI_RX_CTL(pipe);
++ temp = I915_READ(reg);
++ temp &= ~(0x7 << 16);
++ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
++ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
++
++ POSTING_READ(reg);
++ udelay(100);
++
++ /* Ironlake workaround, disable clock pointer after downing FDI */
++ if (HAS_PCH_IBX(dev))
++ I915_WRITE(FDI_RX_CHICKEN(pipe),
++ I915_READ(FDI_RX_CHICKEN(pipe)) &
++ ~FDI_RX_PHASE_SYNC_POINTER_ENABLE);
++
++ /* still set train pattern 1 */
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
++ temp &= ~FDI_LINK_TRAIN_NONE;
++ temp |= FDI_LINK_TRAIN_PATTERN_1;
++ I915_WRITE(reg, temp);
++
++ reg = FDI_RX_CTL(pipe);
++ temp = I915_READ(reg);
++ if (HAS_PCH_CPT(dev)) {
++ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
++ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
++ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+- I915_WRITE(fdi_tx_reg, temp);
+- POSTING_READ(fdi_tx_reg);
+-
+- temp = I915_READ(fdi_rx_reg);
+- if (HAS_PCH_CPT(dev)) {
+- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+- } else {
+- temp &= ~FDI_LINK_TRAIN_NONE;
+- temp |= FDI_LINK_TRAIN_PATTERN_1;
+- }
+- I915_WRITE(fdi_rx_reg, temp);
+- POSTING_READ(fdi_rx_reg);
++ }
++ /* BPC in FDI rx is consistent with that in PIPECONF */
++ temp &= ~(0x07 << 16);
++ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
++ I915_WRITE(reg, temp);
+
+- udelay(100);
++ POSTING_READ(reg);
++ udelay(100);
+
+- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+- temp = I915_READ(PCH_LVDS);
++ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
++ temp = I915_READ(PCH_LVDS);
++ if (temp & LVDS_PORT_EN) {
+ I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
+- I915_READ(PCH_LVDS);
++ POSTING_READ(PCH_LVDS);
+ udelay(100);
+ }
++ }
+
+- /* disable PCH transcoder */
+- temp = I915_READ(transconf_reg);
+- if ((temp & TRANS_ENABLE) != 0) {
+- I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
++ /* disable PCH transcoder */
++ reg = TRANSCONF(pipe);
++ temp = I915_READ(reg);
++ if (temp & TRANS_ENABLE) {
++ I915_WRITE(reg, temp & ~TRANS_ENABLE);
++ /* wait for PCH transcoder off, transcoder state */
++ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
++ DRM_ERROR("failed to disable transcoder\n");
++ }
+
+- /* wait for PCH transcoder off, transcoder state */
+- if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
+- DRM_ERROR("failed to disable transcoder\n");
+- }
++ if (HAS_PCH_CPT(dev)) {
++ /* disable TRANS_DP_CTL */
++ reg = TRANS_DP_CTL(pipe);
++ temp = I915_READ(reg);
++ temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
++ I915_WRITE(reg, temp);
+
+- temp = I915_READ(transconf_reg);
+- /* BPC in transcoder is consistent with that in pipeconf */
+- temp &= ~PIPE_BPC_MASK;
+- temp |= pipe_bpc;
+- I915_WRITE(transconf_reg, temp);
+- I915_READ(transconf_reg);
+- udelay(100);
++ /* disable DPLL_SEL */
++ temp = I915_READ(PCH_DPLL_SEL);
++ if (pipe == 0)
++ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
++ else
++ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
++ I915_WRITE(PCH_DPLL_SEL, temp);
++ }
+
+- if (HAS_PCH_CPT(dev)) {
+- /* disable TRANS_DP_CTL */
+- int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+- int reg;
++ /* disable PCH DPLL */
++ reg = PCH_DPLL(pipe);
++ temp = I915_READ(reg);
++ I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
+
+- reg = I915_READ(trans_dp_ctl);
+- reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+- I915_WRITE(trans_dp_ctl, reg);
+- POSTING_READ(trans_dp_ctl);
++ /* Switch from PCDclk to Rawclk */
++ reg = FDI_RX_CTL(pipe);
++ temp = I915_READ(reg);
++ I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+- /* disable DPLL_SEL */
+- temp = I915_READ(PCH_DPLL_SEL);
+- if (trans_dpll_sel == 0)
+- temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+- else
+- temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+- I915_WRITE(PCH_DPLL_SEL, temp);
+- I915_READ(PCH_DPLL_SEL);
++ /* Disable CPU FDI TX PLL */
++ reg = FDI_TX_CTL(pipe);
++ temp = I915_READ(reg);
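++		/* finish any page flip still pending on the outgoing fb */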
++ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+- }
++ POSTING_READ(reg);
++ udelay(100);
+
+- /* disable PCH DPLL */
+- temp = I915_READ(pch_dpll_reg);
+- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+- I915_READ(pch_dpll_reg);
+-
+- /* Switch from PCDclk to Rawclk */
+- temp = I915_READ(fdi_rx_reg);
+- temp &= ~FDI_SEL_PCDCLK;
+- I915_WRITE(fdi_rx_reg, temp);
+- I915_READ(fdi_rx_reg);
+-
+- /* Disable CPU FDI TX PLL */
+- temp = I915_READ(fdi_tx_reg);
+- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
+- I915_READ(fdi_tx_reg);
+- udelay(100);
++ reg = FDI_RX_CTL(pipe);
++ temp = I915_READ(reg);
++ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+- temp = I915_READ(fdi_rx_reg);
+- temp &= ~FDI_RX_PLL_ENABLE;
+- I915_WRITE(fdi_rx_reg, temp);
+- I915_READ(fdi_rx_reg);
++ /* Wait for the clocks to turn off. */
++ POSTING_READ(reg);
++ udelay(100);
+
+- /* Wait for the clocks to turn off. */
+- udelay(100);
++ intel_crtc->active = false;
++ intel_update_watermarks(dev);
++ intel_update_fbc(dev);
++ intel_clear_scanline_wait(dev);
++}
++
++static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++ int pipe = intel_crtc->pipe;
++ int plane = intel_crtc->plane;
++
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
++ ironlake_crtc_enable(crtc);
++ break;
++
++ case DRM_MODE_DPMS_OFF:
++ DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
++ ironlake_crtc_disable(crtc);
+ break;
+ }
+ }
+
+ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+ {
+- struct intel_overlay *overlay;
+- int ret;
+-
+ if (!enable && intel_crtc->overlay) {
+- overlay = intel_crtc->overlay;
+- mutex_lock(&overlay->dev->struct_mutex);
+- for (;;) {
+- ret = intel_overlay_switch_off(overlay);
+- if (ret == 0)
+- break;
++ struct drm_device *dev = intel_crtc->base.dev;
+
+- ret = intel_overlay_recover_from_interrupt(overlay, 0);
+- if (ret != 0) {
+- /* overlay doesn't react anymore. Usually
+- * results in a black screen and an unkillable
+- * X server. */
+- BUG();
+- overlay->hw_wedged = HW_WEDGED;
+- break;
+- }
+- }
+- mutex_unlock(&overlay->dev->struct_mutex);
++ mutex_lock(&dev->struct_mutex);
++ (void) intel_overlay_switch_off(intel_crtc->overlay, false);
++ mutex_unlock(&dev->struct_mutex);
+ }
+- /* Let userspace switch the overlay on again. In most cases userspace
+- * has to recompute where to put it anyway. */
+
+- return;
++ /* Let userspace switch the overlay on again. In most cases userspace
++ * has to recompute where to put it anyway.
++ */
+ }
+
+-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
++static void i9xx_crtc_enable(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+- int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
+- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+- u32 temp;
++ u32 reg, temp;
+
+- /* XXX: When our outputs are all unaware of DPMS modes other than off
+- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+- */
+- switch (mode) {
+- case DRM_MODE_DPMS_ON:
+- case DRM_MODE_DPMS_STANDBY:
+- case DRM_MODE_DPMS_SUSPEND:
+- /* Enable the DPLL */
+- temp = I915_READ(dpll_reg);
+- if ((temp & DPLL_VCO_ENABLE) == 0) {
+- I915_WRITE(dpll_reg, temp);
+- I915_READ(dpll_reg);
+- /* Wait for the clocks to stabilize. */
+- udelay(150);
+- I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+- I915_READ(dpll_reg);
+- /* Wait for the clocks to stabilize. */
+- udelay(150);
+- I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+- I915_READ(dpll_reg);
+- /* Wait for the clocks to stabilize. */
+- udelay(150);
+- }
++ if (intel_crtc->active)
++ return;
+
+- /* Enable the pipe */
+- temp = I915_READ(pipeconf_reg);
+- if ((temp & PIPEACONF_ENABLE) == 0)
+- I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+-
+- /* Enable the plane */
+- temp = I915_READ(dspcntr_reg);
+- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+- I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+- /* Flush the plane changes */
+- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+- }
++ intel_crtc->active = true;
++ intel_update_watermarks(dev);
+
+- intel_crtc_load_lut(crtc);
++ /* Enable the DPLL */
++ reg = DPLL(pipe);
++ temp = I915_READ(reg);
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ I915_WRITE(reg, temp);
+
+- if ((IS_I965G(dev) || plane == 0))
+- intel_update_fbc(crtc, &crtc->mode);
++ /* Wait for the clocks to stabilize. */
++ POSTING_READ(reg);
++ udelay(150);
+
+- /* Give the overlay scaler a chance to enable if it's on this pipe */
+- intel_crtc_dpms_overlay(intel_crtc, true);
+- break;
+- case DRM_MODE_DPMS_OFF:
+- /* Give the overlay scaler a chance to disable if it's on this pipe */
+- intel_crtc_dpms_overlay(intel_crtc, false);
+- drm_vblank_off(dev, pipe);
+-
+- if (dev_priv->cfb_plane == plane &&
+- dev_priv->display.disable_fbc)
+- dev_priv->display.disable_fbc(dev);
+-
+- /* Disable display plane */
+- temp = I915_READ(dspcntr_reg);
+- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+- I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+- /* Flush the plane changes */
+- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+- I915_READ(dspbase_reg);
+- }
++ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
++
++ /* Wait for the clocks to stabilize. */
++ POSTING_READ(reg);
++ udelay(150);
++
++ I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
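++		/* Writing VCO_ENABLE twice, with settle delays, is
++		 * deliberate; it is carried over from the original DPLL
++		 * enable sequence. */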
+
+- /* Don't disable pipe A or pipe A PLLs if needed */
+- if (pipeconf_reg == PIPEACONF &&
+- (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
+- /* Wait for vblank for the disable to take effect */
++ /* Wait for the clocks to stabilize. */
++ POSTING_READ(reg);
++ udelay(150);
++ }
++
++ /* Enable the pipe */
++ reg = PIPECONF(pipe);
++ temp = I915_READ(reg);
++ if ((temp & PIPECONF_ENABLE) == 0)
++ I915_WRITE(reg, temp | PIPECONF_ENABLE);
++
++ /* Enable the plane */
++ reg = DSPCNTR(plane);
++ temp = I915_READ(reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
++ intel_flush_display_plane(dev, plane);
++ }
++
++ intel_crtc_load_lut(crtc);
++ intel_update_fbc(dev);
++
++ /* Give the overlay scaler a chance to enable if it's on this pipe */
++ intel_crtc_dpms_overlay(intel_crtc, true);
++ intel_crtc_update_cursor(crtc, true);
++}
++
++static void i9xx_crtc_disable(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++ int pipe = intel_crtc->pipe;
++ int plane = intel_crtc->plane;
++ u32 reg, temp;
++
++ if (!intel_crtc->active)
++ return;
++
++ /* Give the overlay scaler a chance to disable if it's on this pipe */
++ intel_crtc_wait_for_pending_flips(crtc);
++ drm_vblank_off(dev, pipe);
++ intel_crtc_dpms_overlay(intel_crtc, false);
++ intel_crtc_update_cursor(crtc, false);
++
++ if (dev_priv->cfb_plane == plane &&
++ dev_priv->display.disable_fbc)
++ dev_priv->display.disable_fbc(dev);
++
++ /* Disable display plane */
++ reg = DSPCNTR(plane);
++ temp = I915_READ(reg);
++ if (temp & DISPLAY_PLANE_ENABLE) {
++ I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ intel_flush_display_plane(dev, plane);
++
++ /* Wait for vblank for the disable to take effect */
++ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
+- goto skip_pipe_off;
+- }
++ }
+
+- /* Next, disable display pipes */
+- temp = I915_READ(pipeconf_reg);
+- if ((temp & PIPEACONF_ENABLE) != 0) {
+- I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+- I915_READ(pipeconf_reg);
+- }
++ /* Don't disable pipe A or pipe A PLLs if needed */
++ if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
++ goto done;
++
++ /* Next, disable display pipes */
++ reg = PIPECONF(pipe);
++ temp = I915_READ(reg);
++ if (temp & PIPECONF_ENABLE) {
++ I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
+
+ /* Wait for the pipe to turn off */
++ POSTING_READ(reg);
+ intel_wait_for_pipe_off(dev, pipe);
++ }
++
++ reg = DPLL(pipe);
++ temp = I915_READ(reg);
++ if (temp & DPLL_VCO_ENABLE) {
++ I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
+
+- temp = I915_READ(dpll_reg);
+- if ((temp & DPLL_VCO_ENABLE) != 0) {
+- I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+- I915_READ(dpll_reg);
+- }
+- skip_pipe_off:
+ /* Wait for the clocks to turn off. */
++ POSTING_READ(reg);
+ udelay(150);
++ }
++
++done:
++ intel_crtc->active = false;
++ intel_update_fbc(dev);
++ intel_update_watermarks(dev);
++ intel_clear_scanline_wait(dev);
++}
++
++static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ i9xx_crtc_enable(crtc);
++ break;
++ case DRM_MODE_DPMS_OFF:
++ i9xx_crtc_disable(crtc);
+ break;
+ }
+ }
+@@ -2388,26 +2506,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+ return;
+
+ intel_crtc->dpms_mode = mode;
+- intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
+-
+- /* When switching on the display, ensure that SR is disabled
+- * with multiple pipes prior to enabling to new pipe.
+- *
+- * When switching off the display, make sure the cursor is
+- * properly hidden prior to disabling the pipe.
+- */
+- if (mode == DRM_MODE_DPMS_ON)
+- intel_update_watermarks(dev);
+- else
+- intel_crtc_update_cursor(crtc);
+
+ dev_priv->display.dpms(crtc, mode);
+
+- if (mode == DRM_MODE_DPMS_ON)
+- intel_crtc_update_cursor(crtc);
+- else
+- intel_update_watermarks(dev);
+-
+ if (!dev->primary->master)
+ return;
+
+@@ -2432,16 +2533,46 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+ }
+ }
+
+-static void intel_crtc_prepare (struct drm_crtc *crtc)
++static void intel_crtc_disable(struct drm_crtc *crtc)
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++ struct drm_device *dev = crtc->dev;
++
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++
++ if (crtc->fb) {
++ mutex_lock(&dev->struct_mutex);
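++		/* drop the pin taken when the fb was bound to this crtc */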
++ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
++ mutex_unlock(&dev->struct_mutex);
++ }
++}
++
++/* Prepare for a mode set.
++ *
++ * Note we could be a lot smarter here. We need to figure out which outputs
++ * will be enabled, which disabled (in short, how the config will change)
++ * and perform the minimum necessary steps to accomplish that, e.g. updating
++ * watermarks, FBC configuration, making sure PLLs are programmed correctly,
++ * panel fitting is in the proper state, etc.
++ */
++static void i9xx_crtc_prepare(struct drm_crtc *crtc)
++{
++ i9xx_crtc_disable(crtc);
+ }
+
+-static void intel_crtc_commit (struct drm_crtc *crtc)
++static void i9xx_crtc_commit(struct drm_crtc *crtc)
+ {
+- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++ i9xx_crtc_enable(crtc);
++}
++
++static void ironlake_crtc_prepare(struct drm_crtc *crtc)
++{
++ ironlake_crtc_disable(crtc);
++}
++
++static void ironlake_crtc_commit(struct drm_crtc *crtc)
++{
++ ironlake_crtc_enable(crtc);
+ }
+
+ void intel_encoder_prepare (struct drm_encoder *encoder)
+@@ -2460,13 +2591,7 @@ void intel_encoder_commit (struct drm_encoder *encoder)
+
+ void intel_encoder_destroy(struct drm_encoder *encoder)
+ {
+- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+-
+- if (intel_encoder->ddc_bus)
+- intel_i2c_destroy(intel_encoder->ddc_bus);
+-
+- if (intel_encoder->i2c_bus)
+- intel_i2c_destroy(intel_encoder->i2c_bus);
++ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+@@ -2557,33 +2682,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
+ return 133000;
+ }
+
+-/**
+- * Return the pipe currently connected to the panel fitter,
+- * or -1 if the panel fitter is not present or not in use
+- */
+-int intel_panel_fitter_pipe (struct drm_device *dev)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 pfit_control;
+-
+- /* i830 doesn't have a panel fitter */
+- if (IS_I830(dev))
+- return -1;
+-
+- pfit_control = I915_READ(PFIT_CONTROL);
+-
+- /* See if the panel fitter is in use */
+- if ((pfit_control & PFIT_ENABLE) == 0)
+- return -1;
+-
+- /* 965 can place panel fitter on either pipe */
+- if (IS_I965G(dev))
+- return (pfit_control >> 29) & 0x3;
+-
+- /* older chips can only use pipe 1 */
+- return 1;
+-}
+-
+ struct fdi_m_n {
+ u32 tu;
+ u32 gmch_m;
+@@ -2902,7 +3000,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+- plane ? "B" : "A", size);
++ plane ? "B" : "A", size);
+
+ return size;
+ }
+@@ -2919,7 +3017,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+- plane ? "B" : "A", size);
++ plane ? "B" : "A", size);
+
+ return size;
+ }
+@@ -2934,8 +3032,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+- plane ? "B" : "A",
+- size);
++ plane ? "B" : "A",
++ size);
+
+ return size;
+ }
+@@ -2950,14 +3048,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+- plane ? "B" : "A", size);
++ plane ? "B" : "A", size);
+
+ return size;
+ }
+
+ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
+- int planeb_clock, int sr_hdisplay, int unused,
+- int pixel_size)
++ int planeb_clock, int sr_hdisplay, int unused,
++ int pixel_size)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct cxsr_latency *latency;
+@@ -3069,13 +3167,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
+
+ /* Use ns/us then divide to preserve precision */
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+- pixel_size * sr_hdisplay;
++ pixel_size * sr_hdisplay;
+ sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
+
+ entries_required = (((sr_latency_ns / line_time_us) +
+ 1000) / 1000) * pixel_size * 64;
+ entries_required = DIV_ROUND_UP(entries_required,
+- g4x_cursor_wm_info.cacheline_size);
++ g4x_cursor_wm_info.cacheline_size);
+ cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
+
+ if (cursor_sr > g4x_cursor_wm_info.max_wm)
+@@ -3087,7 +3185,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+- & ~FW_BLC_SELF_EN);
++ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
+@@ -3125,7 +3223,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
+
+ /* Use ns/us then divide to preserve precision */
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+- pixel_size * sr_hdisplay;
++ pixel_size * sr_hdisplay;
+ sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
+ DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ srwm = I965_FIFO_SIZE - sr_entries;
+@@ -3134,11 +3232,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
+ srwm &= 0x1ff;
+
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+- pixel_size * 64;
++ pixel_size * 64;
+ sr_entries = DIV_ROUND_UP(sr_entries,
+ i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+- (sr_entries + i965_cursor_wm_info.guard_size);
++ (sr_entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+@@ -3146,11 +3244,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+- if (IS_I965GM(dev))
++ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+- if (IS_I965GM(dev))
++ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ }
+@@ -3180,9 +3278,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+ int sr_clock, sr_entries = 0;
+
+ /* Create copies of the base settings for each pipe */
+- if (IS_I965GM(dev) || IS_I945GM(dev))
++ if (IS_CRESTLINE(dev) || IS_I945GM(dev))
+ planea_params = planeb_params = i945_wm_info;
+- else if (IS_I9XX(dev))
++ else if (!IS_GEN2(dev))
+ planea_params = planeb_params = i915_wm_info;
+ else
+ planea_params = planeb_params = i855_wm_info;
+@@ -3217,7 +3315,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+
+ /* Use ns/us then divide to preserve precision */
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+- pixel_size * sr_hdisplay;
++ pixel_size * sr_hdisplay;
+ sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
+ srwm = total_size - sr_entries;
+@@ -3242,7 +3340,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+- planea_wm, planeb_wm, cwm, srwm);
++ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+@@ -3276,146 +3374,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
+ #define ILK_LP0_PLANE_LATENCY 700
+ #define ILK_LP0_CURSOR_LATENCY 1300
+
+-static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
+- int planeb_clock, int sr_hdisplay, int sr_htotal,
+- int pixel_size)
++static bool ironlake_compute_wm0(struct drm_device *dev,
++ int pipe,
++ int *plane_wm,
++ int *cursor_wm)
+ {
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+- int sr_wm, cursor_wm;
+- unsigned long line_time_us;
+- int sr_clock, entries_required;
+- u32 reg_value;
+- int line_count;
+- int planea_htotal = 0, planeb_htotal = 0;
+ struct drm_crtc *crtc;
++ int htotal, hdisplay, clock, pixel_size = 0;
++ int line_time_us, line_count, entries;
+
+- /* Need htotal for all active display plane */
+- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+- if (intel_crtc->plane == 0)
+- planea_htotal = crtc->mode.htotal;
+- else
+- planeb_htotal = crtc->mode.htotal;
+- }
+- }
+-
+- /* Calculate and update the watermark for plane A */
+- if (planea_clock) {
+- entries_required = ((planea_clock / 1000) * pixel_size *
+- ILK_LP0_PLANE_LATENCY) / 1000;
+- entries_required = DIV_ROUND_UP(entries_required,
+- ironlake_display_wm_info.cacheline_size);
+- planea_wm = entries_required +
+- ironlake_display_wm_info.guard_size;
+-
+- if (planea_wm > (int)ironlake_display_wm_info.max_wm)
+- planea_wm = ironlake_display_wm_info.max_wm;
+-
+- /* Use the large buffer method to calculate cursor watermark */
+- line_time_us = (planea_htotal * 1000) / planea_clock;
+-
+- /* Use ns/us then divide to preserve precision */
+- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+-
+- /* calculate the cursor watermark for cursor A */
+- entries_required = line_count * 64 * pixel_size;
+- entries_required = DIV_ROUND_UP(entries_required,
+- ironlake_cursor_wm_info.cacheline_size);
+- cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+- if (cursora_wm > ironlake_cursor_wm_info.max_wm)
+- cursora_wm = ironlake_cursor_wm_info.max_wm;
+-
+- reg_value = I915_READ(WM0_PIPEA_ILK);
+- reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+- reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
+- (cursora_wm & WM0_PIPE_CURSOR_MASK);
+- I915_WRITE(WM0_PIPEA_ILK, reg_value);
+- DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
+- "cursor: %d\n", planea_wm, cursora_wm);
+- }
+- /* Calculate and update the watermark for plane B */
+- if (planeb_clock) {
+- entries_required = ((planeb_clock / 1000) * pixel_size *
+- ILK_LP0_PLANE_LATENCY) / 1000;
+- entries_required = DIV_ROUND_UP(entries_required,
+- ironlake_display_wm_info.cacheline_size);
+- planeb_wm = entries_required +
+- ironlake_display_wm_info.guard_size;
+-
+- if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
+- planeb_wm = ironlake_display_wm_info.max_wm;
++ crtc = intel_get_crtc_for_pipe(dev, pipe);
++ if (crtc->fb == NULL || !crtc->enabled)
++ return false;
+
+- /* Use the large buffer method to calculate cursor watermark */
+- line_time_us = (planeb_htotal * 1000) / planeb_clock;
++ htotal = crtc->mode.htotal;
++ hdisplay = crtc->mode.hdisplay;
++ clock = crtc->mode.clock;
++ pixel_size = crtc->fb->bits_per_pixel / 8;
++
++ /* Use the small buffer method to calculate plane watermark */
++ entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
++ entries = DIV_ROUND_UP(entries,
++ ironlake_display_wm_info.cacheline_size);
++ *plane_wm = entries + ironlake_display_wm_info.guard_size;
++ if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
++ *plane_wm = ironlake_display_wm_info.max_wm;
++
++ /* Use the large buffer method to calculate cursor watermark */
++ line_time_us = ((htotal * 1000) / clock);
++ line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
++ entries = line_count * 64 * pixel_size;
++ entries = DIV_ROUND_UP(entries,
++ ironlake_cursor_wm_info.cacheline_size);
++ *cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
++ if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
++ *cursor_wm = ironlake_cursor_wm_info.max_wm;
+
+- /* Use ns/us then divide to preserve precision */
+- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
++ return true;
++}
+
+- /* calculate the cursor watermark for cursor B */
+- entries_required = line_count * 64 * pixel_size;
+- entries_required = DIV_ROUND_UP(entries_required,
+- ironlake_cursor_wm_info.cacheline_size);
+- cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+- if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
+- cursorb_wm = ironlake_cursor_wm_info.max_wm;
++static void ironlake_update_wm(struct drm_device *dev,
++ int planea_clock, int planeb_clock,
++ int sr_hdisplay, int sr_htotal,
++ int pixel_size)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int plane_wm, cursor_wm, enabled;
++ int tmp;
++
++ enabled = 0;
++ if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
++ I915_WRITE(WM0_PIPEA_ILK,
++ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
++ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
++ " plane %d, " "cursor: %d\n",
++ plane_wm, cursor_wm);
++ enabled++;
++ }
+
+- reg_value = I915_READ(WM0_PIPEB_ILK);
+- reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+- reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
+- (cursorb_wm & WM0_PIPE_CURSOR_MASK);
+- I915_WRITE(WM0_PIPEB_ILK, reg_value);
+- DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
+- "cursor: %d\n", planeb_wm, cursorb_wm);
++ if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
++ I915_WRITE(WM0_PIPEB_ILK,
++ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
++ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
++ " plane %d, cursor: %d\n",
++ plane_wm, cursor_wm);
++ enabled++;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+- if (!planea_clock || !planeb_clock) {
+-
++ tmp = 0;
++ if (enabled == 1 && /* XXX disabled due to buggy implementation? */ 0) {
++ unsigned long line_time_us;
++ int small, large, plane_fbc;
++ int sr_clock, entries;
++ int line_count, line_size;
+ /* Read the self-refresh latency. The unit is 0.5us */
+ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+- line_time_us = ((sr_htotal * 1000) / sr_clock);
++ line_time_us = (sr_htotal * 1000) / sr_clock;
+
+ /* Use ns/us then divide to preserve precision */
+ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
+- / 1000;
++ / 1000;
++ line_size = sr_hdisplay * pixel_size;
+
+- /* calculate the self-refresh watermark for display plane */
+- entries_required = line_count * sr_hdisplay * pixel_size;
+- entries_required = DIV_ROUND_UP(entries_required,
+- ironlake_display_srwm_info.cacheline_size);
+- sr_wm = entries_required +
+- ironlake_display_srwm_info.guard_size;
++ /* Use the minimum of the small and large buffer method for primary */
++ small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
++ large = line_count * line_size;
+
+- /* calculate the self-refresh watermark for display cursor */
+- entries_required = line_count * pixel_size * 64;
+- entries_required = DIV_ROUND_UP(entries_required,
+- ironlake_cursor_srwm_info.cacheline_size);
+- cursor_wm = entries_required +
+- ironlake_cursor_srwm_info.guard_size;
++ entries = DIV_ROUND_UP(min(small, large),
++ ironlake_display_srwm_info.cacheline_size);
+
+- /* configure watermark and enable self-refresh */
+- reg_value = I915_READ(WM1_LP_ILK);
+- reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
+- WM1_LP_CURSOR_MASK);
+- reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+- (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
++ plane_fbc = entries * 64;
++ plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
+
+- I915_WRITE(WM1_LP_ILK, reg_value);
+- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+- "cursor %d\n", sr_wm, cursor_wm);
++ plane_wm = entries + ironlake_display_srwm_info.guard_size;
++ if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
++ plane_wm = ironlake_display_srwm_info.max_wm;
+
+- } else {
+- /* Turn off self refresh if both pipes are enabled */
+- I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+- }
++ /* calculate the self-refresh watermark for display cursor */
++ entries = line_count * pixel_size * 64;
++ entries = DIV_ROUND_UP(entries,
++ ironlake_cursor_srwm_info.cacheline_size);
++
++ cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
++ if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
++ cursor_wm = ironlake_cursor_srwm_info.max_wm;
++
++ /* configure watermark and enable self-refresh */
++ tmp = (WM1_LP_SR_EN |
++ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
++ (plane_fbc << WM1_LP_FBC_SHIFT) |
++ (plane_wm << WM1_LP_SR_SHIFT) |
++ cursor_wm);
++ DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
++ " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
++ }
++ I915_WRITE(WM1_LP_ILK, tmp);
++ /* XXX setup WM2 and WM3 */
+ }
++
+ /**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+@@ -3447,7 +3529,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+- */
++ */
+ static void intel_update_watermarks(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -3463,15 +3545,15 @@ static void intel_update_watermarks(struct drm_device *dev)
+ /* Get the clock config from both planes */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
++ if (intel_crtc->active) {
+ enabled++;
+ if (intel_crtc->plane == 0) {
+ DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
+- intel_crtc->pipe, crtc->mode.clock);
++ intel_crtc->pipe, crtc->mode.clock);
+ planea_clock = crtc->mode.clock;
+ } else {
+ DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
+- intel_crtc->pipe, crtc->mode.clock);
++ intel_crtc->pipe, crtc->mode.clock);
+ planeb_clock = crtc->mode.clock;
+ }
+ sr_hdisplay = crtc->mode.hdisplay;
+@@ -3502,62 +3584,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+- int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+- int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+- int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
+- int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
+- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++ u32 fp_reg, dpll_reg;
+ int refclk, num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+- u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
++ u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ struct intel_encoder *has_edp_encoder = NULL;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+- struct drm_encoder *encoder;
++ struct intel_encoder *encoder;
+ const intel_limit_t *limit;
+ int ret;
+ struct fdi_m_n m_n = {0};
+- int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
+- int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
+- int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
+- int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
+- int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
+- int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
+- int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+- int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+- int trans_dpll_sel = (pipe == 0) ? 0 : 1;
+- int lvds_reg = LVDS;
+- u32 temp;
+- int sdvo_pixel_multiply;
++ u32 reg, temp;
+ int target_clock;
+
+ drm_vblank_pre_modeset(dev, pipe);
+
+- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+- struct intel_encoder *intel_encoder;
+-
+- if (encoder->crtc != crtc)
++ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
++ if (encoder->base.crtc != crtc)
+ continue;
+
+- intel_encoder = enc_to_intel_encoder(encoder);
+- switch (intel_encoder->type) {
++ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+- if (intel_encoder->needs_tv_clock)
++ if (encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_DVO:
+@@ -3573,7 +3628,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+- has_edp_encoder = intel_encoder;
++ has_edp_encoder = encoder;
+ break;
+ }
+
+@@ -3583,15 +3638,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+- refclk / 1000);
+- } else if (IS_I9XX(dev)) {
++ refclk / 1000);
++ } else if (!IS_GEN2(dev)) {
+ refclk = 96000;
+- if (HAS_PCH_SPLIT(dev))
++ if (HAS_PCH_SPLIT(dev) &&
++ (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
+ refclk = 120000; /* 120Mhz refclk */
+ } else {
+ refclk = 48000;
+ }
+-
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+@@ -3607,13 +3662,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+- intel_crtc_update_cursor(crtc);
++ intel_crtc_update_cursor(crtc, true);
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ has_reduced_clock = limit->find_pll(limit, crtc,
+- dev_priv->lvds_downclock,
+- refclk,
+- &reduced_clock);
++ dev_priv->lvds_downclock,
++ refclk,
++ &reduced_clock);
+ if (has_reduced_clock && (clock.p != reduced_clock.p)) {
+ /*
+ * If the different P is found, it means that we can't
+@@ -3622,7 +3677,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ * feature.
+ */
+ DRM_DEBUG_KMS("Different P is found for "
+- "LVDS clock/downclock\n");
++ "LVDS clock/downclock\n");
+ has_reduced_clock = 0;
+ }
+ }
+@@ -3630,14 +3685,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ this mirrors vbios setting. */
+ if (is_sdvo && is_tv) {
+ if (adjusted_mode->clock >= 100000
+- && adjusted_mode->clock < 140500) {
++ && adjusted_mode->clock < 140500) {
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 3;
+ clock.m1 = 16;
+ clock.m2 = 8;
+ } else if (adjusted_mode->clock >= 140500
+- && adjusted_mode->clock <= 200000) {
++ && adjusted_mode->clock <= 200000) {
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 6;
+@@ -3649,34 +3704,41 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ /* FDI link */
+ if (HAS_PCH_SPLIT(dev)) {
+ int lane = 0, link_bw, bpp;
+- /* eDP doesn't require FDI link, so just set DP M/N
++ /* CPU eDP doesn't require FDI link, so just set DP M/N
+ according to current link config */
+- if (has_edp_encoder) {
++		if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ target_clock = mode->clock;
+ intel_edp_link_config(has_edp_encoder,
+ &lane, &link_bw);
+ } else {
+- /* DP over FDI requires target mode clock
++ /* [e]DP over FDI requires target mode clock
+ instead of link clock */
+- if (is_dp)
++ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ target_clock = mode->clock;
+ else
+ target_clock = adjusted_mode->clock;
+- link_bw = 270000;
++
++ /* FDI is a binary signal running at ~2.7GHz, encoding
++ * each output octet as 10 bits. The actual frequency
++ * is stored as a divider into a 100MHz clock, and the
++ * mode pixel clock is stored in units of 1KHz.
++ * Hence the bw of each lane in terms of the mode signal
++ * is:
++ */
++ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
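++			/* e.g. the nominal 2.7GHz PLL presumably reads back as
++			 * a divider of 27, giving 27 * 100000 / 10 = 270000 --
++			 * the same value that was hard-coded here before this
++			 * change.
++			 */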
+ }
+
+ /* determine panel color depth */
+- temp = I915_READ(pipeconf_reg);
++ temp = I915_READ(PIPECONF(pipe));
+ temp &= ~PIPE_BPC_MASK;
+ if (is_lvds) {
+- int lvds_reg = I915_READ(PCH_LVDS);
+ /* the BPC will be 6 if it is 18-bit LVDS panel */
+- if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
++ if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ temp |= PIPE_8BPC;
+ else
+ temp |= PIPE_6BPC;
+- } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
+- switch (dev_priv->edp_bpp/3) {
++ } else if (has_edp_encoder) {
++ switch (dev_priv->edp.bpp/3) {
+ case 8:
+ temp |= PIPE_8BPC;
+ break;
+@@ -3692,8 +3754,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ }
+ } else
+ temp |= PIPE_8BPC;
+- I915_WRITE(pipeconf_reg, temp);
+- I915_READ(pipeconf_reg);
++ I915_WRITE(PIPECONF(pipe), temp);
+
+ switch (temp & PIPE_BPC_MASK) {
+ case PIPE_8BPC:
+@@ -3738,33 +3799,39 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ /* Always enable nonspread source */
+ temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+ temp |= DREF_NONSPREAD_SOURCE_ENABLE;
+- I915_WRITE(PCH_DREF_CONTROL, temp);
+- POSTING_READ(PCH_DREF_CONTROL);
+-
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+- POSTING_READ(PCH_DREF_CONTROL);
+
++ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+
+ if (has_edp_encoder) {
+ if (dev_priv->lvds_use_ssc) {
+ temp |= DREF_SSC1_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+- POSTING_READ(PCH_DREF_CONTROL);
+-
+- udelay(200);
+
+- temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+- temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+- I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
++ udelay(200);
++ }
++ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
++
++ /* Enable CPU source on CPU attached eDP */
++ if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
++ if (dev_priv->lvds_use_ssc)
++ temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
++ else
++ temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else {
+- temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+- I915_WRITE(PCH_DREF_CONTROL, temp);
+- POSTING_READ(PCH_DREF_CONTROL);
++ /* Enable SSC on PCH eDP if needed */
++ if (dev_priv->lvds_use_ssc) {
++ DRM_ERROR("enabling SSC on PCH\n");
++ temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
++ }
+ }
++ I915_WRITE(PCH_DREF_CONTROL, temp);
++ POSTING_READ(PCH_DREF_CONTROL);
++ udelay(200);
+ }
+ }
+
+@@ -3780,23 +3847,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ reduced_clock.m2;
+ }
+
++ dpll = 0;
+ if (!HAS_PCH_SPLIT(dev))
+ dpll = DPLL_VGA_MODE_DIS;
+
+- if (IS_I9XX(dev)) {
++ if (!IS_GEN2(dev)) {
+ if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
++ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
++ if (pixel_multiplier > 1) {
++ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
++ dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++ else if (HAS_PCH_SPLIT(dev))
++ dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
++ }
+ dpll |= DPLL_DVO_HIGH_SPEED;
+- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+- dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+- else if (HAS_PCH_SPLIT(dev))
+- dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+ }
+- if (is_dp)
++ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+@@ -3824,7 +3894,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
++ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+ } else {
+ if (is_lvds) {
+@@ -3851,7 +3921,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ /* setup pipeconf */
+- pipeconf = I915_READ(pipeconf_reg);
++ pipeconf = I915_READ(PIPECONF(pipe));
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+@@ -3865,7 +3935,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+ }
+
+- if (pipe == 0 && !IS_I965G(dev)) {
++ if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
+ * core speed.
+ *
+@@ -3874,51 +3944,47 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ */
+ if (mode->clock >
+ dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
+- pipeconf |= PIPEACONF_DOUBLE_WIDE;
++ pipeconf |= PIPECONF_DOUBLE_WIDE;
+ else
+- pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
++ pipeconf &= ~PIPECONF_DOUBLE_WIDE;
+ }
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+- pipeconf |= PIPEACONF_ENABLE;
++ pipeconf |= PIPECONF_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
+
+-
+- /* Disable the panel fitter if it was on our pipe */
+- if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
+- I915_WRITE(PFIT_CONTROL, 0);
+-
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ drm_mode_debug_printmodeline(mode);
+
+ /* assign to Ironlake registers */
+ if (HAS_PCH_SPLIT(dev)) {
+- fp_reg = pch_fp_reg;
+- dpll_reg = pch_dpll_reg;
++ fp_reg = PCH_FP0(pipe);
++ dpll_reg = PCH_DPLL(pipe);
++ } else {
++ fp_reg = FP0(pipe);
++ dpll_reg = DPLL(pipe);
+ }
+
+- if (!has_edp_encoder) {
++ /* PCH eDP needs FDI, but CPU eDP does not */
++ if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ I915_WRITE(fp_reg, fp);
+ I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+- I915_READ(dpll_reg);
++
++ POSTING_READ(dpll_reg);
+ udelay(150);
+ }
+
+ /* enable transcoder DPLL */
+ if (HAS_PCH_CPT(dev)) {
+ temp = I915_READ(PCH_DPLL_SEL);
+- if (trans_dpll_sel == 0)
+- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
++ if (pipe == 0)
++ temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
+ else
+- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
++ temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
+ I915_WRITE(PCH_DPLL_SEL, temp);
+- I915_READ(PCH_DPLL_SEL);
+- udelay(150);
+- }
+
+- if (HAS_PCH_SPLIT(dev)) {
+- pipeconf &= ~PIPE_ENABLE_DITHER;
+- pipeconf &= ~PIPE_DITHER_TYPE_MASK;
++ POSTING_READ(PCH_DPLL_SEL);
++ udelay(150);
+ }
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+@@ -3926,58 +3992,60 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ * things on.
+ */
+ if (is_lvds) {
+- u32 lvds;
+-
++ reg = LVDS;
+ if (HAS_PCH_SPLIT(dev))
+- lvds_reg = PCH_LVDS;
++ reg = PCH_LVDS;
+
+- lvds = I915_READ(lvds_reg);
+- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
++ temp = I915_READ(reg);
++ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+- lvds |= PORT_TRANS_B_SEL_CPT;
++ temp |= PORT_TRANS_B_SEL_CPT;
+ else
+- lvds |= LVDS_PIPEB_SELECT;
++ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ if (HAS_PCH_CPT(dev))
+- lvds &= ~PORT_TRANS_SEL_MASK;
++ temp &= ~PORT_TRANS_SEL_MASK;
+ else
+- lvds &= ~LVDS_PIPEB_SELECT;
++ temp &= ~LVDS_PIPEB_SELECT;
+ }
+		/* set the corresponding LVDS_BORDER bit */
+- lvds |= dev_priv->lvds_border_bits;
++ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock.p2 == 7)
+- lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
++ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+- lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
++ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+- /* set the dithering flag */
+- if (IS_I965G(dev)) {
+- if (dev_priv->lvds_dither) {
+- if (HAS_PCH_SPLIT(dev)) {
+- pipeconf |= PIPE_ENABLE_DITHER;
+- pipeconf |= PIPE_DITHER_TYPE_ST01;
+- } else
+- lvds |= LVDS_ENABLE_DITHER;
+- } else {
+- if (!HAS_PCH_SPLIT(dev)) {
+- lvds &= ~LVDS_ENABLE_DITHER;
+- }
+- }
++ /* set the dithering flag on non-PCH LVDS as needed */
++ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
++ if (dev_priv->lvds_dither)
++ temp |= LVDS_ENABLE_DITHER;
++ else
++ temp &= ~LVDS_ENABLE_DITHER;
++ }
++ I915_WRITE(reg, temp);
++ }
++
++ /* set the dithering flag and clear for anything other than a panel. */
++ if (HAS_PCH_SPLIT(dev)) {
++ pipeconf &= ~PIPECONF_DITHER_EN;
++ pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
++ if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
++ pipeconf |= PIPECONF_DITHER_EN;
++ pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+ }
+- I915_WRITE(lvds_reg, lvds);
+- I915_READ(lvds_reg);
+ }
+- if (is_dp)
++
++ if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+- else if (HAS_PCH_SPLIT(dev)) {
++ } else if (HAS_PCH_SPLIT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery setting.*/
+ if (pipe == 0) {
+ I915_WRITE(TRANSA_DATA_M1, 0);
+@@ -3992,29 +4060,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ }
+ }
+
+- if (!has_edp_encoder) {
++ if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ I915_WRITE(fp_reg, fp);
+ I915_WRITE(dpll_reg, dpll);
+- I915_READ(dpll_reg);
++
+ /* Wait for the clocks to stabilize. */
++ POSTING_READ(dpll_reg);
+ udelay(150);
+
+- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
++ temp = 0;
+ if (is_sdvo) {
+- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+- I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+- ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+- } else
+- I915_WRITE(dpll_md_reg, 0);
++ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
++ if (temp > 1)
++ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
++ else
++ temp = 0;
++ }
++ I915_WRITE(DPLL_MD(pipe), temp);
+ } else {
+ /* write it again -- the BIOS does, after all */
+ I915_WRITE(dpll_reg, dpll);
+ }
+- I915_READ(dpll_reg);
++
+ /* Wait for the clocks to stabilize. */
++ POSTING_READ(dpll_reg);
+ udelay(150);
+ }
+
++ intel_crtc->lowfreq_avail = false;
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(fp_reg + 4, fp2);
+ intel_crtc->lowfreq_avail = true;
+@@ -4024,7 +4098,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ }
+ } else {
+ I915_WRITE(fp_reg + 4, fp);
+- intel_crtc->lowfreq_avail = false;
+ if (HAS_PIPE_CXSR(dev)) {
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+@@ -4043,70 +4116,62 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ } else
+ pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+
+- I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++ I915_WRITE(HTOTAL(pipe),
++ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+- I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++ I915_WRITE(HBLANK(pipe),
++ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+- I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++ I915_WRITE(HSYNC(pipe),
++ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+- I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++
++ I915_WRITE(VTOTAL(pipe),
++ (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+- I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++ I915_WRITE(VBLANK(pipe),
++ (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+- I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++ I915_WRITE(VSYNC(pipe),
++ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+- /* pipesrc and dspsize control the size that is scaled from, which should
+- * always be the user's requested size.
++
++ /* pipesrc and dspsize control the size that is scaled from,
++ * which should always be the user's requested size.
+ */
+ if (!HAS_PCH_SPLIT(dev)) {
+- I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
+- (mode->hdisplay - 1));
+- I915_WRITE(dsppos_reg, 0);
++ I915_WRITE(DSPSIZE(plane),
++ ((mode->vdisplay - 1) << 16) |
++ (mode->hdisplay - 1));
++ I915_WRITE(DSPPOS(plane), 0);
+ }
+- I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
++ I915_WRITE(PIPESRC(pipe),
++ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+
+ if (HAS_PCH_SPLIT(dev)) {
+- I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
+- I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
+- I915_WRITE(link_m1_reg, m_n.link_m);
+- I915_WRITE(link_n1_reg, m_n.link_n);
++ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
++ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
++ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
++ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+
+- if (has_edp_encoder) {
++ if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+- } else {
+- /* enable FDI RX PLL too */
+- temp = I915_READ(fdi_rx_reg);
+- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+- I915_READ(fdi_rx_reg);
+- udelay(200);
+-
+- /* enable FDI TX PLL too */
+- temp = I915_READ(fdi_tx_reg);
+- I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+- I915_READ(fdi_tx_reg);
+-
+- /* enable FDI RX PCDCLK */
+- temp = I915_READ(fdi_rx_reg);
+- I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+- I915_READ(fdi_rx_reg);
+- udelay(200);
+ }
+ }
+
+- I915_WRITE(pipeconf_reg, pipeconf);
+- I915_READ(pipeconf_reg);
++ I915_WRITE(PIPECONF(pipe), pipeconf);
++ POSTING_READ(PIPECONF(pipe));
+
+ intel_wait_for_vblank(dev, pipe);
+
+- if (IS_IRONLAKE(dev)) {
++ if (IS_GEN5(dev)) {
+ /* enable address swizzle for tiling buffer */
+ temp = I915_READ(DISP_ARB_CTL);
+ I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
+ }
+
+- I915_WRITE(dspcntr_reg, dspcntr);
++ I915_WRITE(DSPCNTR(plane), dspcntr);
+
+- /* Flush the plane changes */
+ ret = intel_pipe_set_base(crtc, x, y, old_fb);
+
+ intel_update_watermarks(dev);
+@@ -4199,7 +4264,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+ }
+
+ /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
+-static void intel_crtc_update_cursor(struct drm_crtc *crtc)
++static void intel_crtc_update_cursor(struct drm_crtc *crtc,
++ bool on)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -4212,7 +4278,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+
+ pos = 0;
+
+- if (intel_crtc->cursor_on && crtc->fb) {
++ if (on && crtc->enabled && crtc->fb) {
+ base = intel_crtc->cursor_addr;
+ if (x > (int) crtc->fb->width)
+ base = 0;
+@@ -4324,7 +4390,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ addr = obj_priv->phys_obj->handle->busaddr;
+ }
+
+- if (!IS_I9XX(dev))
++ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, (height << 12) | width);
+
+ finish:
+@@ -4344,7 +4410,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ intel_crtc->cursor_width = width;
+ intel_crtc->cursor_height = height;
+
+- intel_crtc_update_cursor(crtc);
++ intel_crtc_update_cursor(crtc, true);
+
+ return 0;
+ fail_unpin:
+@@ -4363,7 +4429,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+ intel_crtc->cursor_x = x;
+ intel_crtc->cursor_y = y;
+
+- intel_crtc_update_cursor(crtc);
++ intel_crtc_update_cursor(crtc, true);
+
+ return 0;
+ }
+@@ -4432,7 +4498,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct intel_crtc *intel_crtc;
+ struct drm_crtc *possible_crtc;
+	struct drm_crtc *supported_crtc = NULL;
+- struct drm_encoder *encoder = &intel_encoder->enc;
++ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = NULL;
+ struct drm_device *dev = encoder->dev;
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+@@ -4513,7 +4579,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector, int dpms_mode)
+ {
+- struct drm_encoder *encoder = &intel_encoder->enc;
++ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+@@ -4559,7 +4625,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ }
+
+- if (IS_I9XX(dev)) {
++ if (!IS_GEN2(dev)) {
+ if (IS_PINEVIEW(dev))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+@@ -4663,8 +4729,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
+ struct drm_device *dev = (struct drm_device *)arg;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+- DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
+-
+ dev_priv->busy = false;
+
+ queue_work(dev_priv->wq, &dev_priv->idle_work);
+@@ -4678,14 +4742,12 @@ static void intel_crtc_idle_timer(unsigned long arg)
+ struct drm_crtc *crtc = &intel_crtc->base;
+ drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+
+- DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
+-
+ intel_crtc->busy = false;
+
+ queue_work(dev_priv->wq, &dev_priv->idle_work);
+ }
+
+-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
++static void intel_increase_pllclock(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -4720,9 +4782,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+ }
+
+ /* Schedule downclock */
+- if (schedule)
+- mod_timer(&intel_crtc->idle_timer, jiffies +
+- msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
++ mod_timer(&intel_crtc->idle_timer, jiffies +
++ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+ }
+
+ static void intel_decrease_pllclock(struct drm_crtc *crtc)
+@@ -4858,7 +4919,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+ I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
+ }
+ /* Non-busy -> busy, upclock */
+- intel_increase_pllclock(crtc, true);
++ intel_increase_pllclock(crtc);
+ intel_crtc->busy = true;
+ } else {
+ /* Busy -> busy, put off timer */
+@@ -4872,8 +4933,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+ static void intel_crtc_destroy(struct drm_crtc *crtc)
+ {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct intel_unpin_work *work;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev->event_lock, flags);
++ work = intel_crtc->unpin_work;
++ intel_crtc->unpin_work = NULL;
++ spin_unlock_irqrestore(&dev->event_lock, flags);
++
++ if (work) {
++ cancel_work_sync(&work->work);
++ kfree(work);
++ }
+
+ drm_crtc_cleanup(crtc);
++
+ kfree(intel_crtc);
+ }
+
+@@ -4928,12 +5003,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+- obj_priv = to_intel_bo(work->pending_flip_obj);
+-
+- /* Initial scanout buffer will have a 0 pending flip count */
+- if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+- atomic_dec_and_test(&obj_priv->pending_flip))
+- DRM_WAKEUP(&dev_priv->pending_flip_queue);
++ obj_priv = to_intel_bo(work->old_fb_obj);
++ atomic_clear_mask(1 << intel_crtc->plane,
++ &obj_priv->pending_flip.counter);
++ if (atomic_read(&obj_priv->pending_flip) == 0)
++ wake_up(&dev_priv->pending_flip_queue);
+ schedule_work(&work->work);
+
+ trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
+@@ -5014,7 +5088,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ obj = intel_fb->obj;
+
+ mutex_lock(&dev->struct_mutex);
+- ret = intel_pin_and_fence_fb_obj(dev, obj);
++ ret = intel_pin_and_fence_fb_obj(dev, obj, true);
+ if (ret)
+ goto cleanup_work;
+
+@@ -5023,29 +5097,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ drm_gem_object_reference(obj);
+
+ crtc->fb = fb;
+- ret = i915_gem_object_flush_write_domain(obj);
+- if (ret)
+- goto cleanup_objs;
+
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto cleanup_objs;
+
+- obj_priv = to_intel_bo(obj);
+- atomic_inc(&obj_priv->pending_flip);
++ /* Block clients from rendering to the new back buffer until
++ * the flip occurs and the object is no longer visible.
++ */
++ atomic_add(1 << intel_crtc->plane,
++ &to_intel_bo(work->old_fb_obj)->pending_flip);
++
+ work->pending_flip_obj = obj;
++ obj_priv = to_intel_bo(obj);
+
+ if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ u32 flip_mask;
+
++ /* Can't queue multiple flips, so wait for the previous
++ * one to finish before executing the next.
++ */
++ BEGIN_LP_RING(2);
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+-
+- BEGIN_LP_RING(2);
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+- OUT_RING(0);
++ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
+
+@@ -5126,15 +5204,14 @@ cleanup_work:
+ return ret;
+ }
+
+-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
++static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .dpms = intel_crtc_dpms,
+ .mode_fixup = intel_crtc_mode_fixup,
+ .mode_set = intel_crtc_mode_set,
+ .mode_set_base = intel_pipe_set_base,
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+- .prepare = intel_crtc_prepare,
+- .commit = intel_crtc_commit,
+ .load_lut = intel_crtc_load_lut,
++ .disable = intel_crtc_disable,
+ };
+
+ static const struct drm_crtc_funcs intel_crtc_funcs = {
+@@ -5160,8 +5237,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
+ drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
+- intel_crtc->pipe = pipe;
+- intel_crtc->plane = pipe;
+ for (i = 0; i < 256; i++) {
+ intel_crtc->lut_r[i] = i;
+ intel_crtc->lut_g[i] = i;
+@@ -5171,9 +5246,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
+ /* Swap pipes & planes for FBC on pre-965 */
+ intel_crtc->pipe = pipe;
+ intel_crtc->plane = pipe;
+- if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
++ if (IS_MOBILE(dev) && IS_GEN3(dev)) {
+ DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
+- intel_crtc->plane = ((pipe == 0) ? 1 : 0);
++ intel_crtc->plane = !pipe;
+ }
+
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+@@ -5183,6 +5258,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
+
+ intel_crtc->cursor_addr = 0;
+ intel_crtc->dpms_mode = -1;
++ intel_crtc->active = true; /* force the pipe off on setup_init_config */
++
++ if (HAS_PCH_SPLIT(dev)) {
++ intel_helper_funcs.prepare = ironlake_crtc_prepare;
++ intel_helper_funcs.commit = ironlake_crtc_commit;
++ } else {
++ intel_helper_funcs.prepare = i9xx_crtc_prepare;
++ intel_helper_funcs.commit = i9xx_crtc_commit;
++ }
++
+ drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+
+ intel_crtc->busy = false;
+@@ -5218,38 +5303,25 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ return 0;
+ }
+
+-struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+-{
+- struct drm_crtc *crtc = NULL;
+-
+- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+- if (intel_crtc->pipe == pipe)
+- break;
+- }
+- return crtc;
+-}
+-
+ static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+ {
++ struct intel_encoder *encoder;
+ int index_mask = 0;
+- struct drm_encoder *encoder;
+ int entry = 0;
+
+- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+- if (type_mask & intel_encoder->clone_mask)
++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
++ if (type_mask & encoder->clone_mask)
+ index_mask |= (1 << entry);
+ entry++;
+ }
++
+ return index_mask;
+ }
+
+-
+ static void intel_setup_outputs(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct drm_encoder *encoder;
++ struct intel_encoder *encoder;
+ bool dpd_is_edp = false;
+
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+@@ -5338,12 +5410,10 @@ static void intel_setup_outputs(struct drm_device *dev)
+ if (SUPPORTS_TV(dev))
+ intel_tv_init(dev);
+
+- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+-
+- encoder->possible_crtcs = intel_encoder->crtc_mask;
+- encoder->possible_clones = intel_encoder_clones(dev,
+- intel_encoder->clone_mask);
++ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
++ encoder->base.possible_crtcs = encoder->crtc_mask;
++ encoder->base.possible_clones =
++ intel_encoder_clones(dev, encoder->clone_mask);
+ }
+ }
+
+@@ -5377,8 +5447,25 @@ int intel_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
+ {
++ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ int ret;
+
++ if (obj_priv->tiling_mode == I915_TILING_Y)
++ return -EINVAL;
++
++ if (mode_cmd->pitch & 63)
++ return -EINVAL;
++
++ switch (mode_cmd->bpp) {
++ case 8:
++ case 16:
++ case 24:
++ case 32:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+@@ -5487,6 +5574,10 @@ void ironlake_enable_drps(struct drm_device *dev)
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u8 fmax, fmin, fstart, vstart;
+
++ /* Enable temp reporting */
++ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
++ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
++
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+@@ -5502,20 +5593,19 @@ void ironlake_enable_drps(struct drm_device *dev)
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+- fstart = fmax;
+
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+- dev_priv->fmax = fstart; /* IPS callback will increase this */
++ dev_priv->fmax = fmax; /* IPS callback will increase this */
+ dev_priv->fstart = fstart;
+
+- dev_priv->max_delay = fmax;
++ dev_priv->max_delay = fstart;
+ dev_priv->min_delay = fmin;
+ dev_priv->cur_delay = fstart;
+
+- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
+- fstart);
++ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
++ fmax, fmin, fstart);
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+@@ -5529,7 +5619,7 @@ void ironlake_enable_drps(struct drm_device *dev)
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
++ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ msleep(1);
+
+@@ -5660,7 +5750,7 @@ void intel_init_clock_gating(struct drm_device *dev)
+ if (HAS_PCH_SPLIT(dev)) {
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+- if (IS_IRONLAKE(dev)) {
++ if (IS_GEN5(dev)) {
+ /* Required for FBC */
+ dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+ /* Required for CxSR */
+@@ -5674,13 +5764,20 @@ void intel_init_clock_gating(struct drm_device *dev)
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ /*
++ * On Ibex Peak and Cougar Point, we need to disable clock
++ * gating for the panel power sequencer or it will fail to
++ * start up when no ports are active.
++ */
++ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
++
++ /*
+ * According to the spec the following bits should be set in
+ * order to enable memory self-refresh
+ * The bit 22/21 of 0x42004
+ * The bit 5 of 0x42020
+ * The bit 15 of 0x45000
+ */
+- if (IS_IRONLAKE(dev)) {
++ if (IS_GEN5(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+@@ -5728,20 +5825,20 @@ void intel_init_clock_gating(struct drm_device *dev)
+ if (IS_GM45(dev))
+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+- } else if (IS_I965GM(dev)) {
++ } else if (IS_CRESTLINE(dev)) {
+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ I915_WRITE16(DEUC, 0);
+- } else if (IS_I965G(dev)) {
++ } else if (IS_BROADWATER(dev)) {
+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ I965_RCC_CLOCK_GATE_DISABLE |
+ I965_RCPB_CLOCK_GATE_DISABLE |
+ I965_ISC_CLOCK_GATE_DISABLE |
+ I965_FBC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+- } else if (IS_I9XX(dev)) {
++ } else if (IS_GEN3(dev)) {
+ u32 dstate = I915_READ(D_STATE);
+
+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+@@ -5823,7 +5920,7 @@ static void intel_init_display(struct drm_device *dev)
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+- } else if (IS_I965GM(dev)) {
++ } else if (IS_CRESTLINE(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+@@ -5856,7 +5953,7 @@ static void intel_init_display(struct drm_device *dev)
+
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev)) {
+- if (IS_IRONLAKE(dev)) {
++ if (IS_GEN5(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+@@ -5883,9 +5980,9 @@ static void intel_init_display(struct drm_device *dev)
+ dev_priv->display.update_wm = pineview_update_wm;
+ } else if (IS_G4X(dev))
+ dev_priv->display.update_wm = g4x_update_wm;
+- else if (IS_I965G(dev))
++ else if (IS_GEN4(dev))
+ dev_priv->display.update_wm = i965_update_wm;
+- else if (IS_I9XX(dev)) {
++ else if (IS_GEN3(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+@@ -5999,24 +6096,24 @@ void intel_modeset_init(struct drm_device *dev)
+
+ intel_init_display(dev);
+
+- if (IS_I965G(dev)) {
+- dev->mode_config.max_width = 8192;
+- dev->mode_config.max_height = 8192;
+- } else if (IS_I9XX(dev)) {
++ if (IS_GEN2(dev)) {
++ dev->mode_config.max_width = 2048;
++ dev->mode_config.max_height = 2048;
++ } else if (IS_GEN3(dev)) {
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ } else {
+- dev->mode_config.max_width = 2048;
+- dev->mode_config.max_height = 2048;
++ dev->mode_config.max_width = 8192;
++ dev->mode_config.max_height = 8192;
+ }
+
+ /* set memory base */
+- if (IS_I9XX(dev))
+- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+- else
++ if (IS_GEN2(dev))
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
++ else
++ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+
+- if (IS_MOBILE(dev) || IS_I9XX(dev))
++ if (IS_MOBILE(dev) || !IS_GEN2(dev))
+ dev_priv->num_pipe = 2;
+ else
+ dev_priv->num_pipe = 1;
+@@ -6052,10 +6149,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+
++ drm_kms_helper_poll_fini(dev);
+ mutex_lock(&dev->struct_mutex);
+
+- drm_kms_helper_poll_fini(dev);
+- intel_fbdev_fini(dev);
++ intel_unregister_dsm_handler();
++
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ /* Skip inactive CRTCs */
+@@ -6063,12 +6161,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+- intel_increase_pllclock(crtc, false);
+- del_timer_sync(&intel_crtc->idle_timer);
++ intel_increase_pllclock(crtc);
+ }
+
+- del_timer_sync(&dev_priv->idle_timer);
+-
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
+@@ -6097,33 +6192,36 @@ void intel_modeset_cleanup(struct drm_device *dev)
+
+ mutex_unlock(&dev->struct_mutex);
+
++ /* Disable the irq before mode object teardown, for the irq might
++ * enqueue unpin/hotplug work. */
++ drm_irq_uninstall(dev);
++ cancel_work_sync(&dev_priv->hotplug_work);
++
++ /* Shut off idle work before the crtcs get freed. */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ intel_crtc = to_intel_crtc(crtc);
++ del_timer_sync(&intel_crtc->idle_timer);
++ }
++ del_timer_sync(&dev_priv->idle_timer);
++ cancel_work_sync(&dev_priv->idle_work);
++
+ drm_mode_config_cleanup(dev);
+ }
+
+-
+ /*
+ * Return which encoder is currently attached for connector.
+ */
+-struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
++struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+ {
+- struct drm_mode_object *obj;
+- struct drm_encoder *encoder;
+- int i;
+-
+- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+- if (connector->encoder_ids[i] == 0)
+- break;
+-
+- obj = drm_mode_object_find(connector->dev,
+- connector->encoder_ids[i],
+- DRM_MODE_OBJECT_ENCODER);
+- if (!obj)
+- continue;
++ return &intel_attached_encoder(connector)->base;
++}
+
+- encoder = obj_to_encoder(obj);
+- return encoder;
+- }
+- return NULL;
++void intel_connector_attach_encoder(struct intel_connector *connector,
++ struct intel_encoder *encoder)
++{
++ connector->encoder = encoder;
++ drm_mode_connector_attach_encoder(&connector->base,
++ &encoder->base);
+ }
+
+ /*
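For reference, the WM0 arithmetic introduced by the intel_display.c hunks above
can be exercised standalone. This is a minimal sketch under stated assumptions,
not the driver's code: the latency, cacheline, and guard constants are
illustrative stand-ins for the driver's ironlake_*_wm_info tables and
ILK_LP0_* defines.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Illustrative stand-ins; the real values live in the driver's wm tables */
#define LP0_PLANE_LATENCY_NS  700
#define LP0_CURSOR_LATENCY_NS 1300
#define CACHELINE 64
#define GUARD 2

int main(void)
{
	int clock = 148500;   /* mode pixel clock in kHz */
	int htotal = 2200;    /* total pixels per scanline */
	int pixel_size = 4;   /* bytes per pixel */
	unsigned long line_time_us, line_count;
	int entries, plane_wm, cursor_wm;

	/* Small buffer method: bytes drained during the LP0 latency */
	entries = ((clock * pixel_size / 1000) * LP0_PLANE_LATENCY_NS) / 1000;
	plane_wm = DIV_ROUND_UP(entries, CACHELINE) + GUARD;

	/* Large buffer method: whole scanlines of a 64-pixel-wide cursor */
	line_time_us = (htotal * 1000) / clock;
	line_count = (LP0_CURSOR_LATENCY_NS / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	cursor_wm = DIV_ROUND_UP(entries, CACHELINE) + GUARD;

	printf("plane_wm %d, cursor_wm %d\n", plane_wm, cursor_wm);
	return 0;
}

With these assumed numbers it prints plane_wm 9, cursor_wm 6, matching the
hand calculation in the comment above.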
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 9ab8708..2d3dee9 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -42,15 +42,13 @@
+
+ #define DP_LINK_CONFIGURATION_SIZE 9
+
+-#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
+-#define IS_PCH_eDP(i) ((i)->is_pch_edp)
+-
+ struct intel_dp {
+ struct intel_encoder base;
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
++ int force_audio;
+ int dpms_mode;
+ uint8_t link_bw;
+ uint8_t lane_count;
+@@ -58,14 +56,69 @@ struct intel_dp {
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+ bool is_pch_edp;
++ uint8_t train_set[4];
++ uint8_t link_status[DP_LINK_STATUS_SIZE];
++
++ struct drm_property *force_audio_property;
+ };
+
++/**
++ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
++ * @intel_dp: DP struct
++ *
++ * If a CPU or PCH DP output is attached to an eDP panel, this function
++ * will return true, and false otherwise.
++ */
++static bool is_edp(struct intel_dp *intel_dp)
++{
++ return intel_dp->base.type == INTEL_OUTPUT_EDP;
++}
++
++/**
++ * is_pch_edp - is the port on the PCH and attached to an eDP panel?
++ * @intel_dp: DP struct
++ *
++ * Returns true if the given DP struct corresponds to a PCH DP port attached
++ * to an eDP panel, false otherwise. Helpful for determining whether we
++ * may need FDI resources for a given DP output or not.
++ */
++static bool is_pch_edp(struct intel_dp *intel_dp)
++{
++ return intel_dp->is_pch_edp;
++}
++
+ static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+ {
+- return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
++ return container_of(encoder, struct intel_dp, base.base);
++}
++
++static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
++{
++ return container_of(intel_attached_encoder(connector),
++ struct intel_dp, base);
++}
++
++/**
++ * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
++ * @encoder: DRM encoder
++ *
++ * Return true if @encoder corresponds to a PCH attached eDP panel. Needed
++ * by intel_display.c.
++ */
++bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
++{
++ struct intel_dp *intel_dp;
++
++ if (!encoder)
++ return false;
++
++ intel_dp = enc_to_intel_dp(encoder);
++
++ return is_pch_edp(intel_dp);
+ }
+
+-static void intel_dp_link_train(struct intel_dp *intel_dp);
++static void intel_dp_start_link_train(struct intel_dp *intel_dp);
++static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+ static void intel_dp_link_down(struct intel_dp *intel_dp);
+
+ void
+@@ -129,8 +182,8 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+- return (pixel_clock * dev_priv->edp_bpp) / 8;
++ if (is_edp(intel_dp))
++ return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
+ else
+ return pixel_clock * 3;
+ }
+@@ -145,15 +198,13 @@ static int
+ intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
++ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+ int max_lanes = intel_dp_max_lane_count(intel_dp);
+
+- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+- dev_priv->panel_fixed_mode) {
++ if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
+ if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+@@ -163,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
+
+ /* only refuse the mode on non-eDP since we have seen some weird eDP panels
+ which are outside spec tolerances but somehow work by magic */
+- if (!IS_eDP(intel_dp) &&
++ if (!is_edp(intel_dp) &&
+ (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
+ > intel_dp_max_data_rate(max_link_clock, max_lanes)))
+ return MODE_CLOCK_HIGH;
+@@ -233,7 +284,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ uint8_t *recv, int recv_size)
+ {
+ uint32_t output_reg = intel_dp->output_reg;
+- struct drm_device *dev = intel_dp->base.enc.dev;
++ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_data = ch_ctl + 4;
+@@ -246,8 +297,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ /* The clock divider is based off the hrawclk,
+ * and would like to run at 2MHz. So, take the
+ * hrawclk value and divide by 2 and use that
++ *
++ * Note that PCH attached eDP panels should use a 125MHz input
++ * clock divider.
+ */
+- if (IS_eDP(intel_dp)) {
++ if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
+ if (IS_GEN6(dev))
+ aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ else
+@@ -519,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+
+- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+- dev_priv->panel_fixed_mode) {
++ if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
+ intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+ intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+ mode, adjusted_mode);
+@@ -531,6 +584,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ mode->clock = dev_priv->panel_fixed_mode->clock;
+ }
+
++ /* Just use VBT values for eDP */
++ if (is_edp(intel_dp)) {
++ intel_dp->lane_count = dev_priv->edp.lanes;
++ intel_dp->link_bw = dev_priv->edp.rate;
++ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
++ DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
++ intel_dp->link_bw, intel_dp->lane_count,
++ adjusted_mode->clock);
++ return true;
++ }
++
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = 0; clock <= max_clock; clock++) {
+ int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+@@ -549,19 +613,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ }
+ }
+
+- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+- /* okay we failed just pick the highest */
+- intel_dp->lane_count = max_lane_count;
+- intel_dp->link_bw = bws[max_clock];
+- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+- DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+- "count %d clock %d\n",
+- intel_dp->link_bw, intel_dp->lane_count,
+- adjusted_mode->clock);
+-
+- return true;
+- }
+-
+ return false;
+ }
+
+@@ -598,25 +649,6 @@ intel_dp_compute_m_n(int bpp,
+ intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+ }
+
+-bool intel_pch_has_edp(struct drm_crtc *crtc)
+-{
+- struct drm_device *dev = crtc->dev;
+- struct drm_mode_config *mode_config = &dev->mode_config;
+- struct drm_encoder *encoder;
+-
+- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+- struct intel_dp *intel_dp;
+-
+- if (encoder->crtc != crtc)
+- continue;
+-
+- intel_dp = enc_to_intel_dp(encoder);
+- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+- return intel_dp->is_pch_edp;
+- }
+- return false;
+-}
+-
+ void
+ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+@@ -641,8 +673,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = intel_dp->lane_count;
+- if (IS_PCH_eDP(intel_dp))
+- bpp = dev_priv->edp_bpp;
++ break;
++ } else if (is_edp(intel_dp)) {
++ lane_count = dev_priv->edp.lanes;
++ bpp = dev_priv->edp.bpp;
+ break;
+ }
+ }
+@@ -698,7 +732,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ {
+ struct drm_device *dev = encoder->dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+- struct drm_crtc *crtc = intel_dp->base.enc.crtc;
++ struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_dp->DP = (DP_VOLTAGE_0_4 |
+@@ -709,7 +743,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+
+- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
++ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+ else
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+@@ -744,7 +778,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
+ intel_dp->DP |= DP_PIPEB_SELECT;
+
+- if (IS_eDP(intel_dp)) {
++ if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
+ /* don't miss out required setting for eDP */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+@@ -754,13 +788,16 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ }
+ }
+
+-static void ironlake_edp_panel_on (struct drm_device *dev)
++/* Returns true if the panel was already on when called */
++static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
+ {
++ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 pp;
++ u32 pp, idle_on = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
++ u32 idle_on_mask = PP_ON | PP_SEQUENCE_STATE_MASK;
+
+ if (I915_READ(PCH_PP_STATUS) & PP_ON)
+- return;
++ return true;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+
+@@ -771,21 +808,30 @@ static void ironlake_edp_panel_on (struct drm_device *dev)
+
+ pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
+ I915_WRITE(PCH_PP_CONTROL, pp);
++ POSTING_READ(PCH_PP_CONTROL);
+
+- if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
++ /* Ouch. We need to wait here for some panels, like Dell e6510
++ * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
++ */
++ msleep(300);
++
++ if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on,
++ 5000))
+ DRM_ERROR("panel on wait timed out: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS));
+
+- pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
++
++ return false;
+ }
+
+ static void ironlake_edp_panel_off (struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 pp;
++ u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
++ PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+
+@@ -796,15 +842,20 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
+
+ pp &= ~POWER_TARGET_ON;
+ I915_WRITE(PCH_PP_CONTROL, pp);
++ POSTING_READ(PCH_PP_CONTROL);
+
+- if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
++ if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
+ DRM_ERROR("panel off wait timed out: 0x%08x\n",
+ I915_READ(PCH_PP_STATUS));
+
+- /* Make sure VDD is enabled so DP AUX will work */
+- pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
++ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
++
++ /* Ouch. We need to wait here for some panels, like Dell e6510
++ * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
++ */
++ msleep(300);
+ }
+
+ static void ironlake_edp_backlight_on (struct drm_device *dev)
+@@ -813,6 +864,13 @@ static void ironlake_edp_backlight_on (struct drm_device *dev)
+ u32 pp;
+
+ DRM_DEBUG_KMS("\n");
++ /*
++ * If we enable the backlight right away following a panel power
++ * on, we may see slight flicker as the panel syncs with the eDP
++ * link. So delay a bit to make sure the image is solid before
++ * allowing it to appear.
++ */
++ msleep(300);
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp |= EDP_BLC_ENABLE;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+@@ -837,8 +895,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+
+ DRM_DEBUG_KMS("\n");
+ dpa_ctl = I915_READ(DP_A);
+- dpa_ctl &= ~DP_PLL_ENABLE;
++ dpa_ctl |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
++ POSTING_READ(DP_A);
++ udelay(200);
+ }
+
+ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+@@ -848,8 +908,9 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+ u32 dpa_ctl;
+
+ dpa_ctl = I915_READ(DP_A);
+- dpa_ctl |= DP_PLL_ENABLE;
++ dpa_ctl &= ~DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
++ POSTING_READ(DP_A);
+ udelay(200);
+ }
+
+@@ -857,29 +918,32 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
+ {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = encoder->dev;
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+- if (IS_eDP(intel_dp)) {
++ if (is_edp(intel_dp)) {
+ ironlake_edp_backlight_off(dev);
+- ironlake_edp_panel_on(dev);
+- ironlake_edp_pll_on(encoder);
++ ironlake_edp_panel_off(dev);
++ ironlake_edp_panel_on(intel_dp);
++ if (!is_pch_edp(intel_dp))
++ ironlake_edp_pll_on(encoder);
++ else
++ ironlake_edp_pll_off(encoder);
+ }
+- if (dp_reg & DP_PORT_EN)
+- intel_dp_link_down(intel_dp);
++ intel_dp_link_down(intel_dp);
+ }
+
+ static void intel_dp_commit(struct drm_encoder *encoder)
+ {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_device *dev = encoder->dev;
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+- if (!(dp_reg & DP_PORT_EN)) {
+- intel_dp_link_train(intel_dp);
+- }
+- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
++ intel_dp_start_link_train(intel_dp);
++
++ if (is_edp(intel_dp))
++ ironlake_edp_panel_on(intel_dp);
++
++ intel_dp_complete_link_train(intel_dp);
++
++ if (is_edp(intel_dp))
+ ironlake_edp_backlight_on(dev);
+ }
+
+@@ -892,22 +956,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (mode != DRM_MODE_DPMS_ON) {
+- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
++ if (is_edp(intel_dp))
+ ironlake_edp_backlight_off(dev);
++ intel_dp_link_down(intel_dp);
++ if (is_edp(intel_dp))
+ ironlake_edp_panel_off(dev);
+- }
+- if (dp_reg & DP_PORT_EN)
+- intel_dp_link_down(intel_dp);
+- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
++ if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
+ ironlake_edp_pll_off(encoder);
+ } else {
++ if (is_edp(intel_dp))
++ ironlake_edp_panel_on(intel_dp);
+ if (!(dp_reg & DP_PORT_EN)) {
+- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+- ironlake_edp_panel_on(dev);
+- intel_dp_link_train(intel_dp);
+- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+- ironlake_edp_backlight_on(dev);
++ intel_dp_start_link_train(intel_dp);
++ intel_dp_complete_link_train(intel_dp);
+ }
++ if (is_edp(intel_dp))
++ ironlake_edp_backlight_on(dev);
+ }
+ intel_dp->dpms_mode = mode;
+ }
+@@ -917,14 +981,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
+ * link status information
+ */
+ static bool
+-intel_dp_get_link_status(struct intel_dp *intel_dp,
+- uint8_t link_status[DP_LINK_STATUS_SIZE])
++intel_dp_get_link_status(struct intel_dp *intel_dp)
+ {
+ int ret;
+
+ ret = intel_dp_aux_native_read(intel_dp,
+ DP_LANE0_1_STATUS,
+- link_status, DP_LINK_STATUS_SIZE);
++ intel_dp->link_status, DP_LINK_STATUS_SIZE);
+ if (ret != DP_LINK_STATUS_SIZE)
+ return false;
+ return true;
+@@ -999,18 +1062,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+ }
+
+ static void
+-intel_get_adjust_train(struct intel_dp *intel_dp,
+- uint8_t link_status[DP_LINK_STATUS_SIZE],
+- int lane_count,
+- uint8_t train_set[4])
++intel_get_adjust_train(struct intel_dp *intel_dp)
+ {
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+
+- for (lane = 0; lane < lane_count; lane++) {
+- uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
+- uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
++ for (lane = 0; lane < intel_dp->lane_count; lane++) {
++ uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
++ uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+
+ if (this_v > v)
+ v = this_v;
+@@ -1025,15 +1085,25 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
+ p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+- train_set[lane] = v | p;
++ intel_dp->train_set[lane] = v | p;
+ }
+
+ static uint32_t
+-intel_dp_signal_levels(uint8_t train_set, int lane_count)
++intel_dp_signal_levels(struct intel_dp *intel_dp)
+ {
+- uint32_t signal_levels = 0;
++ struct drm_device *dev = intel_dp->base.base.dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ uint32_t signal_levels = 0;
++ u8 train_set = intel_dp->train_set[0];
++ u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
++ u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
++
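++ /* For eDP, use the fixed vswing/pre-emphasis values from the VBT
++ * rather than the sink's training adjust requests. */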
++ if (is_edp(intel_dp)) {
++ vswing = dev_priv->edp.vswing;
++ preemphasis = dev_priv->edp.preemphasis;
++ }
+
+- switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
++ switch (vswing) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ default:
+ signal_levels |= DP_VOLTAGE_0_4;
+@@ -1048,7 +1118,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
+ signal_levels |= DP_VOLTAGE_1_2;
+ break;
+ }
+- switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
++ switch (preemphasis) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ default:
+ signal_levels |= DP_PRE_EMPHASIS_0;
+@@ -1116,18 +1186,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
+ DP_LANE_CHANNEL_EQ_DONE|\
+ DP_LANE_SYMBOL_LOCKED)
+ static bool
+-intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
++intel_channel_eq_ok(struct intel_dp *intel_dp)
+ {
+ uint8_t lane_align;
+ uint8_t lane_status;
+ int lane;
+
+- lane_align = intel_dp_link_status(link_status,
++ lane_align = intel_dp_link_status(intel_dp->link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+- for (lane = 0; lane < lane_count; lane++) {
+- lane_status = intel_get_lane_status(link_status, lane);
++ for (lane = 0; lane < intel_dp->lane_count; lane++) {
++ lane_status = intel_get_lane_status(intel_dp->link_status, lane);
+ if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ return false;
+ }
+@@ -1135,159 +1205,194 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+ }
+
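++/*
++ * DPCD 1.1 eDP sinks may advertise DP_NO_AUX_HANDSHAKE_LINK_TRAINING
++ * (cached as dev_priv->no_aux_handshake at init time); for those panels
++ * link training can skip the AUX channel writes and status polling.
++ */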
+ static bool
++intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
++{
++ struct drm_device *dev = intel_dp->base.base.dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
++ return false;
++
++ return true;
++}
++
++static bool
+ intel_dp_set_link_train(struct intel_dp *intel_dp,
+ uint32_t dp_reg_value,
+- uint8_t dp_train_pat,
+- uint8_t train_set[4])
++ uint8_t dp_train_pat)
+ {
+- struct drm_device *dev = intel_dp->base.enc.dev;
++ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ I915_WRITE(intel_dp->output_reg, dp_reg_value);
+ POSTING_READ(intel_dp->output_reg);
+
++ if (!intel_dp_aux_handshake_required(intel_dp))
++ return true;
++
+ intel_dp_aux_native_write_1(intel_dp,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ ret = intel_dp_aux_native_write(intel_dp,
+- DP_TRAINING_LANE0_SET, train_set, 4);
++ DP_TRAINING_LANE0_SET,
++ intel_dp->train_set, 4);
+ if (ret != 4)
+ return false;
+
+ return true;
+ }
+
++/* Enable corresponding port and start training pattern 1 */
+ static void
+-intel_dp_link_train(struct intel_dp *intel_dp)
++intel_dp_start_link_train(struct intel_dp *intel_dp)
+ {
+- struct drm_device *dev = intel_dp->base.enc.dev;
++ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- uint8_t train_set[4];
+- uint8_t link_status[DP_LINK_STATUS_SIZE];
++ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ int i;
+ uint8_t voltage;
+ bool clock_recovery = false;
+- bool channel_eq = false;
+ int tries;
+ u32 reg;
+ uint32_t DP = intel_dp->DP;
+- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
+
+ /* Enable output, wait for it to become active */
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+- /* Write the link configuration data */
+- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+- intel_dp->link_configuration,
+- DP_LINK_CONFIGURATION_SIZE);
++ if (intel_dp_aux_handshake_required(intel_dp))
++ /* Write the link configuration data */
++ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
++ intel_dp->link_configuration,
++ DP_LINK_CONFIGURATION_SIZE);
+
+ DP |= DP_PORT_EN;
+- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
++ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ else
+ DP &= ~DP_LINK_TRAIN_MASK;
+- memset(train_set, 0, 4);
++ memset(intel_dp->train_set, 0, 4);
+ voltage = 0xff;
+ tries = 0;
+ clock_recovery = false;
+ for (;;) {
+- /* Use train_set[0] to set the voltage and pre emphasis values */
++ /* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */
+ uint32_t signal_levels;
+- if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
+- signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
++ if (IS_GEN6(dev) && is_edp(intel_dp)) {
++ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
++ signal_levels = intel_dp_signal_levels(intel_dp);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
++ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_1;
+
+ if (!intel_dp_set_link_train(intel_dp, reg,
+- DP_TRAINING_PATTERN_1, train_set))
++ DP_TRAINING_PATTERN_1))
+ break;
+ /* Set training pattern 1 */
+
+- udelay(100);
+- if (!intel_dp_get_link_status(intel_dp, link_status))
++ udelay(500);
++ if (!intel_dp_aux_handshake_required(intel_dp)) {
+ break;
++ } else {
++ if (!intel_dp_get_link_status(intel_dp))
++ break;
+
+- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+- clock_recovery = true;
+- break;
+- }
+-
+- /* Check to see if we've tried the max voltage */
+- for (i = 0; i < intel_dp->lane_count; i++)
+- if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
++ if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
++ clock_recovery = true;
+ break;
+- if (i == intel_dp->lane_count)
+- break;
++ }
+
+- /* Check to see if we've tried the same voltage 5 times */
+- if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+- ++tries;
+- if (tries == 5)
++ /* Check to see if we've tried the max voltage */
++ for (i = 0; i < intel_dp->lane_count; i++)
++ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
++ break;
++ if (i == intel_dp->lane_count)
+ break;
+- } else
+- tries = 0;
+- voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+- /* Compute new train_set as requested by target */
+- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
++ /* Check to see if we've tried the same voltage 5 times */
++ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++ ++tries;
++ if (tries == 5)
++ break;
++ } else
++ tries = 0;
++ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
++
++ /* Compute new intel_dp->train_set as requested by target */
++ intel_get_adjust_train(intel_dp);
++ }
+ }
+
++ intel_dp->DP = DP;
++}
++
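++/*
++ * Link training is split in two: intel_dp_start_link_train() does clock
++ * recovery with training pattern 1, and this function finishes channel
++ * equalization with pattern 2. Callers such as intel_dp_commit() can
++ * then power up the eDP panel between the two phases.
++ */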
++static void
++intel_dp_complete_link_train(struct intel_dp *intel_dp)
++{
++ struct drm_device *dev = intel_dp->base.base.dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ bool channel_eq = false;
++ int tries;
++ u32 reg;
++ uint32_t DP = intel_dp->DP;
++
+ /* channel equalization */
+ tries = 0;
+ channel_eq = false;
+ for (;;) {
+- /* Use train_set[0] to set the voltage and pre emphasis values */
++ /* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */
+ uint32_t signal_levels;
+
+- if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
+- signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
++ if (IS_GEN6(dev) && is_edp(intel_dp)) {
++ signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
++ signal_levels = intel_dp_signal_levels(intel_dp);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
++ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_2;
+
+ /* channel eq pattern */
+ if (!intel_dp_set_link_train(intel_dp, reg,
+- DP_TRAINING_PATTERN_2, train_set))
++ DP_TRAINING_PATTERN_2))
+ break;
+
+- udelay(400);
+- if (!intel_dp_get_link_status(intel_dp, link_status))
+- break;
++ udelay(500);
+
+- if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
+- channel_eq = true;
++ if (!intel_dp_aux_handshake_required(intel_dp)) {
+ break;
+- }
++ } else {
++ if (!intel_dp_get_link_status(intel_dp))
++ break;
+
+- /* Try 5 times */
+- if (tries > 5)
+- break;
++ if (intel_channel_eq_ok(intel_dp)) {
++ channel_eq = true;
++ break;
++ }
+
+- /* Compute new train_set as requested by target */
+- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+- ++tries;
+- }
++ /* Try 5 times */
++ if (tries > 5)
++ break;
+
+- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
++ /* Compute new intel_dp->train_set as requested by target */
++ intel_get_adjust_train(intel_dp);
++ ++tries;
++ }
++ }
++ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
+ reg = DP | DP_LINK_TRAIN_OFF_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_OFF;
+@@ -1301,32 +1406,31 @@ intel_dp_link_train(struct intel_dp *intel_dp)
+ static void
+ intel_dp_link_down(struct intel_dp *intel_dp)
+ {
+- struct drm_device *dev = intel_dp->base.enc.dev;
++ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t DP = intel_dp->DP;
+
+ DRM_DEBUG_KMS("\n");
+
+- if (IS_eDP(intel_dp)) {
++ if (is_edp(intel_dp)) {
+ DP &= ~DP_PLL_ENABLE;
+ I915_WRITE(intel_dp->output_reg, DP);
+ POSTING_READ(intel_dp->output_reg);
+ udelay(100);
+ }
+
+- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
++ if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+- POSTING_READ(intel_dp->output_reg);
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+- POSTING_READ(intel_dp->output_reg);
+ }
++ POSTING_READ(intel_dp->output_reg);
+
+- udelay(17000);
++ msleep(17);
+
+- if (IS_eDP(intel_dp))
++ if (is_edp(intel_dp))
+ DP |= DP_LINK_TRAIN_OFF;
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+@@ -1344,32 +1448,34 @@ intel_dp_link_down(struct intel_dp *intel_dp)
+ static void
+ intel_dp_check_link_status(struct intel_dp *intel_dp)
+ {
+- uint8_t link_status[DP_LINK_STATUS_SIZE];
+-
+- if (!intel_dp->base.enc.crtc)
++ if (!intel_dp->base.base.crtc)
+ return;
+
+- if (!intel_dp_get_link_status(intel_dp, link_status)) {
++ if (!intel_dp_get_link_status(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ return;
+ }
+
+- if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
+- intel_dp_link_train(intel_dp);
++ if (!intel_channel_eq_ok(intel_dp)) {
++ intel_dp_start_link_train(intel_dp);
++ intel_dp_complete_link_train(intel_dp);
++ }
+ }
+
+ static enum drm_connector_status
+-ironlake_dp_detect(struct drm_connector *connector)
++ironlake_dp_detect(struct intel_dp *intel_dp)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ enum drm_connector_status status;
+
++ /* Can't disconnect eDP */
++ if (is_edp(intel_dp))
++ return connector_status_connected;
++
+ status = connector_status_disconnected;
+ if (intel_dp_aux_native_read(intel_dp,
+ 0x000, intel_dp->dpcd,
+- sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
+- {
++ sizeof (intel_dp->dpcd))
++ == sizeof(intel_dp->dpcd)) {
+ if (intel_dp->dpcd[0] != 0)
+ status = connector_status_connected;
+ }
+@@ -1378,26 +1484,13 @@ ironlake_dp_detect(struct drm_connector *connector)
+ return status;
+ }
+
+-/**
+- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
+- *
+- * \return true if DP port is connected.
+- * \return false if DP port is disconnected.
+- */
+ static enum drm_connector_status
+-intel_dp_detect(struct drm_connector *connector, bool force)
++g4x_dp_detect(struct intel_dp *intel_dp)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+- struct drm_device *dev = intel_dp->base.enc.dev;
++ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- uint32_t temp, bit;
+ enum drm_connector_status status;
+-
+- intel_dp->has_audio = false;
+-
+- if (HAS_PCH_SPLIT(dev))
+- return ironlake_dp_detect(connector);
++ uint32_t temp, bit;
+
+ switch (intel_dp->output_reg) {
+ case DP_B:
+@@ -1419,31 +1512,66 @@ intel_dp_detect(struct drm_connector *connector, bool force)
+ return connector_status_disconnected;
+
+ status = connector_status_disconnected;
+- if (intel_dp_aux_native_read(intel_dp,
+- 0x000, intel_dp->dpcd,
++ if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
+ {
+ if (intel_dp->dpcd[0] != 0)
+ status = connector_status_connected;
+ }
++
+ return status;
+ }
+
++/**
++ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
++ *
++ * \return true if DP port is connected.
++ * \return false if DP port is disconnected.
++ */
++static enum drm_connector_status
++intel_dp_detect(struct drm_connector *connector, bool force)
++{
++ struct intel_dp *intel_dp = intel_attached_dp(connector);
++ struct drm_device *dev = intel_dp->base.base.dev;
++ enum drm_connector_status status;
++ struct edid *edid = NULL;
++
++ intel_dp->has_audio = false;
++
++ if (HAS_PCH_SPLIT(dev))
++ status = ironlake_dp_detect(intel_dp);
++ else
++ status = g4x_dp_detect(intel_dp);
++ if (status != connector_status_connected)
++ return status;
++
++ if (intel_dp->force_audio) {
++ intel_dp->has_audio = intel_dp->force_audio > 0;
++ } else {
++ edid = drm_get_edid(connector, &intel_dp->adapter);
++ if (edid) {
++ intel_dp->has_audio = drm_detect_monitor_audio(edid);
++ connector->display_info.raw_edid = NULL;
++ kfree(edid);
++ }
++ }
++
++ return connector_status_connected;
++}
++
+ static int intel_dp_get_modes(struct drm_connector *connector)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+- struct drm_device *dev = intel_dp->base.enc.dev;
++ struct intel_dp *intel_dp = intel_attached_dp(connector);
++ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* We should parse the EDID data and find out if it has an audio sink
+ */
+
+- ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
++ ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
+ if (ret) {
+- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+- !dev_priv->panel_fixed_mode) {
++ if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
+ struct drm_display_mode *newmode;
+ list_for_each_entry(newmode, &connector->probed_modes,
+ head) {
+@@ -1459,7 +1587,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
+ }
+
+ /* if eDP has no EDID, try to use fixed panel mode from VBT */
+- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
++ if (is_edp(intel_dp)) {
+ if (dev_priv->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
+@@ -1470,6 +1598,46 @@ static int intel_dp_get_modes(struct drm_connector *connector)
+ return 0;
+ }
+
++static int
++intel_dp_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t val)
++{
++ struct intel_dp *intel_dp = intel_attached_dp(connector);
++ int ret;
++
++ ret = drm_connector_property_set_value(connector, property, val);
++ if (ret)
++ return ret;
++
++ if (property == intel_dp->force_audio_property) {
++ if (val == intel_dp->force_audio)
++ return 0;
++
++ intel_dp->force_audio = val;
++
++ if (val > 0 && intel_dp->has_audio)
++ return 0;
++ if (val < 0 && !intel_dp->has_audio)
++ return 0;
++
++ intel_dp->has_audio = val > 0;
++ goto done;
++ }
++
++ return -EINVAL;
++
++done:
++ if (intel_dp->base.base.crtc) {
++ struct drm_crtc *crtc = intel_dp->base.base.crtc;
++ drm_crtc_helper_set_mode(crtc, &crtc->mode,
++ crtc->x, crtc->y,
++ crtc->fb);
++ }
++
++ return 0;
++}
++
+ static void
+ intel_dp_destroy (struct drm_connector *connector)
+ {
+@@ -1478,6 +1646,15 @@ intel_dp_destroy (struct drm_connector *connector)
+ kfree(connector);
+ }
+
++static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
++{
++ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
++
++ i2c_del_adapter(&intel_dp->adapter);
++ drm_encoder_cleanup(encoder);
++ kfree(intel_dp);
++}
++
+ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
+ .dpms = intel_dp_dpms,
+ .mode_fixup = intel_dp_mode_fixup,
+@@ -1490,20 +1667,21 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = intel_dp_set_property,
+ .destroy = intel_dp_destroy,
+ };
+
+ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ .get_modes = intel_dp_get_modes,
+ .mode_valid = intel_dp_mode_valid,
+- .best_encoder = intel_attached_encoder,
++ .best_encoder = intel_best_encoder,
+ };
+
+ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+- .destroy = intel_encoder_destroy,
++ .destroy = intel_dp_encoder_destroy,
+ };
+
+-void
++static void
+ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
+ {
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+@@ -1554,6 +1732,20 @@ bool intel_dpd_is_edp(struct drm_device *dev)
+ return false;
+ }
+
++static void
++intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++
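++ /* "force_audio" is a tri-state property: -1 forces audio off,
++ * 0 (the default) honours EDID detection, 1 forces audio on. */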
++ intel_dp->force_audio_property =
++ drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
++ if (intel_dp->force_audio_property) {
++ intel_dp->force_audio_property->values[0] = -1;
++ intel_dp->force_audio_property->values[1] = 1;
++ drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
++ }
++}
++
+ void
+ intel_dp_init(struct drm_device *dev, int output_reg)
+ {
+@@ -1580,7 +1772,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+ if (intel_dpd_is_edp(dev))
+ intel_dp->is_pch_edp = true;
+
+- if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
++ if (output_reg == DP_A || is_pch_edp(intel_dp)) {
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+ } else {
+@@ -1601,7 +1793,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+ else if (output_reg == DP_D || output_reg == PCH_DP_D)
+ intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+
+- if (IS_eDP(intel_dp))
++ if (is_edp(intel_dp))
+ intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+@@ -1612,12 +1804,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+ intel_dp->has_audio = false;
+ intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
+
+- drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
++ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+- drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
++ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+- drm_mode_connector_attach_encoder(&intel_connector->base,
+- &intel_encoder->enc);
++ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ drm_sysfs_connector_add(connector);
+
+ /* Set up the DDC bus. */
+@@ -1647,10 +1838,29 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
+
+- intel_encoder->ddc_bus = &intel_dp->adapter;
++ /* Cache some DPCD data in the eDP case */
++ if (is_edp(intel_dp)) {
++ int ret;
++ bool was_on;
++
++ was_on = ironlake_edp_panel_on(intel_dp);
++ ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
++ intel_dp->dpcd,
++ sizeof(intel_dp->dpcd));
++ if (ret == sizeof(intel_dp->dpcd)) {
++ if (intel_dp->dpcd[0] >= 0x11)
++ dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
++ DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
++ } else {
++ DRM_ERROR("failed to retrieve link info\n");
++ }
++ if (!was_on)
++ ironlake_edp_panel_off(dev);
++ }
++
+ intel_encoder->hot_plug = intel_dp_hot_plug;
+
+- if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
++ if (is_edp(intel_dp)) {
+ /* initialize panel mode from VBT if available for eDP */
+ if (dev_priv->lfp_lvds_vbt_mode) {
+ dev_priv->panel_fixed_mode =
+@@ -1662,6 +1872,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+ }
+ }
+
++ intel_dp_add_properties(intel_dp, connector);
++
+ /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 8828b3a..21551fe 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -26,14 +26,12 @@
+ #define __INTEL_DRV_H__
+
+ #include <linux/i2c.h>
+-#include <linux/i2c-id.h>
+-#include <linux/i2c-algo-bit.h>
+ #include "i915_drv.h"
+ #include "drm_crtc.h"
+-
+ #include "drm_crtc_helper.h"
++#include "drm_fb_helper.h"
+
+-#define wait_for(COND, MS, W) ({ \
++#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (! (COND)) { \
+@@ -41,11 +39,24 @@
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+- if (W) msleep(W); \
++ if (W && !in_dbg_master()) msleep(W); \
+ } \
+ ret__; \
+ })
+
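++/*
++ * wait_for() may sleep between polls (but never under the kernel
++ * debugger) while wait_for_atomic() busy-waits; both return 0 on
++ * success or -ETIMEDOUT, e.g.:
++ *
++ *	if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000))
++ *		DRM_ERROR("panel on wait timed out\n");
++ */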
++#define wait_for(COND, MS) _wait_for(COND, MS, 1)
++#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
++
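++/* MSLEEP() falls back to a busy-wait mdelay() when the kernel debugger
++ * owns the CPU, since msleep() would attempt to schedule. */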
++#define MSLEEP(x) do { \
++ if (in_dbg_master()) \
++ mdelay(x); \
++ else \
++ msleep(x); \
++} while (0)
++
++#define KHz(x) (1000*x)
++#define MHz(x) KHz(1000*x)
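++/* Note the macro argument is not parenthesized, so pass a simple
++ * expression: MHz(27) == 27000000, but MHz(a + b) would misexpand. */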
++
+ /*
+ * Display related stuff
+ */
+@@ -96,24 +107,39 @@
+ #define INTEL_DVO_CHIP_TMDS 2
+ #define INTEL_DVO_CHIP_TVOUT 4
+
+-struct intel_i2c_chan {
+- struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
+- u32 reg; /* GPIO reg */
+- struct i2c_adapter adapter;
+- struct i2c_algo_bit_data algo;
+-};
++/* drm_display_mode->private_flags */
++#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
++#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
++
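++/*
++ * The multiplier lives in the low nibble of private_flags; since the
++ * shift is 0, the bare OR below is equivalent to
++ * (multiplier << SHIFT) & MASK, assuming the multiplier fits in 4 bits.
++ */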
++static inline void
++intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
++ int multiplier)
++{
++ mode->clock *= multiplier;
++ mode->private_flags |= multiplier;
++}
++
++static inline int
++intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
++{
++ return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
++}
+
+ struct intel_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+ };
+
++struct intel_fbdev {
++ struct drm_fb_helper helper;
++ struct intel_framebuffer ifb;
++ struct list_head fbdev_list;
++ struct drm_display_mode *our_mode;
++};
+
+ struct intel_encoder {
+- struct drm_encoder enc;
++ struct drm_encoder base;
+ int type;
+- struct i2c_adapter *i2c_bus;
+- struct i2c_adapter *ddc_bus;
+ bool load_detect_temp;
+ bool needs_tv_clock;
+ void (*hot_plug)(struct intel_encoder *);
+@@ -123,32 +149,7 @@ struct intel_encoder {
+
+ struct intel_connector {
+ struct drm_connector base;
+-};
+-
+-struct intel_crtc;
+-struct intel_overlay {
+- struct drm_device *dev;
+- struct intel_crtc *crtc;
+- struct drm_i915_gem_object *vid_bo;
+- struct drm_i915_gem_object *old_vid_bo;
+- int active;
+- int pfit_active;
+- u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+- u32 color_key;
+- u32 brightness, contrast, saturation;
+- u32 old_xscale, old_yscale;
+- /* register access */
+- u32 flip_addr;
+- struct drm_i915_gem_object *reg_bo;
+- void *virt_addr;
+- /* flip handling */
+- uint32_t last_flip_req;
+- int hw_wedged;
+-#define HW_WEDGED 1
+-#define NEEDS_WAIT_FOR_FLIP 2
+-#define RELEASE_OLD_VID 3
+-#define SWITCH_OFF_STAGE_1 4
+-#define SWITCH_OFF_STAGE_2 5
++ struct intel_encoder *encoder;
+ };
+
+ struct intel_crtc {
+@@ -157,6 +158,7 @@ struct intel_crtc {
+ enum plane plane;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int dpms_mode;
++ bool active; /* is the crtc on? independent of the dpms mode */
+ bool busy; /* is scanout buffer being updated frequently? */
+ struct timer_list idle_timer;
+ bool lowfreq_avail;
+@@ -168,14 +170,53 @@ struct intel_crtc {
+ uint32_t cursor_addr;
+ int16_t cursor_x, cursor_y;
+ int16_t cursor_width, cursor_height;
+- bool cursor_visible, cursor_on;
++ bool cursor_visible;
+ };
+
+ #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+ #define to_intel_connector(x) container_of(x, struct intel_connector, base)
+-#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
++#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+ #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+
++#define DIP_TYPE_AVI 0x82
++#define DIP_VERSION_AVI 0x2
++#define DIP_LEN_AVI 13
++
++struct dip_infoframe {
++ uint8_t type; /* HB0 */
++ uint8_t ver; /* HB1 */
++ uint8_t len; /* HB2 - body len, not including checksum */
++ uint8_t ecc; /* Header ECC */
++ uint8_t checksum; /* PB0 */
++ union {
++ struct {
++ /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
++ uint8_t Y_A_B_S;
++ /* PB2 - C 7:6, M 5:4, R 3:0 */
++ uint8_t C_M_R;
++ /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
++ uint8_t ITC_EC_Q_SC;
++ /* PB4 - VIC 6:0 */
++ uint8_t VIC;
++ /* PB5 - PR 3:0 */
++ uint8_t PR;
++ /* PB6 to PB13 */
++ uint16_t top_bar_end;
++ uint16_t bottom_bar_start;
++ uint16_t left_bar_end;
++ uint16_t right_bar_start;
++ } avi;
++ uint8_t payload[27];
++ } __attribute__ ((packed)) body;
++} __attribute__((packed));
++
++static inline struct drm_crtc *
++intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ return dev_priv->pipe_to_crtc_mapping[pipe];
++}
++
+ struct intel_unpin_work {
+ struct work_struct work;
+ struct drm_device *dev;
+@@ -186,16 +227,12 @@ struct intel_unpin_work {
+ bool enable_stall_check;
+ };
+
+-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+- const char *name);
+-void intel_i2c_destroy(struct i2c_adapter *adapter);
+ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
+-void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
+-void intel_i2c_reset_gmbus(struct drm_device *dev);
++extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
+
+ extern void intel_crt_init(struct drm_device *dev);
+ extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
++void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
+ extern void intel_dvo_init(struct drm_device *dev);
+ extern void intel_tv_init(struct drm_device *dev);
+@@ -205,32 +242,41 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+ void
+ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+-extern bool intel_pch_has_edp(struct drm_crtc *crtc);
+ extern bool intel_dpd_is_edp(struct drm_device *dev);
+ extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
++extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
+
+-
++/* intel_panel.c */
+ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+ extern void intel_pch_panel_fitting(struct drm_device *dev,
+ int fitting_mode,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
++extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
++extern u32 intel_panel_get_backlight(struct drm_device *dev);
++extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+
+-extern int intel_panel_fitter_pipe (struct drm_device *dev);
+ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
+ extern void intel_encoder_prepare (struct drm_encoder *encoder);
+ extern void intel_encoder_commit (struct drm_encoder *encoder);
+ extern void intel_encoder_destroy(struct drm_encoder *encoder);
+
+-extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
++static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
++{
++ return to_intel_connector(connector)->encoder;
++}
++
++extern void intel_connector_attach_encoder(struct intel_connector *connector,
++ struct intel_encoder *encoder);
++extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+
+ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+-extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
++extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+ extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode,
+@@ -250,9 +296,11 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ extern void intel_init_clock_gating(struct drm_device *dev);
+ extern void ironlake_enable_drps(struct drm_device *dev);
+ extern void ironlake_disable_drps(struct drm_device *dev);
++extern void intel_init_emon(struct drm_device *dev);
+
+ extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+- struct drm_gem_object *obj);
++ struct drm_gem_object *obj,
++ bool pipelined);
+
+ extern int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+@@ -267,9 +315,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+ extern void intel_setup_overlay(struct drm_device *dev);
+ extern void intel_cleanup_overlay(struct drm_device *dev);
+-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+-extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+- int interruptible);
++extern int intel_overlay_switch_off(struct intel_overlay *overlay,
++ bool interruptible);
+ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
+index 7c9ec14..ea37328 100644
+--- a/drivers/gpu/drm/i915/intel_dvo.c
++++ b/drivers/gpu/drm/i915/intel_dvo.c
+@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
+ .name = "ch7017",
+ .dvo_reg = DVOC,
+ .slave_addr = 0x75,
+- .gpio = GPIOE,
++ .gpio = GMBUS_PORT_DPB,
+ .dev_ops = &ch7017_ops,
+ }
+ };
+@@ -88,7 +88,13 @@ struct intel_dvo {
+
+ static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+ {
+- return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
++ return container_of(encoder, struct intel_dvo, base.base);
++}
++
++static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
++{
++ return container_of(intel_attached_encoder(connector),
++ struct intel_dvo, base);
+ }
+
+ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+ static int intel_dvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
++ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
+ static enum drm_connector_status
+ intel_dvo_detect(struct drm_connector *connector, bool force)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+-
++ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
+ }
+
+ static int intel_dvo_get_modes(struct drm_connector *connector)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
++ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
++ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ /* We should probably have an i2c driver get_modes function for those
+ * devices which will have a fixed set of modes determined by the chip
+ * (TV-out, for example), but for now with just TMDS and LVDS,
+ * that's not the case.
+ */
+- intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
++ intel_ddc_get_modes(connector,
++ &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
+ if (!list_empty(&connector->probed_modes))
+ return 1;
+
+@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
+ .mode_valid = intel_dvo_mode_valid,
+ .get_modes = intel_dvo_get_modes,
+- .best_encoder = intel_attached_encoder,
++ .best_encoder = intel_best_encoder,
+ };
+
+ static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
+@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
+ {
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
++ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
+ struct drm_display_mode *mode = NULL;
+
+@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
+ struct drm_crtc *crtc;
+ int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
+
+- crtc = intel_get_crtc_from_pipe(dev, pipe);
++ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc) {
+ mode = intel_crtc_mode_get(dev, crtc);
+ if (mode) {
+@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
+
+ void intel_dvo_init(struct drm_device *dev)
+ {
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_dvo *intel_dvo;
+ struct intel_connector *intel_connector;
+- struct i2c_adapter *i2cbus = NULL;
+- int ret = 0;
+ int i;
+ int encoder_type = DRM_MODE_ENCODER_NONE;
+
+@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev)
+ }
+
+ intel_encoder = &intel_dvo->base;
+-
+- /* Set up the DDC bus */
+- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
+- if (!intel_encoder->ddc_bus)
+- goto free_intel;
++ drm_encoder_init(dev, &intel_encoder->base,
++ &intel_dvo_enc_funcs, encoder_type);
+
+ /* Now, try to find a controller */
+ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+ struct drm_connector *connector = &intel_connector->base;
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
++ struct i2c_adapter *i2c;
+ int gpio;
+
+ /* Allow the I2C driver info to specify the GPIO to be used in
+@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev)
+ if (dvo->gpio != 0)
+ gpio = dvo->gpio;
+ else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+- gpio = GPIOB;
++ gpio = GMBUS_PORT_SSC;
+ else
+- gpio = GPIOE;
++ gpio = GMBUS_PORT_DPB;
+
+ /* Set up the I2C bus necessary for the chip we're probing.
+ * It appears that everything is on GPIOE except for panels
+ * on i830 laptops, which are on GPIOB (DVOA).
+ */
+- if (i2cbus != NULL)
+- intel_i2c_destroy(i2cbus);
+- if (!(i2cbus = intel_i2c_create(dev, gpio,
+- gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
+- continue;
+- }
++ i2c = &dev_priv->gmbus[gpio].adapter;
+
+ intel_dvo->dev = *dvo;
+- ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
+- if (!ret)
++ if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+ continue;
+
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev)
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+- drm_encoder_init(dev, &intel_encoder->enc,
+- &intel_dvo_enc_funcs, encoder_type);
+- drm_encoder_helper_add(&intel_encoder->enc,
++ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_dvo_helper_funcs);
+
+- drm_mode_connector_attach_encoder(&intel_connector->base,
+- &intel_encoder->enc);
++ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ if (dvo->type == INTEL_DVO_CHIP_LVDS) {
+ /* For our LVDS chipsets, we should hopefully be able
+ * to dig the fixed panel mode out of the BIOS data.
+@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev)
+ return;
+ }
+
+- intel_i2c_destroy(intel_encoder->ddc_bus);
+- /* Didn't find a chip, so tear down. */
+- if (i2cbus != NULL)
+- intel_i2c_destroy(i2cbus);
+-free_intel:
++ drm_encoder_cleanup(&intel_encoder->base);
+ kfree(intel_dvo);
+ kfree(intel_connector);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
+index b61966c..af2a1dd 100644
+--- a/drivers/gpu/drm/i915/intel_fb.c
++++ b/drivers/gpu/drm/i915/intel_fb.c
+@@ -44,13 +44,6 @@
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+
+-struct intel_fbdev {
+- struct drm_fb_helper helper;
+- struct intel_framebuffer ifb;
+- struct list_head fbdev_list;
+- struct drm_display_mode *our_mode;
+-};
+-
+ static struct fb_ops intelfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+@@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
+ struct drm_gem_object *fbo = NULL;
+ struct drm_i915_gem_object *obj_priv;
+ struct device *device = &dev->pdev->dev;
+- int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
++ int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
+
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+@@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
+
+ mutex_lock(&dev->struct_mutex);
+
+- ret = intel_pin_and_fence_fb_obj(dev, fbo);
++ /* Flush everything out, we'll be doing GTT only from now on */
++ ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+ if (ret) {
+ DRM_ERROR("failed to pin fb: %d\n", ret);
+ goto out_unref;
+ }
+
+- /* Flush everything out, we'll be doing GTT only from now on */
+- ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
+- if (ret) {
+- DRM_ERROR("failed to bind fb: %d.\n", ret);
+- goto out_unpin;
+- }
+-
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+@@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
+ goto out_unpin;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
+- if (IS_I9XX(dev))
++ if (!IS_GEN2(dev))
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
+ else
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+@@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .fb_probe = intel_fb_find_or_create_single,
+ };
+
+-int intel_fbdev_destroy(struct drm_device *dev,
+- struct intel_fbdev *ifbdev)
++static void intel_fbdev_destroy(struct drm_device *dev,
++ struct intel_fbdev *ifbdev)
+ {
+ struct fb_info *info;
+ struct intel_framebuffer *ifb = &ifbdev->ifb;
+@@ -238,11 +225,9 @@ int intel_fbdev_destroy(struct drm_device *dev,
+
+ drm_framebuffer_cleanup(&ifb->base);
+ if (ifb->obj) {
+- drm_gem_object_unreference(ifb->obj);
++ drm_gem_object_unreference_unlocked(ifb->obj);
+ ifb->obj = NULL;
+ }
+-
+- return 0;
+ }
+
+ int intel_fbdev_init(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 926934a..0d0273e 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -40,12 +40,76 @@
+ struct intel_hdmi {
+ struct intel_encoder base;
+ u32 sdvox_reg;
++ int ddc_bus;
+ bool has_hdmi_sink;
++ bool has_audio;
++ int force_audio;
++ struct drm_property *force_audio_property;
+ };
+
+ static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+ {
+- return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
++ return container_of(encoder, struct intel_hdmi, base.base);
++}
++
++static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
++{
++ return container_of(intel_attached_encoder(connector),
++ struct intel_hdmi, base);
++}
++
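++/*
++ * HDMI InfoFrame checksum rule: the byte sum of header and payload,
++ * including the checksum itself, must be 0 modulo 256, hence
++ * checksum = 0x100 - sum of the remaining bytes.
++ */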
++void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
++{
++ uint8_t *data = (uint8_t *)avi_if;
++ uint8_t sum = 0;
++ unsigned i;
++
++ avi_if->checksum = 0;
++ avi_if->ecc = 0;
++
++ for (i = 0; i < sizeof(*avi_if); i++)
++ sum += data[i];
++
++ avi_if->checksum = 0x100 - sum;
++}
++
++static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
++{
++ struct dip_infoframe avi_if = {
++ .type = DIP_TYPE_AVI,
++ .ver = DIP_VERSION_AVI,
++ .len = DIP_LEN_AVI,
++ };
++ uint32_t *data = (uint32_t *)&avi_if;
++ struct drm_device *dev = encoder->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
++ u32 port;
++ unsigned i;
++
++ if (!intel_hdmi->has_hdmi_sink)
++ return;
++
++ /* XXX first guess at handling video port, is this correct? */
++ if (intel_hdmi->sdvox_reg == SDVOB)
++ port = VIDEO_DIP_PORT_B;
++ else if (intel_hdmi->sdvox_reg == SDVOC)
++ port = VIDEO_DIP_PORT_C;
++ else
++ return;
++
++ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
++ VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
++
++ intel_dip_infoframe_csum(&avi_if);
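++ /* Push the frame out 32 bits at a time; the DIP data register
++ * index appears to auto-increment on each write. */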
++ for (i = 0; i < sizeof(avi_if); i += 4) {
++ I915_WRITE(VIDEO_DIP_DATA, *data);
++ data++;
++ }
++
++ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
++ VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
++ VIDEO_DIP_ENABLE_AVI);
+ }
+
+ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
+@@ -65,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+
+- if (intel_hdmi->has_hdmi_sink) {
++ /* Required on CPT */
++ if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
++ sdvox |= HDMI_MODE_SELECT;
++
++ if (intel_hdmi->has_audio) {
+ sdvox |= SDVO_AUDIO_ENABLE;
+- if (HAS_PCH_CPT(dev))
+- sdvox |= HDMI_MODE_SELECT;
++ sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
+ }
+
+ if (intel_crtc->pipe == 1) {
+@@ -80,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
+
+ I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
+ POSTING_READ(intel_hdmi->sdvox_reg);
++
++ intel_hdmi_set_avi_infoframe(encoder);
+ }
+
+ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+@@ -141,36 +210,85 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ static enum drm_connector_status
+ intel_hdmi_detect(struct drm_connector *connector, bool force)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+- struct edid *edid = NULL;
++ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
++ struct drm_i915_private *dev_priv = connector->dev->dev_private;
++ struct edid *edid;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ intel_hdmi->has_hdmi_sink = false;
+- edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
++ intel_hdmi->has_audio = false;
++ edid = drm_get_edid(connector,
++ &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
++ intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ }
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+
++ if (status == connector_status_connected) {
++ if (intel_hdmi->force_audio)
++ intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
++ }
++
+ return status;
+ }
+
+ static int intel_hdmi_get_modes(struct drm_connector *connector)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
++ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
++ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ /* We should parse the EDID data and find out if it's an HDMI sink so
+ * we can send audio to it.
+ */
+
+- return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
++ return intel_ddc_get_modes(connector,
++ &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
++}
++
++static int
++intel_hdmi_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t val)
++{
++ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
++ int ret;
++
++ ret = drm_connector_property_set_value(connector, property, val);
++ if (ret)
++ return ret;
++
++ if (property == intel_hdmi->force_audio_property) {
++ if (val == intel_hdmi->force_audio)
++ return 0;
++
++ intel_hdmi->force_audio = val;
++
++ if (val > 0 && intel_hdmi->has_audio)
++ return 0;
++ if (val < 0 && !intel_hdmi->has_audio)
++ return 0;
++
++ intel_hdmi->has_audio = val > 0;
++ goto done;
++ }
++
++ return -EINVAL;
++
++done:
++ if (intel_hdmi->base.base.crtc) {
++ struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
++ drm_crtc_helper_set_mode(crtc, &crtc->mode,
++ crtc->x, crtc->y,
++ crtc->fb);
++ }
++
++ return 0;
+ }
+
+ static void intel_hdmi_destroy(struct drm_connector *connector)
+@@ -192,19 +310,34 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = intel_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = intel_hdmi_set_property,
+ .destroy = intel_hdmi_destroy,
+ };
+
+ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
+ .get_modes = intel_hdmi_get_modes,
+ .mode_valid = intel_hdmi_mode_valid,
+- .best_encoder = intel_attached_encoder,
++ .best_encoder = intel_best_encoder,
+ };
+
+ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+ };
+
++static void
++intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++
++ intel_hdmi->force_audio_property =
++ drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
++ if (intel_hdmi->force_audio_property) {
++ intel_hdmi->force_audio_property->values[0] = -1;
++ intel_hdmi->force_audio_property->values[1] = 1;
++ drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
++ }
++}
++
+ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -224,6 +357,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ }
+
+ intel_encoder = &intel_hdmi->base;
++ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
++ DRM_MODE_ENCODER_TMDS);
++
+ connector = &intel_connector->base;
+ drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+@@ -239,39 +375,33 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ /* Set up the DDC bus. */
+ if (sdvox_reg == SDVOB) {
+ intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
++ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == SDVOC) {
+ intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
++ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == HDMIB) {
+ intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
+- "HDMIB");
++ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == HDMIC) {
+ intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
+- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
+- "HDMIC");
++ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == HDMID) {
+ intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
+- intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
+- "HDMID");
++ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ }
+- if (!intel_encoder->ddc_bus)
+- goto err_connector;
+
+ intel_hdmi->sdvox_reg = sdvox_reg;
+
+- drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
+- DRM_MODE_ENCODER_TMDS);
+- drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
++ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
++
++ intel_hdmi_add_properties(intel_hdmi, connector);
+
+- drm_mode_connector_attach_encoder(&intel_connector->base,
+- &intel_encoder->enc);
++ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ drm_sysfs_connector_add(connector);
+
+ /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+@@ -282,13 +412,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+-
+- return;
+-
+-err_connector:
+- drm_connector_cleanup(connector);
+- kfree(intel_hdmi);
+- kfree(intel_connector);
+-
+- return;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index c2649c7..2be4f72 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -1,6 +1,6 @@
+ /*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+- * Copyright © 2006-2008 Intel Corporation
++ * Copyright © 2006-2008,2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+@@ -24,10 +24,9 @@
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
++ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+ #include <linux/i2c.h>
+-#include <linux/slab.h>
+-#include <linux/i2c-id.h>
+ #include <linux/i2c-algo-bit.h>
+ #include "drmP.h"
+ #include "drm.h"
+@@ -35,79 +34,106 @@
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+
+-void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
++/* Intel GPIO access functions */
++
++#define I2C_RISEFALL_TIME 20
++
++static inline struct intel_gmbus *
++to_intel_gmbus(struct i2c_adapter *i2c)
++{
++ return container_of(i2c, struct intel_gmbus, adapter);
++}
++
++struct intel_gpio {
++ struct i2c_adapter adapter;
++ struct i2c_algo_bit_data algo;
++ struct drm_i915_private *dev_priv;
++ u32 reg;
++};
++
++void
++intel_i2c_reset(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
++ if (HAS_PCH_SPLIT(dev))
++ I915_WRITE(PCH_GMBUS0, 0);
++ else
++ I915_WRITE(GMBUS0, 0);
++}
++
++static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
++{
++ u32 val;
+
+ /* When using bit bashing for I2C, this bit needs to be set to 1 */
+- if (!IS_PINEVIEW(dev))
++ if (!IS_PINEVIEW(dev_priv->dev))
+ return;
++
++ val = I915_READ(DSPCLK_GATE_D);
+ if (enable)
+- I915_WRITE(DSPCLK_GATE_D,
+- I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
++ val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ else
+- I915_WRITE(DSPCLK_GATE_D,
+- I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
++ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
++ I915_WRITE(DSPCLK_GATE_D, val);
+ }
+
+-/*
+- * Intel GPIO access functions
+- */
++static u32 get_reserved(struct intel_gpio *gpio)
++{
++ struct drm_i915_private *dev_priv = gpio->dev_priv;
++ struct drm_device *dev = dev_priv->dev;
++ u32 reserved = 0;
+
+-#define I2C_RISEFALL_TIME 20
++ /* On most chips, these bits must be preserved in software. */
++ if (!IS_I830(dev) && !IS_845G(dev))
++ reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
++ GPIO_CLOCK_PULLUP_DISABLE);
++
++ return reserved;
++}
+
+ static int get_clock(void *data)
+ {
+- struct intel_i2c_chan *chan = data;
+- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+- u32 val;
+-
+- val = I915_READ(chan->reg);
+- return ((val & GPIO_CLOCK_VAL_IN) != 0);
++ struct intel_gpio *gpio = data;
++ struct drm_i915_private *dev_priv = gpio->dev_priv;
++ u32 reserved = get_reserved(gpio);
++ I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
++ I915_WRITE(gpio->reg, reserved);
++ return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+ }
+
+ static int get_data(void *data)
+ {
+- struct intel_i2c_chan *chan = data;
+- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+- u32 val;
+-
+- val = I915_READ(chan->reg);
+- return ((val & GPIO_DATA_VAL_IN) != 0);
++ struct intel_gpio *gpio = data;
++ struct drm_i915_private *dev_priv = gpio->dev_priv;
++ u32 reserved = get_reserved(gpio);
++ I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
++ I915_WRITE(gpio->reg, reserved);
++ return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+ }
+
+ static void set_clock(void *data, int state_high)
+ {
+- struct intel_i2c_chan *chan = data;
+- struct drm_device *dev = chan->drm_dev;
+- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+- u32 reserved = 0, clock_bits;
+-
+- /* On most chips, these bits must be preserved in software. */
+- if (!IS_I830(dev) && !IS_845G(dev))
+- reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+- GPIO_CLOCK_PULLUP_DISABLE);
++ struct intel_gpio *gpio = data;
++ struct drm_i915_private *dev_priv = gpio->dev_priv;
++ u32 reserved = get_reserved(gpio);
++ u32 clock_bits;
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+- I915_WRITE(chan->reg, reserved | clock_bits);
+- udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++
++ I915_WRITE(gpio->reg, reserved | clock_bits);
++ POSTING_READ(gpio->reg);
+ }
+
+ static void set_data(void *data, int state_high)
+ {
+- struct intel_i2c_chan *chan = data;
+- struct drm_device *dev = chan->drm_dev;
+- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+- u32 reserved = 0, data_bits;
+-
+- /* On most chips, these bits must be preserved in software. */
+- if (!IS_I830(dev) && !IS_845G(dev))
+- reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+- GPIO_CLOCK_PULLUP_DISABLE);
++ struct intel_gpio *gpio = data;
++ struct drm_i915_private *dev_priv = gpio->dev_priv;
++ u32 reserved = get_reserved(gpio);
++ u32 data_bits;
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+@@ -115,109 +141,313 @@ static void set_data(void *data, int state_high)
+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+- I915_WRITE(chan->reg, reserved | data_bits);
+- udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++ I915_WRITE(gpio->reg, reserved | data_bits);
++ POSTING_READ(gpio->reg);
+ }
+
+-/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
+- * engine, but if the BIOS leaves it enabled, then that can break our use
+- * of the bit-banging I2C interfaces. This is notably the case with the
+- * Mac Mini in EFI mode.
+- */
+-void
+-intel_i2c_reset_gmbus(struct drm_device *dev)
++static struct i2c_adapter *
++intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
+ {
+- struct drm_i915_private *dev_priv = dev->dev_private;
++ static const int map_pin_to_reg[] = {
++ 0,
++ GPIOB,
++ GPIOA,
++ GPIOC,
++ GPIOD,
++ GPIOE,
++ 0,
++ GPIOF,
++ };
++ struct intel_gpio *gpio;
+
+- if (HAS_PCH_SPLIT(dev)) {
+- I915_WRITE(PCH_GMBUS0, 0);
+- } else {
+- I915_WRITE(GMBUS0, 0);
++ if (pin < 1 || pin > 7 || map_pin_to_reg[pin] == 0)
++ return NULL;
++
++ gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
++ if (gpio == NULL)
++ return NULL;
++
++ gpio->reg = map_pin_to_reg[pin];
++ if (HAS_PCH_SPLIT(dev_priv->dev))
++ gpio->reg += PCH_GPIOA - GPIOA;
++ gpio->dev_priv = dev_priv;
++
++ snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDE?F"[pin]);
++ gpio->adapter.owner = THIS_MODULE;
++ gpio->adapter.algo_data = &gpio->algo;
++ gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
++ gpio->algo.setsda = set_data;
++ gpio->algo.setscl = set_clock;
++ gpio->algo.getsda = get_data;
++ gpio->algo.getscl = get_clock;
++ gpio->algo.udelay = I2C_RISEFALL_TIME;
++ gpio->algo.timeout = usecs_to_jiffies(2200);
++ gpio->algo.data = gpio;
++
++ if (i2c_bit_add_bus(&gpio->adapter))
++ goto out_free;
++
++ return &gpio->adapter;
++
++out_free:
++ kfree(gpio);
++ return NULL;
++}
++
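++/* Run the transfer on the bit-banging adapter, first knocking the
++ * hardware GMBUS engine out of the way and applying the Pineview
++ * clock-gating quirk.
++ */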
++static int
++intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
++ struct i2c_adapter *adapter,
++ struct i2c_msg *msgs,
++ int num)
++{
++ struct intel_gpio *gpio = container_of(adapter,
++ struct intel_gpio,
++ adapter);
++ int ret;
++
++ intel_i2c_reset(dev_priv->dev);
++
++ intel_i2c_quirk_set(dev_priv, true);
++ set_data(gpio, 1);
++ set_clock(gpio, 1);
++ udelay(I2C_RISEFALL_TIME);
++
++ ret = adapter->algo->master_xfer(adapter, msgs, num);
++
++ set_data(gpio, 1);
++ set_clock(gpio, 1);
++ intel_i2c_quirk_set(dev_priv, false);
++
++ return ret;
++}
++
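++/* Execute an I2C transaction on the GMBUS engine, or on the forced
++ * bit-banging adapter if one has been created for this bus.
++ */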
++static int
++gmbus_xfer(struct i2c_adapter *adapter,
++ struct i2c_msg *msgs,
++ int num)
++{
++ struct intel_gmbus *bus = container_of(adapter,
++ struct intel_gmbus,
++ adapter);
++ struct drm_i915_private *dev_priv = adapter->algo_data;
++ int i, reg_offset;
++
++ if (bus->force_bit)
++ return intel_i2c_quirk_xfer(dev_priv,
++ bus->force_bit, msgs, num);
++
++ reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
++
++ I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
++
++ for (i = 0; i < num; i++) {
++ u16 len = msgs[i].len;
++ u8 *buf = msgs[i].buf;
++
++ if (msgs[i].flags & I2C_M_RD) {
++ I915_WRITE(GMBUS1 + reg_offset,
++ GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
++ (len << GMBUS_BYTE_COUNT_SHIFT) |
++ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
++ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
++ POSTING_READ(GMBUS2 + reg_offset);
++ do {
++ u32 val, loop = 0;
++
++ if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
++ goto timeout;
++ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
++ return 0;
++
++ val = I915_READ(GMBUS3 + reg_offset);
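++ /* GMBUS3 returns up to four bytes per read, LSB first. */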
++ do {
++ *buf++ = val & 0xff;
++ val >>= 8;
++ } while (--len && ++loop < 4);
++ } while (len);
++ } else {
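++ /* Writes are packed into GMBUS3 four bytes at a time,
++ * LSB first.
++ */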
++ u32 val, loop;
++
++ val = loop = 0;
++ do {
++ val |= *buf++ << (8 * loop);
++ } while (--len && ++loop < 4);
++
++ I915_WRITE(GMBUS3 + reg_offset, val);
++ I915_WRITE(GMBUS1 + reg_offset,
++ (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
++ (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
++ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
++ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
++ POSTING_READ(GMBUS2 + reg_offset);
++
++ while (len) {
++ if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
++ goto timeout;
++ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
++ return 0;
++
++ val = loop = 0;
++ do {
++ val |= *buf++ << (8 * loop);
++ } while (--len && ++loop < 4);
++
++ I915_WRITE(GMBUS3 + reg_offset, val);
++ POSTING_READ(GMBUS2 + reg_offset);
++ }
++ }
++
++ if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
++ goto timeout;
++ if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
++ return 0;
+ }
++
++ return num;
++
++timeout:
++ DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
++ bus->reg0 & 0xff, bus->adapter.name);
++ /* The hardware may not support GMBUS over these pins,
++ * so fall back to GPIO bit-banging.
++ */
++ bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
++ if (!bus->force_bit)
++ return -ENOMEM;
++
++ return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+ }
+
++static u32 gmbus_func(struct i2c_adapter *adapter)
++{
++ struct intel_gmbus *bus = container_of(adapter,
++ struct intel_gmbus,
++ adapter);
++
++ if (bus->force_bit)
++ bus->force_bit->algo->functionality(bus->force_bit);
++
++ return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
++ /* I2C_FUNC_10BIT_ADDR | */
++ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
++ I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
++}
++
++static const struct i2c_algorithm gmbus_algorithm = {
++ .master_xfer = gmbus_xfer,
++ .functionality = gmbus_func
++};
++
+ /**
+- * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
++ * intel_setup_gmbus - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+- * @output: driver specific output device
+- * @reg: GPIO reg to use
+- * @name: name for this bus
+- * @slave_addr: slave address (if fixed)
+- *
+- * Creates and registers a new i2c bus with the Linux i2c layer, for use
+- * in output probing and control (e.g. DDC or SDVO control functions).
+- *
+- * Possible values for @reg include:
+- * %GPIOA
+- * %GPIOB
+- * %GPIOC
+- * %GPIOD
+- * %GPIOE
+- * %GPIOF
+- * %GPIOG
+- * %GPIOH
+- * see PRM for details on how these different busses are used.
+ */
+-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+- const char *name)
++int intel_setup_gmbus(struct drm_device *dev)
+ {
+- struct intel_i2c_chan *chan;
++ static const char *names[GMBUS_NUM_PORTS] = {
++ "disabled",
++ "ssc",
++ "vga",
++ "panel",
++ "dpc",
++ "dpb",
++ "reserved"
++ "dpd",
++ };
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int ret, i;
+
+- chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
+- if (!chan)
+- goto out_free;
++ dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
++ GFP_KERNEL);
++ if (dev_priv->gmbus == NULL)
++ return -ENOMEM;
+
+- chan->drm_dev = dev;
+- chan->reg = reg;
+- snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+- chan->adapter.owner = THIS_MODULE;
+- chan->adapter.algo_data = &chan->algo;
+- chan->adapter.dev.parent = &dev->pdev->dev;
+- chan->algo.setsda = set_data;
+- chan->algo.setscl = set_clock;
+- chan->algo.getsda = get_data;
+- chan->algo.getscl = get_clock;
+- chan->algo.udelay = 20;
+- chan->algo.timeout = usecs_to_jiffies(2200);
+- chan->algo.data = chan;
+-
+- i2c_set_adapdata(&chan->adapter, chan);
+-
+- if(i2c_bit_add_bus(&chan->adapter))
+- goto out_free;
++ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
++ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+
+- intel_i2c_reset_gmbus(dev);
++ bus->adapter.owner = THIS_MODULE;
++ bus->adapter.class = I2C_CLASS_DDC;
++ snprintf(bus->adapter.name,
++ I2C_NAME_SIZE,
++ "gmbus %s",
++ names[i]);
+
+- /* JJJ: raise SCL and SDA? */
+- intel_i2c_quirk_set(dev, true);
+- set_data(chan, 1);
+- set_clock(chan, 1);
+- intel_i2c_quirk_set(dev, false);
+- udelay(20);
++ bus->adapter.dev.parent = &dev->pdev->dev;
++ bus->adapter.algo_data = dev_priv;
+
+- return &chan->adapter;
++ bus->adapter.algo = &gmbus_algorithm;
++ ret = i2c_add_adapter(&bus->adapter);
++ if (ret)
++ goto err;
+
+-out_free:
+- kfree(chan);
+- return NULL;
++ /* By default use a conservative clock rate */
++ bus->reg0 = i | GMBUS_RATE_100KHZ;
++
++ /* XXX force bit banging until GMBUS is fully debugged */
++ bus->force_bit = intel_gpio_create(dev_priv, i);
++ }
++
++ intel_i2c_reset(dev_priv->dev);
++
++ return 0;
++
++err:
++ while (i--) {
++ struct intel_gmbus *bus = &dev_priv->gmbus[i];
++ i2c_del_adapter(&bus->adapter);
++ }
++ kfree(dev_priv->gmbus);
++ dev_priv->gmbus = NULL;
++ return ret;
+ }
+
+-/**
+- * intel_i2c_destroy - unregister and free i2c bus resources
+- * @output: channel to free
+- *
+- * Unregister the adapter from the i2c layer, then free the structure.
+- */
+-void intel_i2c_destroy(struct i2c_adapter *adapter)
++void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
++{
++ struct intel_gmbus *bus = to_intel_gmbus(adapter);
++
++ /* speed:
++ * 0x0 = 100 KHz
++ * 0x1 = 50 KHz
++ * 0x2 = 400 KHz
++ * 0x3 = 1000 KHz
++ */
++ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
++}
++
++void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
++{
++ struct intel_gmbus *bus = to_intel_gmbus(adapter);
++
++ if (force_bit) {
++ if (bus->force_bit == NULL) {
++ struct drm_i915_private *dev_priv = adapter->algo_data;
++ bus->force_bit = intel_gpio_create(dev_priv,
++ bus->reg0 & 0xff);
++ }
++ } else {
++ if (bus->force_bit) {
++ i2c_del_adapter(bus->force_bit);
++ kfree(bus->force_bit);
++ bus->force_bit = NULL;
++ }
++ }
++}
++
++void intel_teardown_gmbus(struct drm_device *dev)
+ {
+- struct intel_i2c_chan *chan;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int i;
+
+- if (!adapter)
++ if (dev_priv->gmbus == NULL)
+ return;
+
+- chan = container_of(adapter,
+- struct intel_i2c_chan,
+- adapter);
+- i2c_del_adapter(&chan->adapter);
+- kfree(chan);
++ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
++ struct intel_gmbus *bus = &dev_priv->gmbus[i];
++ if (bus->force_bit) {
++ i2c_del_adapter(bus->force_bit);
++ kfree(bus->force_bit);
++ }
++ i2c_del_adapter(&bus->adapter);
++ }
++
++ kfree(dev_priv->gmbus);
++ dev_priv->gmbus = NULL;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 6ec39a8..4324a32 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -43,102 +43,76 @@
+ /* Private structure for the integrated LVDS support */
+ struct intel_lvds {
+ struct intel_encoder base;
++
++ struct edid *edid;
++
+ int fitting_mode;
+ u32 pfit_control;
+ u32 pfit_pgm_ratios;
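++ /* set when pfit_control/pfit_pgm_ratios need writing to the hw */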
++ bool pfit_dirty;
++
++ struct drm_display_mode *fixed_mode;
+ };
+
+-static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
++static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+ {
+- return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
++ return container_of(encoder, struct intel_lvds, base.base);
+ }
+
+-/**
+- * Sets the backlight level.
+- *
+- * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
+- */
+-static void intel_lvds_set_backlight(struct drm_device *dev, int level)
++static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+ {
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 blc_pwm_ctl, reg;
+-
+- if (HAS_PCH_SPLIT(dev))
+- reg = BLC_PWM_CPU_CTL;
+- else
+- reg = BLC_PWM_CTL;
+-
+- blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+- I915_WRITE(reg, (blc_pwm_ctl |
+- (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+-}
+-
+-/**
+- * Returns the maximum level of the backlight duty cycle field.
+- */
+-static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 reg;
+-
+- if (HAS_PCH_SPLIT(dev))
+- reg = BLC_PWM_PCH_CTL2;
+- else
+- reg = BLC_PWM_CTL;
+-
+- return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
+- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++ return container_of(intel_attached_encoder(connector),
++ struct intel_lvds, base);
+ }
+
+ /**
+ * Sets the power state for the panel.
+ */
+-static void intel_lvds_set_power(struct drm_device *dev, bool on)
++static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
+ {
++ struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 ctl_reg, status_reg, lvds_reg;
++ u32 ctl_reg, lvds_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+- status_reg = PCH_PP_STATUS;
+ lvds_reg = PCH_LVDS;
+ } else {
+ ctl_reg = PP_CONTROL;
+- status_reg = PP_STATUS;
+ lvds_reg = LVDS;
+ }
+
+ if (on) {
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
+- POSTING_READ(lvds_reg);
+-
+- I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
+- POWER_TARGET_ON);
+- if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
+- DRM_ERROR("timed out waiting to enable LVDS pipe");
+-
+- intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
++ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
++ intel_panel_set_backlight(dev, dev_priv->backlight_level);
+ } else {
+- intel_lvds_set_backlight(dev, 0);
++ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+
+- I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
+- ~POWER_TARGET_ON);
+- if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
+- DRM_ERROR("timed out waiting for LVDS pipe to turn off");
++ intel_panel_set_backlight(dev, 0);
++ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
++
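++ /* The panel fitter can only be safely reprogrammed while
++ * the panel is off, so wait for the power sequencer before
++ * disabling it.
++ */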
++ if (intel_lvds->pfit_control) {
++ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
++ DRM_ERROR("timed out waiting for panel to power off\n");
++ I915_WRITE(PFIT_CONTROL, 0);
++ intel_lvds->pfit_control = 0;
++ intel_lvds->pfit_dirty = false;
++ }
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+- POSTING_READ(lvds_reg);
+ }
++ POSTING_READ(lvds_reg);
+ }
+
+ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
+ {
+- struct drm_device *dev = encoder->dev;
++ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+ if (mode == DRM_MODE_DPMS_ON)
+- intel_lvds_set_power(dev, true);
++ intel_lvds_set_power(intel_lvds, true);
+ else
+- intel_lvds_set_power(dev, false);
++ intel_lvds_set_power(intel_lvds, false);
+
+ /* XXX: We never power down the LVDS pairs. */
+ }
+@@ -146,16 +120,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
+ static int intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+- struct drm_device *dev = connector->dev;
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
++ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
++ struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+
+- if (fixed_mode) {
+- if (mode->hdisplay > fixed_mode->hdisplay)
+- return MODE_PANEL;
+- if (mode->vdisplay > fixed_mode->vdisplay)
+- return MODE_PANEL;
+- }
++ if (mode->hdisplay > fixed_mode->hdisplay)
++ return MODE_PANEL;
++ if (mode->vdisplay > fixed_mode->vdisplay)
++ return MODE_PANEL;
+
+ return MODE_OK;
+ }
+@@ -223,12 +194,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
++ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+ struct drm_encoder *tmp_encoder;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+
+ /* Should never happen!! */
+- if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
++ if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+ DRM_ERROR("Can't support LVDS on pipe A\n");
+ return false;
+ }
+@@ -241,9 +212,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ return false;
+ }
+ }
+- /* If we don't have a panel mode, there is nothing we can do */
+- if (dev_priv->panel_fixed_mode == NULL)
+- return true;
+
+ /*
+ * We have timings from the BIOS for the panel, put them in
+@@ -251,7 +219,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+- intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
++ intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+@@ -260,8 +228,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ }
+
+ /* Make sure pre-965s set dither correctly */
+- if (!IS_I965G(dev)) {
+- if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
++ if (INTEL_INFO(dev)->gen < 4) {
++ if (dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+ }
+
+@@ -271,7 +239,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ goto out;
+
+ /* 965+ wants fuzzy fitting */
+- if (IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY);
+
+@@ -297,7 +265,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+
+@@ -356,7 +324,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ * Fortunately this is all done for us in hw.
+ */
+ pfit_control |= PFIT_ENABLE;
+- if (IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+@@ -369,8 +337,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ }
+
+ out:
+- intel_lvds->pfit_control = pfit_control;
+- intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
++ if (pfit_control != intel_lvds->pfit_control ||
++ pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
++ intel_lvds->pfit_control = pfit_control;
++ intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
++ intel_lvds->pfit_dirty = true;
++ }
+ dev_priv->lvds_border_bits = border;
+
+ /*
+@@ -386,30 +358,60 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
+ {
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 reg;
+-
+- if (HAS_PCH_SPLIT(dev))
+- reg = BLC_PWM_CPU_CTL;
+- else
+- reg = BLC_PWM_CTL;
+-
+- dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
+- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+- BACKLIGHT_DUTY_CYCLE_MASK);
++ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
++
++ dev_priv->backlight_level = intel_panel_get_backlight(dev);
++
++ /* We try to do the minimum that is necessary in order to unlock
++ * the registers for mode setting.
++ *
++ * On Ironlake, this is quite simple as we just set the unlock key
++ * and ignore all subtleties. (This may cause some issues...)
++ *
++ * Prior to Ironlake, we must disable the pipe if we want to adjust
++ * the panel fitter. However at all other times we can just reset
++ * the registers regardless.
++ */
+
+- intel_lvds_set_power(dev, false);
++ if (HAS_PCH_SPLIT(dev)) {
++ I915_WRITE(PCH_PP_CONTROL,
++ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
++ } else if (intel_lvds->pfit_dirty) {
++ I915_WRITE(PP_CONTROL,
++ (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
++ & ~POWER_TARGET_ON);
++ } else {
++ I915_WRITE(PP_CONTROL,
++ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
++ }
+ }
+
+-static void intel_lvds_commit( struct drm_encoder *encoder)
++static void intel_lvds_commit(struct drm_encoder *encoder)
+ {
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+- if (dev_priv->backlight_duty_cycle == 0)
+- dev_priv->backlight_duty_cycle =
+- intel_lvds_get_max_backlight(dev);
++ if (dev_priv->backlight_level == 0)
++ dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+- intel_lvds_set_power(dev, true);
++ /* Undo any unlocking done in prepare to prevent accidental
++ * adjustment of the registers.
++ */
++ if (HAS_PCH_SPLIT(dev)) {
++ u32 val = I915_READ(PCH_PP_CONTROL);
++ if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
++ I915_WRITE(PCH_PP_CONTROL, val & 0x3);
++ } else {
++ u32 val = I915_READ(PP_CONTROL);
++ if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
++ I915_WRITE(PP_CONTROL, val & 0x3);
++ }
++
++ /* Always do a full power on as we do not know what state
++ * we were left in.
++ */
++ intel_lvds_set_power(intel_lvds, true);
+ }
+
+ static void intel_lvds_mode_set(struct drm_encoder *encoder,
+@@ -418,7 +420,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
+ {
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
++ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+@@ -429,13 +431,23 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
++ if (!intel_lvds->pfit_dirty)
++ return;
++
+ /*
+ * Enable automatic panel scaling so that non-native modes fill the
+ * screen. Should be enabled before the pipe is enabled, according to
+ * register description and PRM.
+ */
++ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
++ intel_lvds->pfit_control,
++ intel_lvds->pfit_pgm_ratios);
++ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
++ DRM_ERROR("timed out waiting for panel to power off\n");
++
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
++ intel_lvds->pfit_dirty = false;
+ }
+
+ /**
+@@ -465,38 +477,19 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
+ */
+ static int intel_lvds_get_modes(struct drm_connector *connector)
+ {
++ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct drm_device *dev = connector->dev;
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- int ret = 0;
+-
+- if (dev_priv->lvds_edid_good) {
+- ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+-
+- if (ret)
+- return ret;
+- }
++ struct drm_display_mode *mode;
+
+- /* Didn't get an EDID, so
+- * Set wide sync ranges so we get all modes
+- * handed to valid_mode for checking
+- */
+- connector->display_info.min_vfreq = 0;
+- connector->display_info.max_vfreq = 200;
+- connector->display_info.min_hfreq = 0;
+- connector->display_info.max_hfreq = 200;
+-
+- if (dev_priv->panel_fixed_mode != NULL) {
+- struct drm_display_mode *mode;
+-
+- mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
+- drm_mode_probed_add(connector, mode);
++ if (intel_lvds->edid)
++ return drm_add_edid_modes(connector, intel_lvds->edid);
+
+- return 1;
+- }
++ mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
++ if (mode == NULL)
++ return 0;
+
+- return 0;
++ drm_mode_probed_add(connector, mode);
++ return 1;
+ }
+
+ static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+@@ -587,18 +580,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+ {
++ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct drm_device *dev = connector->dev;
+
+- if (property == dev->mode_config.scaling_mode_property &&
+- connector->encoder) {
+- struct drm_crtc *crtc = connector->encoder->crtc;
+- struct drm_encoder *encoder = connector->encoder;
+- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
++ if (property == dev->mode_config.scaling_mode_property) {
++ struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+
+ if (value == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+- return 0;
++ return -EINVAL;
+ }
++
+ if (intel_lvds->fitting_mode == value) {
+ /* the LVDS scaling property is not changed */
+ return 0;
+@@ -628,7 +620,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
+ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
+ .get_modes = intel_lvds_get_modes,
+ .mode_valid = intel_lvds_mode_valid,
+- .best_encoder = intel_attached_encoder,
++ .best_encoder = intel_best_encoder,
+ };
+
+ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+@@ -726,16 +718,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ * Find the reduced downclock for LVDS in EDID.
+ */
+ static void intel_find_lvds_downclock(struct drm_device *dev,
+- struct drm_connector *connector)
++ struct drm_display_mode *fixed_mode,
++ struct drm_connector *connector)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct drm_display_mode *scan, *panel_fixed_mode;
++ struct drm_display_mode *scan;
+ int temp_downclock;
+
+- panel_fixed_mode = dev_priv->panel_fixed_mode;
+- temp_downclock = panel_fixed_mode->clock;
+-
+- mutex_lock(&dev->mode_config.mutex);
++ temp_downclock = fixed_mode->clock;
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ /*
+ * If one mode has the same resolution with the fixed_panel
+@@ -744,14 +734,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
+ * case we can set the different FPx0/1 to dynamically select
+ * between low and high frequency.
+ */
+- if (scan->hdisplay == panel_fixed_mode->hdisplay &&
+- scan->hsync_start == panel_fixed_mode->hsync_start &&
+- scan->hsync_end == panel_fixed_mode->hsync_end &&
+- scan->htotal == panel_fixed_mode->htotal &&
+- scan->vdisplay == panel_fixed_mode->vdisplay &&
+- scan->vsync_start == panel_fixed_mode->vsync_start &&
+- scan->vsync_end == panel_fixed_mode->vsync_end &&
+- scan->vtotal == panel_fixed_mode->vtotal) {
++ if (scan->hdisplay == fixed_mode->hdisplay &&
++ scan->hsync_start == fixed_mode->hsync_start &&
++ scan->hsync_end == fixed_mode->hsync_end &&
++ scan->htotal == fixed_mode->htotal &&
++ scan->vdisplay == fixed_mode->vdisplay &&
++ scan->vsync_start == fixed_mode->vsync_start &&
++ scan->vsync_end == fixed_mode->vsync_end &&
++ scan->vtotal == fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+ /*
+ * The downclock is already found. But we
+@@ -761,17 +751,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
+ }
+ }
+ }
+- mutex_unlock(&dev->mode_config.mutex);
+- if (temp_downclock < panel_fixed_mode->clock &&
+- i915_lvds_downclock) {
++ if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
+ /* We found the downclock for LVDS. */
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = temp_downclock;
+ DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+- "Normal clock %dKhz, downclock %dKhz\n",
+- panel_fixed_mode->clock, temp_downclock);
++ "Normal clock %dKhz, downclock %dKhz\n",
++ fixed_mode->clock, temp_downclock);
+ }
+- return;
+ }
+
+ /*
+@@ -780,38 +767,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
+ * If it is present, return 1.
+ * If it is not present, return false.
+ * If no child dev is parsed from VBT, it assumes that the LVDS is present.
+- * Note: The addin_offset should also be checked for LVDS panel.
+- * Only when it is non-zero, it is assumed that it is present.
+ */
+-static int lvds_is_present_in_vbt(struct drm_device *dev)
++static bool lvds_is_present_in_vbt(struct drm_device *dev,
++ u8 *i2c_pin)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct child_device_config *p_child;
+- int i, ret;
++ int i;
+
+ if (!dev_priv->child_dev_num)
+- return 1;
++ return true;
+
+- ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+- p_child = dev_priv->child_dev + i;
+- /*
+- * If the device type is not LFP, continue.
+- * If the device type is 0x22, it is also regarded as LFP.
++ struct child_device_config *child = dev_priv->child_dev + i;
++
++ /* If the device type is not LFP, continue.
++ * We have to check both the new identifiers as well as the
++ * old for compatibility with some BIOSes.
+ */
+- if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
+- p_child->device_type != DEVICE_TYPE_LFP)
++ if (child->device_type != DEVICE_TYPE_INT_LFP &&
++ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+- /* The addin_offset should be checked. Only when it is
+- * non-zero, it is regarded as present.
++ if (child->i2c_pin)
++ *i2c_pin = child->i2c_pin;
++
++ /* However, we cannot trust the BIOS writers to populate
++ * the VBT correctly. Since LVDS requires additional
++ * information from AIM blocks, a non-zero addin offset is
++ * a good indicator that the LVDS is actually present.
+ */
+- if (p_child->addin_offset) {
+- ret = 1;
+- break;
+- }
++ if (child->addin_offset)
++ return true;
++
++ /* But even then some BIOS writers perform some black magic
++ * and instantiate the device without reference to any
++ * additional data. Trust that if the VBT was written into
++ * the OpRegion then they have validated the LVDS's existence.
++ */
++ if (dev_priv->opregion.vbt)
++ return true;
+ }
+- return ret;
++
++ return false;
++}
++
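++/* Probe the EDID address on the panel's DDC bus to check that
++ * something actually responds there.
++ */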
++static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u8 buf = 0;
++ struct i2c_msg msgs[] = {
++ {
++ .addr = 0x50, /* DDC EDID read address (7-bit) */
++ .flags = 0,
++ .len = 1,
++ .buf = &buf,
++ },
++ };
++ struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
++ /* XXX this only appears to work when using GMBUS */
++ if (intel_gmbus_is_forced_bit(i2c))
++ return true;
++ return i2c_transfer(i2c, msgs, 1) == 1;
+ }
+
+ /**
+@@ -832,13 +848,15 @@ void intel_lvds_init(struct drm_device *dev)
+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_crtc *crtc;
+ u32 lvds;
+- int pipe, gpio = GPIOC;
++ int pipe;
++ u8 pin;
+
+ /* Skip init on machines we know falsely report LVDS */
+ if (dmi_check_system(intel_no_lvds))
+ return;
+
+- if (!lvds_is_present_in_vbt(dev)) {
++ pin = GMBUS_PORT_PANEL;
++ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return;
+ }
+@@ -846,11 +864,15 @@ void intel_lvds_init(struct drm_device *dev)
+ if (HAS_PCH_SPLIT(dev)) {
+ if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
+ return;
+- if (dev_priv->edp_support) {
++ if (dev_priv->edp.support) {
+ DRM_DEBUG_KMS("disable LVDS for eDP support\n");
+ return;
+ }
+- gpio = PCH_GPIOC;
++ }
++
++ if (!intel_lvds_ddc_probe(dev, pin)) {
++ DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
++ return;
+ }
+
+ intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
+@@ -864,16 +886,20 @@ void intel_lvds_init(struct drm_device *dev)
+ return;
+ }
+
++ if (!HAS_PCH_SPLIT(dev))
++ intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
++
+ intel_encoder = &intel_lvds->base;
+- encoder = &intel_encoder->enc;
++ encoder = &intel_encoder->base;
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+- drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
++ drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+- drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
++ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+ intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+@@ -904,43 +930,50 @@ void intel_lvds_init(struct drm_device *dev)
+ * if closed, act like it's not there for now
+ */
+
+- /* Set up the DDC bus. */
+- intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
+- if (!intel_encoder->ddc_bus) {
+- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+- "failed.\n");
+- goto failed;
+- }
+-
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+- dev_priv->lvds_edid_good = true;
+-
+- if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
+- dev_priv->lvds_edid_good = false;
++ intel_lvds->edid = drm_get_edid(connector,
++ &dev_priv->gmbus[pin].adapter);
++ if (intel_lvds->edid) {
++ if (drm_add_edid_modes(connector,
++ intel_lvds->edid)) {
++ drm_mode_connector_update_edid_property(connector,
++ intel_lvds->edid);
++ } else {
++ kfree(intel_lvds->edid);
++ intel_lvds->edid = NULL;
++ }
++ }
++ if (!intel_lvds->edid) {
++ /* Didn't get an EDID, so set wide sync ranges to
++ * ensure all modes get handed to mode_valid for
++ * checking.
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++ }
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+- mutex_lock(&dev->mode_config.mutex);
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+- dev_priv->panel_fixed_mode =
++ intel_lvds->fixed_mode =
+ drm_mode_duplicate(dev, scan);
+- mutex_unlock(&dev->mode_config.mutex);
+- intel_find_lvds_downclock(dev, connector);
++ intel_find_lvds_downclock(dev,
++ intel_lvds->fixed_mode,
++ connector);
+ goto out;
+ }
+- mutex_unlock(&dev->mode_config.mutex);
+ }
+
+ /* Failed to get EDID, what about VBT? */
+ if (dev_priv->lfp_lvds_vbt_mode) {
+- mutex_lock(&dev->mode_config.mutex);
+- dev_priv->panel_fixed_mode =
++ intel_lvds->fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+- mutex_unlock(&dev->mode_config.mutex);
+- if (dev_priv->panel_fixed_mode) {
+- dev_priv->panel_fixed_mode->type |=
++ if (intel_lvds->fixed_mode) {
++ intel_lvds->fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out;
+ }
+@@ -958,19 +991,19 @@ void intel_lvds_init(struct drm_device *dev)
+
+ lvds = I915_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+- crtc = intel_get_crtc_from_pipe(dev, pipe);
++ crtc = intel_get_crtc_for_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+- dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
+- if (dev_priv->panel_fixed_mode) {
+- dev_priv->panel_fixed_mode->type |=
++ intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
++ if (intel_lvds->fixed_mode) {
++ intel_lvds->fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out;
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+- if (!dev_priv->panel_fixed_mode)
++ if (!intel_lvds->fixed_mode)
+ goto failed;
+
+ out:
+@@ -997,8 +1030,6 @@ out:
+
+ failed:
+ DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+- if (intel_encoder->ddc_bus)
+- intel_i2c_destroy(intel_encoder->ddc_bus);
+ drm_connector_cleanup(connector);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_lvds);
+diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
+index 4b1fd3d..f70b7cf 100644
+--- a/drivers/gpu/drm/i915/intel_modes.c
++++ b/drivers/gpu/drm/i915/intel_modes.c
+@@ -1,6 +1,6 @@
+ /*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+- * Copyright (c) 2007 Intel Corporation
++ * Copyright (c) 2007, 2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+@@ -34,11 +34,11 @@
+ * intel_ddc_probe
+ *
+ */
+-bool intel_ddc_probe(struct intel_encoder *intel_encoder)
++bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
+ {
++ struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
+ u8 out_buf[] = { 0x0, 0x0};
+ u8 buf[2];
+- int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0x50,
+@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
+ }
+ };
+
+- intel_i2c_quirk_set(intel_encoder->enc.dev, true);
+- ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
+- intel_i2c_quirk_set(intel_encoder->enc.dev, false);
+- if (ret == 2)
+- return true;
+-
+- return false;
++ return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
+ }
+
+ /**
+@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid;
+ int ret = 0;
+
+- intel_i2c_quirk_set(connector->dev, true);
+ edid = drm_get_edid(connector, adapter);
+- intel_i2c_quirk_set(connector->dev, false);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
+new file mode 100644
+index 0000000..9b0d9a8
+--- /dev/null
++++ b/drivers/gpu/drm/i915/intel_opregion.c
+@@ -0,0 +1,517 @@
++/*
++ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
++ * Copyright 2008 Red Hat <mjg@redhat.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#include <linux/acpi.h>
++#include <acpi/video.h>
++
++#include "drmP.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++#include "intel_drv.h"
++
++#define PCI_ASLE 0xe4
++#define PCI_ASLS 0xfc
++
++#define OPREGION_HEADER_OFFSET 0
++#define OPREGION_ACPI_OFFSET 0x100
++#define OPREGION_SWSCI_OFFSET 0x200
++#define OPREGION_ASLE_OFFSET 0x300
++#define OPREGION_VBT_OFFSET 0x400
++
++#define OPREGION_SIGNATURE "IntelGraphicsMem"
++#define MBOX_ACPI (1<<0)
++#define MBOX_SWSCI (1<<1)
++#define MBOX_ASLE (1<<2)
++
++struct opregion_header {
++ u8 signature[16];
++ u32 size;
++ u32 opregion_ver;
++ u8 bios_ver[32];
++ u8 vbios_ver[16];
++ u8 driver_ver[16];
++ u32 mboxes;
++ u8 reserved[164];
++} __attribute__((packed));
++
++/* OpRegion mailbox #1: public ACPI methods */
++struct opregion_acpi {
++ u32 drdy; /* driver readiness */
++ u32 csts; /* notification status */
++ u32 cevt; /* current event */
++ u8 rsvd1[20];
++ u32 didl[8]; /* supported display devices ID list */
++ u32 cpdl[8]; /* currently presented display list */
++ u32 cadl[8]; /* currently active display list */
++ u32 nadl[8]; /* next active devices list */
++ u32 aslp; /* ASL sleep time-out */
++ u32 tidx; /* toggle table index */
++ u32 chpd; /* current hotplug enable indicator */
++ u32 clid; /* current lid state*/
++ u32 cdck; /* current docking state */
++ u32 sxsw; /* Sx state resume */
++ u32 evts; /* ASL supported events */
++ u32 cnot; /* current OS notification */
++ u32 nrdy; /* driver status */
++ u8 rsvd2[60];
++} __attribute__((packed));
++
++/* OpRegion mailbox #2: SWSCI */
++struct opregion_swsci {
++ u32 scic; /* SWSCI command|status|data */
++ u32 parm; /* command parameters */
++ u32 dslp; /* driver sleep time-out */
++ u8 rsvd[244];
++} __attribute__((packed));
++
++/* OpRegion mailbox #3: ASLE */
++struct opregion_asle {
++ u32 ardy; /* driver readiness */
++ u32 aslc; /* ASLE interrupt command */
++ u32 tche; /* technology enabled indicator */
++ u32 alsi; /* current ALS illuminance reading */
++ u32 bclp; /* backlight brightness to set */
++ u32 pfit; /* panel fitting state */
++ u32 cblv; /* current brightness level */
++ u16 bclm[20]; /* backlight level duty cycle mapping table */
++ u32 cpfm; /* current panel fitting mode */
++ u32 epfm; /* enabled panel fitting modes */
++ u8 plut[74]; /* panel LUT and identifier */
++ u32 pfmb; /* PWM freq and min brightness */
++ u8 rsvd[102];
++} __attribute__((packed));
++
++/* ASLE irq request bits */
++#define ASLE_SET_ALS_ILLUM (1 << 0)
++#define ASLE_SET_BACKLIGHT (1 << 1)
++#define ASLE_SET_PFIT (1 << 2)
++#define ASLE_SET_PWM_FREQ (1 << 3)
++#define ASLE_REQ_MSK 0xf
++
++/* response bits of ASLE irq request */
++#define ASLE_ALS_ILLUM_FAILED (1<<10)
++#define ASLE_BACKLIGHT_FAILED (1<<12)
++#define ASLE_PFIT_FAILED (1<<14)
++#define ASLE_PWM_FREQ_FAILED (1<<16)
++
++/* ASLE backlight brightness to set */
++#define ASLE_BCLP_VALID (1<<31)
++#define ASLE_BCLP_MSK (~(1<<31))
++
++/* ASLE panel fitting request */
++#define ASLE_PFIT_VALID (1<<31)
++#define ASLE_PFIT_CENTER (1<<0)
++#define ASLE_PFIT_STRETCH_TEXT (1<<1)
++#define ASLE_PFIT_STRETCH_GFX (1<<2)
++
++/* PWM frequency and minimum brightness */
++#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
++#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
++#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
++#define ASLE_PFMB_PWM_VALID (1<<31)
++
++#define ASLE_CBLV_VALID (1<<31)
++
++#define ACPI_OTHER_OUTPUT (0<<8)
++#define ACPI_VGA_OUTPUT (1<<8)
++#define ACPI_TV_OUTPUT (2<<8)
++#define ACPI_DIGITAL_OUTPUT (3<<8)
++#define ACPI_LVDS_OUTPUT (4<<8)
++
++#ifdef CONFIG_ACPI
++static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct opregion_asle *asle = dev_priv->opregion.asle;
++ u32 max;
++
++ if (!(bclp & ASLE_BCLP_VALID))
++ return ASLE_BACKLIGHT_FAILED;
++
++ bclp &= ASLE_BCLP_MSK;
++ if (bclp > 255)
++ return ASLE_BACKLIGHT_FAILED;
++
++ max = intel_panel_get_max_backlight(dev);
++ intel_panel_set_backlight(dev, bclp * max / 255);
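++ /* Report the current brightness back as a percentage. */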
++ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
++
++ return 0;
++}
++
++static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
++{
++ /* alsi is the current ALS reading in lux. 0 indicates below sensor
++ range, 0xffff indicates above sensor range. 1-0xfffe are valid */
++ return 0;
++}
++
++static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ if (pfmb & ASLE_PFMB_PWM_VALID) {
++ u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
++ u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
++ blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
++ pwm = pwm >> 9;
++ /* FIXME - what do we do with the PWM? */
++ }
++ return 0;
++}
++
++static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
++{
++ /* Panel fitting is currently controlled by the X code, so this is a
++ noop until modesetting support works fully */
++ if (!(pfit & ASLE_PFIT_VALID))
++ return ASLE_PFIT_FAILED;
++ return 0;
++}
++
++void intel_opregion_asle_intr(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct opregion_asle *asle = dev_priv->opregion.asle;
++ u32 asle_stat = 0;
++ u32 asle_req;
++
++ if (!asle)
++ return;
++
++ asle_req = asle->aslc & ASLE_REQ_MSK;
++
++ if (!asle_req) {
++ DRM_DEBUG_DRIVER("non asle set request??\n");
++ return;
++ }
++
++ if (asle_req & ASLE_SET_ALS_ILLUM)
++ asle_stat |= asle_set_als_illum(dev, asle->alsi);
++
++ if (asle_req & ASLE_SET_BACKLIGHT)
++ asle_stat |= asle_set_backlight(dev, asle->bclp);
++
++ if (asle_req & ASLE_SET_PFIT)
++ asle_stat |= asle_set_pfit(dev, asle->pfit);
++
++ if (asle_req & ASLE_SET_PWM_FREQ)
++ asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
++
++ asle->aslc = asle_stat;
++}
++
++/* Only present on Ironlake+ */
++void intel_opregion_gse_intr(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct opregion_asle *asle = dev_priv->opregion.asle;
++ u32 asle_stat = 0;
++ u32 asle_req;
++
++ if (!asle)
++ return;
++
++ asle_req = asle->aslc & ASLE_REQ_MSK;
++
++ if (!asle_req) {
++ DRM_DEBUG_DRIVER("non asle set request??\n");
++ return;
++ }
++
++ if (asle_req & ASLE_SET_ALS_ILLUM) {
++ DRM_DEBUG_DRIVER("Illum is not supported\n");
++ asle_stat |= ASLE_ALS_ILLUM_FAILED;
++ }
++
++ if (asle_req & ASLE_SET_BACKLIGHT)
++ asle_stat |= asle_set_backlight(dev, asle->bclp);
++
++ if (asle_req & ASLE_SET_PFIT) {
++ DRM_DEBUG_DRIVER("Pfit is not supported\n");
++ asle_stat |= ASLE_PFIT_FAILED;
++ }
++
++ if (asle_req & ASLE_SET_PWM_FREQ) {
++ DRM_DEBUG_DRIVER("PWM freq is not supported\n");
++ asle_stat |= ASLE_PWM_FREQ_FAILED;
++ }
++
++ asle->aslc = asle_stat;
++}
++#define ASLE_ALS_EN (1<<0)
++#define ASLE_BLC_EN (1<<1)
++#define ASLE_PFIT_EN (1<<2)
++#define ASLE_PFMB_EN (1<<3)
++
++void intel_opregion_enable_asle(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct opregion_asle *asle = dev_priv->opregion.asle;
++
++ if (asle) {
++ if (IS_MOBILE(dev)) {
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
++ intel_enable_asle(dev);
++ spin_unlock_irqrestore(&dev_priv->user_irq_lock,
++ irqflags);
++ }
++
++ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
++ ASLE_PFMB_EN;
++ asle->ardy = 1;
++ }
++}
++
++#define ACPI_EV_DISPLAY_SWITCH (1<<0)
++#define ACPI_EV_LID (1<<1)
++#define ACPI_EV_DOCK (1<<2)
++
++static struct intel_opregion *system_opregion;
++
++static int intel_opregion_video_event(struct notifier_block *nb,
++ unsigned long val, void *data)
++{
++ /* The only video events relevant to opregion are 0x80. These indicate
++ either a docking event, lid switch or display switch request. In
++ Linux, these are handled by the dock, button and video drivers.
++ We might want to fix the video driver to be opregion-aware in
++ future, but right now we just indicate to the firmware that the
++ request has been handled */
++
++ struct opregion_acpi *acpi;
++
++ if (!system_opregion)
++ return NOTIFY_DONE;
++
++ acpi = system_opregion->acpi;
++ acpi->csts = 0;
++
++ return NOTIFY_OK;
++}
++
++static struct notifier_block intel_opregion_notifier = {
++ .notifier_call = intel_opregion_video_event,
++};
++
++/*
++ * Initialise the DIDL field in opregion. This passes a list of devices to
++ * the firmware. Values are defined by section B.4.2 of the ACPI specification
++ * (version 3)
++ */
++
++static void intel_didl_outputs(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_opregion *opregion = &dev_priv->opregion;
++ struct drm_connector *connector;
++ acpi_handle handle;
++ struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
++ unsigned long long device_id;
++ acpi_status status;
++ int i = 0;
++
++ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
++ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
++ return;
++
++ if (acpi_is_video_device(acpi_dev))
++ acpi_video_bus = acpi_dev;
++ else {
++ list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
++ if (acpi_is_video_device(acpi_cdev)) {
++ acpi_video_bus = acpi_cdev;
++ break;
++ }
++ }
++ }
++
++ if (!acpi_video_bus) {
++ printk(KERN_WARNING "No ACPI video bus found\n");
++ return;
++ }
++
++ list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
++ if (i >= 8) {
++ dev_printk(KERN_ERR, &dev->pdev->dev,
++ "More than 8 outputs detected\n");
++ return;
++ }
++ status =
++ acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
++ NULL, &device_id);
++ if (ACPI_SUCCESS(status)) {
++ if (!device_id)
++ goto blind_set;
++ opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
++ i++;
++ }
++ }
++
++end:
++ /* If fewer than 8 outputs, the list must be null terminated */
++ if (i < 8)
++ opregion->acpi->didl[i] = 0;
++ return;
++
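++/* A child device reported a zero _ADR, so synthesize device IDs from
++ * the DRM connector list instead, then jump back to terminate the list.
++ */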
++blind_set:
++ i = 0;
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ int output_type = ACPI_OTHER_OUTPUT;
++ if (i >= 8) {
++ dev_printk(KERN_ERR, &dev->pdev->dev,
++ "More than 8 outputs detected\n");
++ return;
++ }
++ switch (connector->connector_type) {
++ case DRM_MODE_CONNECTOR_VGA:
++ case DRM_MODE_CONNECTOR_DVIA:
++ output_type = ACPI_VGA_OUTPUT;
++ break;
++ case DRM_MODE_CONNECTOR_Composite:
++ case DRM_MODE_CONNECTOR_SVIDEO:
++ case DRM_MODE_CONNECTOR_Component:
++ case DRM_MODE_CONNECTOR_9PinDIN:
++ output_type = ACPI_TV_OUTPUT;
++ break;
++ case DRM_MODE_CONNECTOR_DVII:
++ case DRM_MODE_CONNECTOR_DVID:
++ case DRM_MODE_CONNECTOR_DisplayPort:
++ case DRM_MODE_CONNECTOR_HDMIA:
++ case DRM_MODE_CONNECTOR_HDMIB:
++ output_type = ACPI_DIGITAL_OUTPUT;
++ break;
++ case DRM_MODE_CONNECTOR_LVDS:
++ output_type = ACPI_LVDS_OUTPUT;
++ break;
++ }
++ opregion->acpi->didl[i] |= (1<<31) | output_type | i;
++ i++;
++ }
++ goto end;
++}
++
++void intel_opregion_init(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_opregion *opregion = &dev_priv->opregion;
++
++ if (!opregion->header)
++ return;
++
++ if (opregion->acpi) {
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ intel_didl_outputs(dev);
++
++ /* Notify BIOS we are ready to handle ACPI video ext notifs.
++ * Right now, all the events are handled by the ACPI video module.
++ * We don't actually need to do anything with them. */
++ opregion->acpi->csts = 0;
++ opregion->acpi->drdy = 1;
++
++ system_opregion = opregion;
++ register_acpi_notifier(&intel_opregion_notifier);
++ }
++
++ if (opregion->asle)
++ intel_opregion_enable_asle(dev);
++}
++
++void intel_opregion_fini(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_opregion *opregion = &dev_priv->opregion;
++
++ if (!opregion->header)
++ return;
++
++ if (opregion->acpi) {
++ opregion->acpi->drdy = 0;
++
++ system_opregion = NULL;
++ unregister_acpi_notifier(&intel_opregion_notifier);
++ }
++
++ /* just clear all opregion memory pointers now */
++ iounmap(opregion->header);
++ opregion->header = NULL;
++ opregion->acpi = NULL;
++ opregion->swsci = NULL;
++ opregion->asle = NULL;
++ opregion->vbt = NULL;
++}
++#endif
++
++int intel_opregion_setup(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_opregion *opregion = &dev_priv->opregion;
++ void *base;
++ u32 asls, mboxes;
++ int err = 0;
++
++ pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
++ DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
++ if (asls == 0) {
++ DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
++ return -ENOTSUPP;
++ }
++
++ base = ioremap(asls, OPREGION_SIZE);
++ if (!base)
++ return -ENOMEM;
++
++ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
++ DRM_DEBUG_DRIVER("opregion signature mismatch\n");
++ err = -EINVAL;
++ goto err_out;
++ }
++ opregion->header = base;
++ opregion->vbt = base + OPREGION_VBT_OFFSET;
++
++ mboxes = opregion->header->mboxes;
++ if (mboxes & MBOX_ACPI) {
++ DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
++ opregion->acpi = base + OPREGION_ACPI_OFFSET;
++ }
++
++ if (mboxes & MBOX_SWSCI) {
++ DRM_DEBUG_DRIVER("SWSCI supported\n");
++ opregion->swsci = base + OPREGION_SWSCI_OFFSET;
++ }
++ if (mboxes & MBOX_ASLE) {
++ DRM_DEBUG_DRIVER("ASLE supported\n");
++ opregion->asle = base + OPREGION_ASLE_OFFSET;
++ }
++
++ return 0;
++
++err_out:
++ iounmap(base);
++ return err;
++}
+diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
+index 1d306a4..5b513ea 100644
+--- a/drivers/gpu/drm/i915/intel_overlay.c
++++ b/drivers/gpu/drm/i915/intel_overlay.c
+@@ -170,57 +170,143 @@ struct overlay_registers {
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+ };
+
+-/* overlay flip addr flag */
+-#define OFC_UPDATE 0x1
+-
+-#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
+-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
+-
++struct intel_overlay {
++ struct drm_device *dev;
++ struct intel_crtc *crtc;
++ struct drm_i915_gem_object *vid_bo;
++ struct drm_i915_gem_object *old_vid_bo;
++ int active;
++ int pfit_active;
++ u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
++ u32 color_key;
++ u32 brightness, contrast, saturation;
++ u32 old_xscale, old_yscale;
++ /* register access */
++ u32 flip_addr;
++ struct drm_i915_gem_object *reg_bo;
++ /* flip handling */
++ uint32_t last_flip_req;
++ void (*flip_tail)(struct intel_overlay *);
++};
+
+-static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
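++/* The overlay registers live either in a physically addressed object
++ * (on chips needing physical addressing) or behind the GTT; map them
++ * accordingly.
++ */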
++static struct overlay_registers *
++intel_overlay_map_regs(struct intel_overlay *overlay)
+ {
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers *regs;
+
+- /* no recursive mappings */
+- BUG_ON(overlay->virt_addr);
++ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
++ regs = overlay->reg_bo->phys_obj->handle->vaddr;
++ else
++ regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
++ overlay->reg_bo->gtt_offset);
+
+- if (OVERLAY_NONPHYSICAL(overlay->dev)) {
+- regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+- overlay->reg_bo->gtt_offset,
+- KM_USER0);
++ return regs;
++}
+
+- if (!regs) {
+- DRM_ERROR("failed to map overlay regs in GTT\n");
+- return NULL;
+- }
+- } else
+- regs = overlay->reg_bo->phys_obj->handle->vaddr;
++static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
++ struct overlay_registers *regs)
++{
++ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
++ io_mapping_unmap(regs);
++}
+
+- return overlay->virt_addr = regs;
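++/* Queue a request for the ring commands just emitted and block until
++ * the GPU has processed them.
++ */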
++static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
++ struct drm_i915_gem_request *request,
++ bool interruptible,
++ void (*tail)(struct intel_overlay *))
++{
++ struct drm_device *dev = overlay->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ int ret;
++
++ BUG_ON(overlay->last_flip_req);
++ overlay->last_flip_req =
++ i915_add_request(dev, NULL, request, &dev_priv->render_ring);
++ if (overlay->last_flip_req == 0)
++ return -ENOMEM;
++
++ overlay->flip_tail = tail;
++ ret = i915_do_wait_request(dev,
++ overlay->last_flip_req, true,
++ &dev_priv->render_ring);
++ if (ret)
++ return ret;
++
++ overlay->last_flip_req = 0;
++ return 0;
+ }
+
+-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
++/* Workaround for i830 bug where pipe A must be enabled to change control regs.
++ * Returns 1 if pipe A had to be enabled, so the caller can disable it again. */
++static int
++i830_activate_pipe_a(struct drm_device *dev)
+ {
+- if (OVERLAY_NONPHYSICAL(overlay->dev))
+- io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct intel_crtc *crtc;
++ struct drm_crtc_helper_funcs *crtc_funcs;
++ struct drm_display_mode vesa_640x480 = {
++ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
++ 752, 800, 0, 480, 489, 492, 525, 0,
++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
++ }, *mode;
++
++ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
++ if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
++ return 0;
+
+- overlay->virt_addr = NULL;
++	/* most i8xx have pipe A forced on, so don't trust dpms mode */
++ if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
++ return 0;
+
+- return;
++ crtc_funcs = crtc->base.helper_private;
++ if (crtc_funcs->dpms == NULL)
++ return 0;
++
++ DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
++
++ mode = drm_mode_duplicate(dev, &vesa_640x480);
++ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
++	if (!drm_crtc_helper_set_mode(&crtc->base, mode,
++ crtc->base.x, crtc->base.y,
++ crtc->base.fb))
++ return 0;
++
++ crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
++ return 1;
++}
++
++static void
++i830_deactivate_pipe_a(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ }
+
+ /* overlay needs to be disabled in OCMD reg */
+ static int intel_overlay_on(struct intel_overlay *overlay)
+ {
+ struct drm_device *dev = overlay->dev;
++ struct drm_i915_gem_request *request;
++ int pipe_a_quirk = 0;
+ int ret;
+- drm_i915_private_t *dev_priv = dev->dev_private;
+
+ BUG_ON(overlay->active);
+-
+ overlay->active = 1;
+- overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
++
++ if (IS_I830(dev)) {
++ pipe_a_quirk = i830_activate_pipe_a(dev);
++ if (pipe_a_quirk < 0)
++ return pipe_a_quirk;
++ }
++
++ request = kzalloc(sizeof(*request), GFP_KERNEL);
++ if (request == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+@@ -229,32 +315,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+- overlay->last_flip_req =
+- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+- if (overlay->last_flip_req == 0)
+- return -ENOMEM;
+-
+- ret = i915_do_wait_request(dev,
+- overlay->last_flip_req, 1, &dev_priv->render_ring);
+- if (ret != 0)
+- return ret;
++ ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
++out:
++ if (pipe_a_quirk)
++ i830_deactivate_pipe_a(dev);
+
+- overlay->hw_wedged = 0;
+- overlay->last_flip_req = 0;
+- return 0;
++ return ret;
+ }
+
+ /* overlay needs to be enabled in OCMD reg */
+-static void intel_overlay_continue(struct intel_overlay *overlay,
+- bool load_polyphase_filter)
++static int intel_overlay_continue(struct intel_overlay *overlay,
++ bool load_polyphase_filter)
+ {
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_request *request;
+ u32 flip_addr = overlay->flip_addr;
+ u32 tmp;
+
+ BUG_ON(!overlay->active);
+
++ request = kzalloc(sizeof(*request), GFP_KERNEL);
++ if (request == NULL)
++ return -ENOMEM;
++
+ if (load_polyphase_filter)
+ flip_addr |= OFC_UPDATE;
+
+@@ -269,220 +353,132 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req =
+- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
++ i915_add_request(dev, NULL, request, &dev_priv->render_ring);
++ return 0;
+ }
+
+-static int intel_overlay_wait_flip(struct intel_overlay *overlay)
++static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+ {
+- struct drm_device *dev = overlay->dev;
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- int ret;
+- u32 tmp;
+-
+- if (overlay->last_flip_req != 0) {
+- ret = i915_do_wait_request(dev, overlay->last_flip_req,
+- 1, &dev_priv->render_ring);
+- if (ret == 0) {
+- overlay->last_flip_req = 0;
+-
+- tmp = I915_READ(ISR);
++ struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+
+- if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
+- return 0;
+- }
+- }
++ i915_gem_object_unpin(obj);
++ drm_gem_object_unreference(obj);
+
+- /* synchronous slowpath */
+- overlay->hw_wedged = RELEASE_OLD_VID;
++ overlay->old_vid_bo = NULL;
++}
+
+- BEGIN_LP_RING(2);
+- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+- OUT_RING(MI_NOOP);
+- ADVANCE_LP_RING();
++static void intel_overlay_off_tail(struct intel_overlay *overlay)
++{
++ struct drm_gem_object *obj;
+
+- overlay->last_flip_req =
+- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+- if (overlay->last_flip_req == 0)
+- return -ENOMEM;
++ /* never have the overlay hw on without showing a frame */
++ BUG_ON(!overlay->vid_bo);
++ obj = &overlay->vid_bo->base;
+
+- ret = i915_do_wait_request(dev, overlay->last_flip_req,
+- 1, &dev_priv->render_ring);
+- if (ret != 0)
+- return ret;
++ i915_gem_object_unpin(obj);
++ drm_gem_object_unreference(obj);
++ overlay->vid_bo = NULL;
+
+- overlay->hw_wedged = 0;
+- overlay->last_flip_req = 0;
+- return 0;
++ overlay->crtc->overlay = NULL;
++ overlay->crtc = NULL;
++ overlay->active = 0;
+ }
+
+ /* overlay needs to be disabled in OCMD reg */
+-static int intel_overlay_off(struct intel_overlay *overlay)
++static int intel_overlay_off(struct intel_overlay *overlay,
++ bool interruptible)
+ {
+- u32 flip_addr = overlay->flip_addr;
+ struct drm_device *dev = overlay->dev;
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- int ret;
++ u32 flip_addr = overlay->flip_addr;
++ struct drm_i915_gem_request *request;
+
+ BUG_ON(!overlay->active);
+
++ request = kzalloc(sizeof(*request), GFP_KERNEL);
++ if (request == NULL)
++ return -ENOMEM;
++
+ /* According to intel docs the overlay hw may hang (when switching
+ * off) without loading the filter coeffs. It is however unclear whether
+ * this applies to the disabling of the overlay or to the switching off
+ * of the hw. Do it in both cases */
+ flip_addr |= OFC_UPDATE;
+
++ BEGIN_LP_RING(6);
+ /* wait for overlay to go idle */
+- overlay->hw_wedged = SWITCH_OFF_STAGE_1;
+-
+- BEGIN_LP_RING(4);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ OUT_RING(flip_addr);
+- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+- OUT_RING(MI_NOOP);
+- ADVANCE_LP_RING();
+-
+- overlay->last_flip_req =
+- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+- if (overlay->last_flip_req == 0)
+- return -ENOMEM;
+-
+- ret = i915_do_wait_request(dev, overlay->last_flip_req,
+- 1, &dev_priv->render_ring);
+- if (ret != 0)
+- return ret;
+-
++ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ /* turn overlay off */
+- overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+-
+- BEGIN_LP_RING(4);
+- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
++ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(flip_addr);
+- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+- OUT_RING(MI_NOOP);
++ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ ADVANCE_LP_RING();
+
+- overlay->last_flip_req =
+- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+- if (overlay->last_flip_req == 0)
+- return -ENOMEM;
+-
+- ret = i915_do_wait_request(dev, overlay->last_flip_req,
+- 1, &dev_priv->render_ring);
+- if (ret != 0)
+- return ret;
+-
+- overlay->hw_wedged = 0;
+- overlay->last_flip_req = 0;
+- return ret;
+-}
+-
+-static void intel_overlay_off_tail(struct intel_overlay *overlay)
+-{
+- struct drm_gem_object *obj;
+-
+- /* never have the overlay hw on without showing a frame */
+- BUG_ON(!overlay->vid_bo);
+- obj = &overlay->vid_bo->base;
+-
+- i915_gem_object_unpin(obj);
+- drm_gem_object_unreference(obj);
+- overlay->vid_bo = NULL;
+-
+- overlay->crtc->overlay = NULL;
+- overlay->crtc = NULL;
+- overlay->active = 0;
++ return intel_overlay_do_wait_request(overlay, request, interruptible,
++ intel_overlay_off_tail);
+ }
+
+ /* recover from an interruption due to a signal
+ * We have to be careful not to repeat work forever and make forward progress. */
+-int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+- int interruptible)
++static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
++ bool interruptible)
+ {
+ struct drm_device *dev = overlay->dev;
+- struct drm_gem_object *obj;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- u32 flip_addr;
+ int ret;
+
+- if (overlay->hw_wedged == HW_WEDGED)
+- return -EIO;
+-
+- if (overlay->last_flip_req == 0) {
+- overlay->last_flip_req =
+- i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+- if (overlay->last_flip_req == 0)
+- return -ENOMEM;
+- }
++ if (overlay->last_flip_req == 0)
++ return 0;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+- interruptible, &dev_priv->render_ring);
+- if (ret != 0)
++ interruptible, &dev_priv->render_ring);
++ if (ret)
+ return ret;
+
+- switch (overlay->hw_wedged) {
+- case RELEASE_OLD_VID:
+- obj = &overlay->old_vid_bo->base;
+- i915_gem_object_unpin(obj);
+- drm_gem_object_unreference(obj);
+- overlay->old_vid_bo = NULL;
+- break;
+- case SWITCH_OFF_STAGE_1:
+- flip_addr = overlay->flip_addr;
+- flip_addr |= OFC_UPDATE;
+-
+- overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+-
+- BEGIN_LP_RING(4);
+- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+- OUT_RING(flip_addr);
+- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+- OUT_RING(MI_NOOP);
+- ADVANCE_LP_RING();
+-
+- overlay->last_flip_req = i915_add_request(dev, NULL,
+- 0, &dev_priv->render_ring);
+- if (overlay->last_flip_req == 0)
+- return -ENOMEM;
+-
+- ret = i915_do_wait_request(dev, overlay->last_flip_req,
+- interruptible, &dev_priv->render_ring);
+- if (ret != 0)
+- return ret;
+-
+- case SWITCH_OFF_STAGE_2:
+- intel_overlay_off_tail(overlay);
+- break;
+- default:
+- BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
+- }
++ if (overlay->flip_tail)
++ overlay->flip_tail(overlay);
+
+- overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return 0;
+ }
+
+ /* Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+- * via intel_overlay_(un)map_regs_atomic */
++ * via intel_overlay_(un)map_regs
++ */
+ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+ {
++ struct drm_device *dev = overlay->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+- struct drm_gem_object *obj;
+
+- /* only wait if there is actually an old frame to release to
+- * guarantee forward progress */
++ /* Only wait if there is actually an old frame to release to
++ * guarantee forward progress.
++ */
+ if (!overlay->old_vid_bo)
+ return 0;
+
+- ret = intel_overlay_wait_flip(overlay);
+- if (ret != 0)
+- return ret;
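++	/* The hw may still be scanning out the old buffer; if a flip is
++	 * pending, wait for it on the ring before releasing the old frame. */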
++ if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
++ struct drm_i915_gem_request *request;
+
+- obj = &overlay->old_vid_bo->base;
+- i915_gem_object_unpin(obj);
+- drm_gem_object_unreference(obj);
+- overlay->old_vid_bo = NULL;
++ /* synchronous slowpath */
++ request = kzalloc(sizeof(*request), GFP_KERNEL);
++ if (request == NULL)
++ return -ENOMEM;
++
++ BEGIN_LP_RING(2);
++ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++ OUT_RING(MI_NOOP);
++ ADVANCE_LP_RING();
++
++ ret = intel_overlay_do_wait_request(overlay, request, true,
++ intel_overlay_release_old_vid_tail);
++ if (ret)
++ return ret;
++ }
+
++ intel_overlay_release_old_vid_tail(overlay);
+ return 0;
+ }
+
+@@ -506,65 +502,65 @@ struct put_image_params {
+ static int packed_depth_bytes(u32 format)
+ {
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+- case I915_OVERLAY_YUV422:
+- return 4;
+- case I915_OVERLAY_YUV411:
+- /* return 6; not implemented */
+- default:
+- return -EINVAL;
++ case I915_OVERLAY_YUV422:
++ return 4;
++ case I915_OVERLAY_YUV411:
++ /* return 6; not implemented */
++ default:
++ return -EINVAL;
+ }
+ }
+
+ static int packed_width_bytes(u32 format, short width)
+ {
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+- case I915_OVERLAY_YUV422:
+- return width << 1;
+- default:
+- return -EINVAL;
++ case I915_OVERLAY_YUV422:
++ return width << 1;
++ default:
++ return -EINVAL;
+ }
+ }
+
+ static int uv_hsubsampling(u32 format)
+ {
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+- case I915_OVERLAY_YUV422:
+- case I915_OVERLAY_YUV420:
+- return 2;
+- case I915_OVERLAY_YUV411:
+- case I915_OVERLAY_YUV410:
+- return 4;
+- default:
+- return -EINVAL;
++ case I915_OVERLAY_YUV422:
++ case I915_OVERLAY_YUV420:
++ return 2;
++ case I915_OVERLAY_YUV411:
++ case I915_OVERLAY_YUV410:
++ return 4;
++ default:
++ return -EINVAL;
+ }
+ }
+
+ static int uv_vsubsampling(u32 format)
+ {
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+- case I915_OVERLAY_YUV420:
+- case I915_OVERLAY_YUV410:
+- return 2;
+- case I915_OVERLAY_YUV422:
+- case I915_OVERLAY_YUV411:
+- return 1;
+- default:
+- return -EINVAL;
++ case I915_OVERLAY_YUV420:
++ case I915_OVERLAY_YUV410:
++ return 2;
++ case I915_OVERLAY_YUV422:
++ case I915_OVERLAY_YUV411:
++ return 1;
++ default:
++ return -EINVAL;
+ }
+ }
+
+ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+ {
+ u32 mask, shift, ret;
+- if (IS_I9XX(dev)) {
+- mask = 0x3f;
+- shift = 6;
+- } else {
++ if (IS_GEN2(dev)) {
+ mask = 0x1f;
+ shift = 5;
++ } else {
++ mask = 0x3f;
++ shift = 6;
+ }
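++	/* count the 32-byte (gen2) or 64-byte units spanned by [offset, offset + width) */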
+ ret = ((offset + width + mask) >> shift) - (offset >> shift);
+- if (IS_I9XX(dev))
++ if (!IS_GEN2(dev))
+ ret <<= 1;
+ 	ret -= 1;
+ return ret << 2;
+@@ -587,7 +583,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+ 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+ 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+ 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+- 0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
++ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
++};
++
+ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+ 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+ 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+@@ -597,7 +595,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+ 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+ 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+ 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+- 0x3000, 0x0800, 0x3000};
++ 0x3000, 0x0800, 0x3000
++};
+
+ static void update_polyphase_filter(struct overlay_registers *regs)
+ {
+@@ -630,29 +629,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
+ yscale = 1 << FP_SHIFT;
+
+ /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+- xscale_UV = xscale/uv_hscale;
+- yscale_UV = yscale/uv_vscale;
+- /* make the Y scale to UV scale ratio an exact multiply */
+- xscale = xscale_UV * uv_hscale;
+- yscale = yscale_UV * uv_vscale;
++ xscale_UV = xscale/uv_hscale;
++ yscale_UV = yscale/uv_vscale;
++	/* make the Y scale to UV scale ratio an exact multiple */
++ xscale = xscale_UV * uv_hscale;
++ yscale = yscale_UV * uv_vscale;
+ /*} else {
+- xscale_UV = 0;
+- yscale_UV = 0;
+- }*/
++ xscale_UV = 0;
++ yscale_UV = 0;
++ }*/
+
+ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+ scale_changed = true;
+ overlay->old_xscale = xscale;
+ overlay->old_yscale = yscale;
+
+- regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
+- | ((xscale >> FP_SHIFT) << 16)
+- | ((xscale & FRACT_MASK) << 3);
+- regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
+- | ((xscale_UV >> FP_SHIFT) << 16)
+- | ((xscale_UV & FRACT_MASK) << 3);
+- regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
+- | ((yscale_UV >> FP_SHIFT) << 0);
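++	/* pack the integer and fractional parts of each scale factor into the registers */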
++ regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
++ ((xscale >> FP_SHIFT) << 16) |
++ ((xscale & FRACT_MASK) << 3));
++
++ regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
++ ((xscale_UV >> FP_SHIFT) << 16) |
++ ((xscale_UV & FRACT_MASK) << 3));
++
++ regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
++ ((yscale_UV >> FP_SHIFT) << 0)));
+
+ if (scale_changed)
+ update_polyphase_filter(regs);
+@@ -664,22 +665,28 @@ static void update_colorkey(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+ {
+ u32 key = overlay->color_key;
++
+ switch (overlay->crtc->base.fb->bits_per_pixel) {
+- case 8:
+- regs->DCLRKV = 0;
+- regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+- case 16:
+- if (overlay->crtc->base.fb->depth == 15) {
+- regs->DCLRKV = RGB15_TO_COLORKEY(key);
+- regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+- } else {
+- regs->DCLRKV = RGB16_TO_COLORKEY(key);
+- regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+- }
+- case 24:
+- case 32:
+- regs->DCLRKV = key;
+- regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
++ case 8:
++ regs->DCLRKV = 0;
++ regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
++ break;
++
++ case 16:
++ if (overlay->crtc->base.fb->depth == 15) {
++ regs->DCLRKV = RGB15_TO_COLORKEY(key);
++ regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
++ } else {
++ regs->DCLRKV = RGB16_TO_COLORKEY(key);
++ regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
++ }
++ break;
++
++ case 24:
++ case 32:
++ regs->DCLRKV = key;
++ regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
++ break;
+ }
+ }
+
+@@ -689,48 +696,48 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+- case I915_OVERLAY_YUV422:
+- cmd |= OCMD_YUV_422_PLANAR;
+- break;
+- case I915_OVERLAY_YUV420:
+- cmd |= OCMD_YUV_420_PLANAR;
+- break;
+- case I915_OVERLAY_YUV411:
+- case I915_OVERLAY_YUV410:
+- cmd |= OCMD_YUV_410_PLANAR;
+- break;
++ case I915_OVERLAY_YUV422:
++ cmd |= OCMD_YUV_422_PLANAR;
++ break;
++ case I915_OVERLAY_YUV420:
++ cmd |= OCMD_YUV_420_PLANAR;
++ break;
++ case I915_OVERLAY_YUV411:
++ case I915_OVERLAY_YUV410:
++ cmd |= OCMD_YUV_410_PLANAR;
++ break;
+ }
+ } else { /* YUV packed */
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+- case I915_OVERLAY_YUV422:
+- cmd |= OCMD_YUV_422_PACKED;
+- break;
+- case I915_OVERLAY_YUV411:
+- cmd |= OCMD_YUV_411_PACKED;
+- break;
++ case I915_OVERLAY_YUV422:
++ cmd |= OCMD_YUV_422_PACKED;
++ break;
++ case I915_OVERLAY_YUV411:
++ cmd |= OCMD_YUV_411_PACKED;
++ break;
+ }
+
+ switch (params->format & I915_OVERLAY_SWAP_MASK) {
+- case I915_OVERLAY_NO_SWAP:
+- break;
+- case I915_OVERLAY_UV_SWAP:
+- cmd |= OCMD_UV_SWAP;
+- break;
+- case I915_OVERLAY_Y_SWAP:
+- cmd |= OCMD_Y_SWAP;
+- break;
+- case I915_OVERLAY_Y_AND_UV_SWAP:
+- cmd |= OCMD_Y_AND_UV_SWAP;
+- break;
++ case I915_OVERLAY_NO_SWAP:
++ break;
++ case I915_OVERLAY_UV_SWAP:
++ cmd |= OCMD_UV_SWAP;
++ break;
++ case I915_OVERLAY_Y_SWAP:
++ cmd |= OCMD_Y_SWAP;
++ break;
++ case I915_OVERLAY_Y_AND_UV_SWAP:
++ cmd |= OCMD_Y_AND_UV_SWAP;
++ break;
+ }
+ }
+
+ return cmd;
+ }
+
+-int intel_overlay_do_put_image(struct intel_overlay *overlay,
+- struct drm_gem_object *new_bo,
+- struct put_image_params *params)
++static int intel_overlay_do_put_image(struct intel_overlay *overlay,
++ struct drm_gem_object *new_bo,
++ struct put_image_params *params)
+ {
+ int ret, tmp_width;
+ struct overlay_registers *regs;
+@@ -755,24 +762,24 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ goto out_unpin;
+
+ if (!overlay->active) {
+- regs = intel_overlay_map_regs_atomic(overlay);
++ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ regs->OCONFIG = OCONF_CC_OUT_8BIT;
+- if (IS_I965GM(overlay->dev))
++ if (IS_GEN4(overlay->dev))
+ regs->OCONFIG |= OCONF_CSC_MODE_BT709;
+ regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ OCONF_PIPE_A : OCONF_PIPE_B;
+- intel_overlay_unmap_regs_atomic(overlay);
++ intel_overlay_unmap_regs(overlay, regs);
+
+ ret = intel_overlay_on(overlay);
+ if (ret != 0)
+ goto out_unpin;
+ }
+
+- regs = intel_overlay_map_regs_atomic(overlay);
++ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+@@ -788,7 +795,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
+
+ regs->SWIDTH = params->src_w;
+ regs->SWIDTHSW = calc_swidthsw(overlay->dev,
+- params->offset_Y, tmp_width);
++ params->offset_Y, tmp_width);
+ regs->SHEIGHT = params->src_h;
+ 	regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
+ regs->OSTRIDE = params->stride_Y;
+@@ -799,9 +806,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ u32 tmp_U, tmp_V;
+ regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+- params->src_w/uv_hscale);
++ params->src_w/uv_hscale);
+ tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+- params->src_w/uv_hscale);
++ params->src_w/uv_hscale);
+ regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
+ regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
+ regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
+@@ -815,9 +822,11 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
+
+ regs->OCMD = overlay_cmd_reg(params);
+
+- intel_overlay_unmap_regs_atomic(overlay);
++ intel_overlay_unmap_regs(overlay, regs);
+
+- intel_overlay_continue(overlay, scale_changed);
++ ret = intel_overlay_continue(overlay, scale_changed);
++ if (ret)
++ goto out_unpin;
+
+ overlay->old_vid_bo = overlay->vid_bo;
+ overlay->vid_bo = to_intel_bo(new_bo);
+@@ -829,20 +838,19 @@ out_unpin:
+ return ret;
+ }
+
+-int intel_overlay_switch_off(struct intel_overlay *overlay)
++int intel_overlay_switch_off(struct intel_overlay *overlay,
++ bool interruptible)
+ {
+- int ret;
+ struct overlay_registers *regs;
+ struct drm_device *dev = overlay->dev;
++ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+- if (overlay->hw_wedged) {
+- ret = intel_overlay_recover_from_interrupt(overlay, 1);
+- if (ret != 0)
+- return ret;
+- }
++ ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
++ if (ret != 0)
++ return ret;
+
+ if (!overlay->active)
+ return 0;
+@@ -851,33 +859,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
+ if (ret != 0)
+ return ret;
+
+- regs = intel_overlay_map_regs_atomic(overlay);
++ regs = intel_overlay_map_regs(overlay);
+ regs->OCMD = 0;
+- intel_overlay_unmap_regs_atomic(overlay);
++ intel_overlay_unmap_regs(overlay, regs);
+
+- ret = intel_overlay_off(overlay);
++ ret = intel_overlay_off(overlay, interruptible);
+ if (ret != 0)
+ return ret;
+
+ intel_overlay_off_tail(overlay);
+-
+ return 0;
+ }
+
+ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ struct intel_crtc *crtc)
+ {
+- drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+- u32 pipeconf;
+- int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
++ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+
+- if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
++ if (!crtc->active)
+ return -EINVAL;
+
+- pipeconf = I915_READ(pipeconf_reg);
+-
+ /* can't use the overlay with double wide pipe */
+- if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
++ if (INTEL_INFO(overlay->dev)->gen < 4 &&
++ (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+ return -EINVAL;
+
+ return 0;
+@@ -886,20 +890,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+ {
+ struct drm_device *dev = overlay->dev;
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- u32 ratio;
++ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 pfit_control = I915_READ(PFIT_CONTROL);
++ u32 ratio;
+
+ /* XXX: This is not the same logic as in the xorg driver, but more in
+- * line with the intel documentation for the i965 */
+- if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
+- ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
+- } else { /* on i965 use the PGM reg to read out the autoscaler values */
+- ratio = I915_READ(PFIT_PGM_RATIOS);
+- if (IS_I965G(dev))
+- ratio >>= PFIT_VERT_SCALE_SHIFT_965;
++ * line with the intel documentation for the i965
++ */
++ if (INTEL_INFO(dev)->gen >= 4) {
++ /* on i965 use the PGM reg to read out the autoscaler values */
++ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
++ } else {
++ if (pfit_control & VERT_AUTO_SCALE)
++ ratio = I915_READ(PFIT_AUTO_RATIOS);
+ else
+- ratio >>= PFIT_VERT_SCALE_SHIFT;
++ ratio = I915_READ(PFIT_PGM_RATIOS);
++ ratio >>= PFIT_VERT_SCALE_SHIFT;
+ }
+
+ overlay->pfit_vscale_ratio = ratio;
+@@ -910,12 +916,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
+ {
+ struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+- if ((rec->dst_x < mode->crtc_hdisplay)
+- && (rec->dst_x + rec->dst_width
+- <= mode->crtc_hdisplay)
+- && (rec->dst_y < mode->crtc_vdisplay)
+- && (rec->dst_y + rec->dst_height
+- <= mode->crtc_vdisplay))
++ if (rec->dst_x < mode->crtc_hdisplay &&
++ rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
++ rec->dst_y < mode->crtc_vdisplay &&
++ rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
+ return 0;
+ else
+ return -EINVAL;
+@@ -940,53 +944,59 @@ static int check_overlay_src(struct drm_device *dev,
+ struct drm_intel_overlay_put_image *rec,
+ struct drm_gem_object *new_bo)
+ {
+- u32 stride_mask;
+- int depth;
+ int uv_hscale = uv_hsubsampling(rec->flags);
+ int uv_vscale = uv_vsubsampling(rec->flags);
+- size_t tmp;
++ u32 stride_mask;
++ int depth;
++ u32 tmp;
+
+ /* check src dimensions */
+ if (IS_845G(dev) || IS_I830(dev)) {
+- if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
+- || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
++ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
++ rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ return -EINVAL;
+ } else {
+- if (rec->src_height > IMAGE_MAX_HEIGHT
+- || rec->src_width > IMAGE_MAX_WIDTH)
++ if (rec->src_height > IMAGE_MAX_HEIGHT ||
++ rec->src_width > IMAGE_MAX_WIDTH)
+ return -EINVAL;
+ }
++
+ /* better safe than sorry, use 4 as the maximal subsampling ratio */
+- if (rec->src_height < N_VERT_Y_TAPS*4
+- || rec->src_width < N_HORIZ_Y_TAPS*4)
++ if (rec->src_height < N_VERT_Y_TAPS*4 ||
++ rec->src_width < N_HORIZ_Y_TAPS*4)
+ return -EINVAL;
+
+ /* check alignment constraints */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+- case I915_OVERLAY_RGB:
+- /* not implemented */
++ case I915_OVERLAY_RGB:
++ /* not implemented */
++ return -EINVAL;
++
++ case I915_OVERLAY_YUV_PACKED:
++ if (uv_vscale != 1)
+ return -EINVAL;
+- case I915_OVERLAY_YUV_PACKED:
+- depth = packed_depth_bytes(rec->flags);
+- if (uv_vscale != 1)
+- return -EINVAL;
+- if (depth < 0)
+- return depth;
+- /* ignore UV planes */
+- rec->stride_UV = 0;
+- rec->offset_U = 0;
+- rec->offset_V = 0;
+- /* check pixel alignment */
+- if (rec->offset_Y % depth)
+- return -EINVAL;
+- break;
+- case I915_OVERLAY_YUV_PLANAR:
+- if (uv_vscale < 0 || uv_hscale < 0)
+- return -EINVAL;
+- /* no offset restrictions for planar formats */
+- break;
+- default:
++
++ depth = packed_depth_bytes(rec->flags);
++ if (depth < 0)
++ return depth;
++
++ /* ignore UV planes */
++ rec->stride_UV = 0;
++ rec->offset_U = 0;
++ rec->offset_V = 0;
++ /* check pixel alignment */
++ if (rec->offset_Y % depth)
+ return -EINVAL;
++ break;
++
++ case I915_OVERLAY_YUV_PLANAR:
++ if (uv_vscale < 0 || uv_hscale < 0)
++ return -EINVAL;
++ /* no offset restrictions for planar formats */
++ break;
++
++ default:
++ return -EINVAL;
+ }
+
+ if (rec->src_width % uv_hscale)
+@@ -1000,47 +1010,74 @@ static int check_overlay_src(struct drm_device *dev,
+
+ if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+ return -EINVAL;
+- if (IS_I965G(dev) && rec->stride_Y < 512)
++ if (IS_GEN4(dev) && rec->stride_Y < 512)
+ return -EINVAL;
+
+ tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+- 4 : 8;
+- if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
++ 4096 : 8192;
++ if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
+ return -EINVAL;
+
+ /* check buffer dimensions */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+- case I915_OVERLAY_RGB:
+- case I915_OVERLAY_YUV_PACKED:
+- /* always 4 Y values per depth pixels */
+- if (packed_width_bytes(rec->flags, rec->src_width)
+- > rec->stride_Y)
+- return -EINVAL;
+-
+- tmp = rec->stride_Y*rec->src_height;
+- if (rec->offset_Y + tmp > new_bo->size)
+- return -EINVAL;
+- break;
+- case I915_OVERLAY_YUV_PLANAR:
+- if (rec->src_width > rec->stride_Y)
+- return -EINVAL;
+- if (rec->src_width/uv_hscale > rec->stride_UV)
+- return -EINVAL;
+-
+- tmp = rec->stride_Y*rec->src_height;
+- if (rec->offset_Y + tmp > new_bo->size)
+- return -EINVAL;
+- tmp = rec->stride_UV*rec->src_height;
+- tmp /= uv_vscale;
+- if (rec->offset_U + tmp > new_bo->size
+- || rec->offset_V + tmp > new_bo->size)
+- return -EINVAL;
+- break;
++ case I915_OVERLAY_RGB:
++ case I915_OVERLAY_YUV_PACKED:
++ /* always 4 Y values per depth pixels */
++ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
++ return -EINVAL;
++
++ tmp = rec->stride_Y*rec->src_height;
++ if (rec->offset_Y + tmp > new_bo->size)
++ return -EINVAL;
++ break;
++
++ case I915_OVERLAY_YUV_PLANAR:
++ if (rec->src_width > rec->stride_Y)
++ return -EINVAL;
++ if (rec->src_width/uv_hscale > rec->stride_UV)
++ return -EINVAL;
++
++ tmp = rec->stride_Y * rec->src_height;
++ if (rec->offset_Y + tmp > new_bo->size)
++ return -EINVAL;
++
++ tmp = rec->stride_UV * (rec->src_height / uv_vscale);
++ if (rec->offset_U + tmp > new_bo->size ||
++ rec->offset_V + tmp > new_bo->size)
++ return -EINVAL;
++ break;
+ }
+
+ return 0;
+ }
+
++/**
++ * Return the pipe currently connected to the panel fitter,
++ * or -1 if the panel fitter is not present or not in use
++ */
++static int intel_panel_fitter_pipe(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 pfit_control;
++
++ /* i830 doesn't have a panel fitter */
++ if (IS_I830(dev))
++ return -1;
++
++ pfit_control = I915_READ(PFIT_CONTROL);
++
++ /* See if the panel fitter is in use */
++ if ((pfit_control & PFIT_ENABLE) == 0)
++ return -1;
++
++ /* 965 can place panel fitter on either pipe */
++ if (IS_GEN4(dev))
++ return (pfit_control >> 29) & 0x3;
++
++ /* older chips can only use pipe 1 */
++ return 1;
++}
++
+ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+@@ -1068,7 +1105,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+- ret = intel_overlay_switch_off(overlay);
++ ret = intel_overlay_switch_off(overlay, true);
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+@@ -1081,7 +1118,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ return -ENOMEM;
+
+ drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+- DRM_MODE_OBJECT_CRTC);
++ DRM_MODE_OBJECT_CRTC);
+ if (!drmmode_obj) {
+ ret = -ENOENT;
+ goto out_free;
+@@ -1089,7 +1126,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+
+ new_bo = drm_gem_object_lookup(dev, file_priv,
+- put_image_rec->bo_handle);
++ put_image_rec->bo_handle);
+ if (!new_bo) {
+ ret = -ENOENT;
+ goto out_free;
+@@ -1098,15 +1135,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+- if (overlay->hw_wedged) {
+- ret = intel_overlay_recover_from_interrupt(overlay, 1);
+- if (ret != 0)
+- goto out_unlock;
+- }
++ ret = intel_overlay_recover_from_interrupt(overlay, true);
++ if (ret != 0)
++ goto out_unlock;
+
+ if (overlay->crtc != crtc) {
+ struct drm_display_mode *mode = &crtc->base.mode;
+- ret = intel_overlay_switch_off(overlay);
++ ret = intel_overlay_switch_off(overlay, true);
+ if (ret != 0)
+ goto out_unlock;
+
+@@ -1117,9 +1152,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ overlay->crtc = crtc;
+ crtc->overlay = overlay;
+
+- if (intel_panel_fitter_pipe(dev) == crtc->pipe
+- /* and line to wide, i.e. one-line-mode */
+- && mode->hdisplay > 1024) {
++ /* line too wide, i.e. one-line-mode */
++ if (mode->hdisplay > 1024 &&
++ intel_panel_fitter_pipe(dev) == crtc->pipe) {
+ overlay->pfit_active = 1;
+ update_pfit_vscale_ratio(overlay);
+ } else
+@@ -1132,10 +1167,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+
+ if (overlay->pfit_active) {
+ params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+- overlay->pfit_vscale_ratio);
++ overlay->pfit_vscale_ratio);
+ /* shifting right rounds downwards, so add 1 */
+ params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+- overlay->pfit_vscale_ratio) + 1;
++ overlay->pfit_vscale_ratio) + 1;
+ } else {
+ params->dst_y = put_image_rec->dst_y;
+ params->dst_h = put_image_rec->dst_height;
+@@ -1147,8 +1182,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
+ params->src_h = put_image_rec->src_height;
+ params->src_scan_w = put_image_rec->src_scan_width;
+ params->src_scan_h = put_image_rec->src_scan_height;
+- if (params->src_scan_h > params->src_h
+- || params->src_scan_w > params->src_w) {
++ if (params->src_scan_h > params->src_h ||
++ params->src_scan_w > params->src_w) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+@@ -1204,7 +1239,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+ return false;
+
+ for (i = 0; i < 3; i++) {
+- if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
++ if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ return false;
+ }
+
+@@ -1225,16 +1260,18 @@ static bool check_gamma5_errata(u32 gamma5)
+
+ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+ {
+- if (!check_gamma_bounds(0, attrs->gamma0)
+- || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
+- || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
+- || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
+- || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
+- || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
+- || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
++ if (!check_gamma_bounds(0, attrs->gamma0) ||
++ !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
++ !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
++ !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
++ !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
++ !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
++ !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ return -EINVAL;
++
+ if (!check_gamma5_errata(attrs->gamma5))
+ return -EINVAL;
++
+ return 0;
+ }
+
+@@ -1261,13 +1298,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
++ ret = -EINVAL;
+ if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+- attrs->color_key = overlay->color_key;
++ attrs->color_key = overlay->color_key;
+ attrs->brightness = overlay->brightness;
+- attrs->contrast = overlay->contrast;
++ attrs->contrast = overlay->contrast;
+ attrs->saturation = overlay->saturation;
+
+- if (IS_I9XX(dev)) {
++ if (!IS_GEN2(dev)) {
+ attrs->gamma0 = I915_READ(OGAMC0);
+ attrs->gamma1 = I915_READ(OGAMC1);
+ attrs->gamma2 = I915_READ(OGAMC2);
+@@ -1275,29 +1313,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
+ attrs->gamma4 = I915_READ(OGAMC4);
+ attrs->gamma5 = I915_READ(OGAMC5);
+ }
+- ret = 0;
+ } else {
+- overlay->color_key = attrs->color_key;
+- if (attrs->brightness >= -128 && attrs->brightness <= 127) {
+- overlay->brightness = attrs->brightness;
+- } else {
+- ret = -EINVAL;
++ if (attrs->brightness < -128 || attrs->brightness > 127)
+ goto out_unlock;
+- }
+- if (attrs->contrast <= 255) {
+- overlay->contrast = attrs->contrast;
+- } else {
+- ret = -EINVAL;
++ if (attrs->contrast > 255)
+ goto out_unlock;
+- }
+- if (attrs->saturation <= 1023) {
+- overlay->saturation = attrs->saturation;
+- } else {
+- ret = -EINVAL;
++ if (attrs->saturation > 1023)
+ goto out_unlock;
+- }
+
+- regs = intel_overlay_map_regs_atomic(overlay);
++ overlay->color_key = attrs->color_key;
++ overlay->brightness = attrs->brightness;
++ overlay->contrast = attrs->contrast;
++ overlay->saturation = attrs->saturation;
++
++ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unlock;
+@@ -1305,13 +1334,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
+
+ update_reg_attrs(overlay, regs);
+
+- intel_overlay_unmap_regs_atomic(overlay);
++ intel_overlay_unmap_regs(overlay, regs);
+
+ if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+- if (!IS_I9XX(dev)) {
+- ret = -EINVAL;
++ if (IS_GEN2(dev))
+ goto out_unlock;
+- }
+
+ if (overlay->active) {
+ ret = -EBUSY;
+@@ -1319,7 +1346,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
+ }
+
+ ret = check_gamma(attrs);
+- if (ret != 0)
++ if (ret)
+ goto out_unlock;
+
+ I915_WRITE(OGAMC0, attrs->gamma0);
+@@ -1329,9 +1356,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
+ I915_WRITE(OGAMC4, attrs->gamma4);
+ I915_WRITE(OGAMC5, attrs->gamma5);
+ }
+- ret = 0;
+ }
+
++ ret = 0;
+ out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+@@ -1347,7 +1374,7 @@ void intel_setup_overlay(struct drm_device *dev)
+ struct overlay_registers *regs;
+ int ret;
+
+- if (!OVERLAY_EXISTS(dev))
++ if (!HAS_OVERLAY(dev))
+ return;
+
+ overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+@@ -1360,22 +1387,28 @@ void intel_setup_overlay(struct drm_device *dev)
+ goto out_free;
+ overlay->reg_bo = to_intel_bo(reg_bo);
+
+- if (OVERLAY_NONPHYSICAL(dev)) {
+- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+- if (ret) {
+- DRM_ERROR("failed to pin overlay register bo\n");
+- goto out_free_bo;
+- }
+- overlay->flip_addr = overlay->reg_bo->gtt_offset;
+- } else {
++ if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+ ret = i915_gem_attach_phys_object(dev, reg_bo,
+ I915_GEM_PHYS_OVERLAY_REGS,
+- 0);
++ PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to attach phys overlay regs\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
++ } else {
++ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
++ if (ret) {
++ DRM_ERROR("failed to pin overlay register bo\n");
++ goto out_free_bo;
++ }
++ overlay->flip_addr = overlay->reg_bo->gtt_offset;
++
++ ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
++ if (ret) {
++ DRM_ERROR("failed to move overlay register bo into the GTT\n");
++ goto out_unpin_bo;
++ }
+ }
+
+ /* init all values */
+@@ -1384,21 +1417,22 @@ void intel_setup_overlay(struct drm_device *dev)
+ overlay->contrast = 75;
+ overlay->saturation = 146;
+
+- regs = intel_overlay_map_regs_atomic(overlay);
++ regs = intel_overlay_map_regs(overlay);
+ if (!regs)
+ goto out_free_bo;
+
+ memset(regs, 0, sizeof(struct overlay_registers));
+ update_polyphase_filter(regs);
+-
+ update_reg_attrs(overlay, regs);
+
+- intel_overlay_unmap_regs_atomic(overlay);
++ intel_overlay_unmap_regs(overlay, regs);
+
+ dev_priv->overlay = overlay;
+ DRM_INFO("initialized overlay support\n");
+ return;
+
++out_unpin_bo:
++ i915_gem_object_unpin(reg_bo);
+ out_free_bo:
+ drm_gem_object_unreference(reg_bo);
+ out_free:
+@@ -1408,18 +1442,23 @@ out_free:
+
+ void intel_cleanup_overlay(struct drm_device *dev)
+ {
+- drm_i915_private_t *dev_priv = dev->dev_private;
++ drm_i915_private_t *dev_priv = dev->dev_private;
+
+- if (dev_priv->overlay) {
+- /* The bo's should be free'd by the generic code already.
+- * Furthermore modesetting teardown happens beforehand so the
+- * hardware should be off already */
+- BUG_ON(dev_priv->overlay->active);
++ if (!dev_priv->overlay)
++ return;
+
+- kfree(dev_priv->overlay);
+- }
++	/* The bos should be freed by the generic code already.
++ * Furthermore modesetting teardown happens beforehand so the
++ * hardware should be off already */
++ BUG_ON(dev_priv->overlay->active);
++
++ drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
++ kfree(dev_priv->overlay);
+ }
+
++#ifdef CONFIG_DEBUG_FS
++#include <linux/seq_file.h>
++
+ struct intel_overlay_error_state {
+ struct overlay_registers regs;
+ unsigned long base;
+@@ -1427,6 +1466,30 @@ struct intel_overlay_error_state {
+ u32 isr;
+ };
+
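++/* Atomic mapping variants for the error-capture path below, which may
++ * run in atomic context. */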
++static struct overlay_registers *
++intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
++{
++ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
++ struct overlay_registers *regs;
++
++ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
++ regs = overlay->reg_bo->phys_obj->handle->vaddr;
++ else
++ regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
++ overlay->reg_bo->gtt_offset,
++ KM_USER0);
++
++ return regs;
++}
++
++static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
++ struct overlay_registers *regs)
++{
++ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
++ io_mapping_unmap_atomic(regs, KM_USER0);
++}
++
+ struct intel_overlay_error_state *
+ intel_overlay_capture_error_state(struct drm_device *dev)
+ {
+@@ -1444,17 +1507,17 @@ intel_overlay_capture_error_state(struct drm_device *dev)
+
+ error->dovsta = I915_READ(DOVSTA);
+ error->isr = I915_READ(ISR);
+- if (OVERLAY_NONPHYSICAL(overlay->dev))
+- error->base = (long) overlay->reg_bo->gtt_offset;
+- else
++ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
++ else
++ error->base = (long) overlay->reg_bo->gtt_offset;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs)
+ goto err;
+
+ memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
+- intel_overlay_unmap_regs_atomic(overlay);
++ intel_overlay_unmap_regs_atomic(overlay, regs);
+
+ return error;
+
+@@ -1515,3 +1578,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s
+ P(UVSCALEV);
+ #undef P
+ }
++#endif
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index e7f5299..92ff8f3 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -30,6 +30,8 @@
+
+ #include "intel_drv.h"
+
++#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
++
+ void
+ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode)
+@@ -109,3 +111,110 @@ done:
+ dev_priv->pch_pf_pos = (x << 16) | y;
+ dev_priv->pch_pf_size = (width << 16) | height;
+ }
++
++static int is_backlight_combination_mode(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ if (INTEL_INFO(dev)->gen >= 4)
++ return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
++
++ if (IS_GEN2(dev))
++ return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
++
++ return 0;
++}
++
++u32 intel_panel_get_max_backlight(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 max;
++
++ if (HAS_PCH_SPLIT(dev)) {
++ max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
++ } else {
++ max = I915_READ(BLC_PWM_CTL);
++ if (IS_PINEVIEW(dev)) {
++ max >>= 17;
++ } else {
++ max >>= 16;
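++			/* the low bit is apparently not part of the cycle field on gen2/3 */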
++ if (INTEL_INFO(dev)->gen < 4)
++ max &= ~1;
++ }
++
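++		/* in combination mode the LBPC PCI byte scales the PWM value,
++		 * so the effective maximum scales with it */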
++ if (is_backlight_combination_mode(dev))
++ max *= 0xff;
++ }
++
++ if (max == 0) {
++ /* XXX add code here to query mode clock or hardware clock
++ * and program max PWM appropriately.
++ */
++ DRM_ERROR("fixme: max PWM is zero.\n");
++ max = 1;
++ }
++
++ DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
++ return max;
++}
++
++u32 intel_panel_get_backlight(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 val;
++
++ if (HAS_PCH_SPLIT(dev)) {
++ val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
++ } else {
++ val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
++ if (IS_PINEVIEW(dev))
++ val >>= 1;
++
++		if (is_backlight_combination_mode(dev)) {
++ u8 lbpc;
++
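++			/* fold the LBPC scale byte back into the duty cycle read from the PWM */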
++ val &= ~1;
++ pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
++ val *= lbpc;
++ val >>= 1;
++ }
++ }
++
++ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
++ return val;
++}
++
++static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
++ I915_WRITE(BLC_PWM_CPU_CTL, val | level);
++}
++
++void intel_panel_set_backlight(struct drm_device *dev, u32 level)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 tmp;
++
++ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
++
++ if (HAS_PCH_SPLIT(dev))
++ return intel_pch_panel_set_backlight(dev, level);
++
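++	/* split the requested level between the LBPC scale byte and the PWM duty cycle */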
++	if (is_backlight_combination_mode(dev)) {
++		u32 max = intel_panel_get_max_backlight(dev);
++		u8 lbpc;
++
++		lbpc = level * 0xfe / max + 1;
++		level /= lbpc;
++		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
++ }
++
++ tmp = I915_READ(BLC_PWM_CTL);
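++	/* Pineview keeps the duty cycle shifted up one bit; mirror the read path above */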
++ if (IS_PINEVIEW(dev)) {
++ tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
++ level <<= 1;
++ } else
++ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
++ I915_WRITE(BLC_PWM_CTL, tmp | level);
++}
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index cb3508f..b83306f 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -32,6 +32,7 @@
+ #include "i915_drv.h"
+ #include "i915_drm.h"
+ #include "i915_trace.h"
++#include "intel_drv.h"
+
+ static u32 i915_gem_get_seqno(struct drm_device *dev)
+ {
+@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
+
+ static void
+ render_ring_flush(struct drm_device *dev,
+- struct intel_ring_buffer *ring,
+- u32 invalidate_domains,
+- u32 flush_domains)
++ struct intel_ring_buffer *ring,
++ u32 invalidate_domains,
++ u32 flush_domains)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 cmd;
+@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
+ if ((invalidate_domains|flush_domains) &
+ I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+- if (!IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen < 4) {
+ /*
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
+@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
+ }
+ }
+
+-static unsigned int render_ring_get_head(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+-}
+-
+-static unsigned int render_ring_get_tail(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++static void ring_write_tail(struct drm_device *dev,
++ struct intel_ring_buffer *ring,
++ u32 value)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- return I915_READ(PRB0_TAIL) & TAIL_ADDR;
++ I915_WRITE_TAIL(ring, value);
+ }
+
+-static unsigned int render_ring_get_active_head(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++u32 intel_ring_get_active_head(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
++ u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
++ RING_ACTHD(ring->mmio_base) : ACTHD;
+
+ return I915_READ(acthd_reg);
+ }
+
+-static void render_ring_advance_ring(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- I915_WRITE(PRB0_TAIL, ring->tail);
+-}
+-
+ static int init_ring_common(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ u32 head;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
+ obj_priv = to_intel_bo(ring->gem_object);
+
+ /* Stop the ring if it's running. */
+- I915_WRITE(ring->regs.ctl, 0);
+- I915_WRITE(ring->regs.head, 0);
+- I915_WRITE(ring->regs.tail, 0);
++ I915_WRITE_CTL(ring, 0);
++ I915_WRITE_HEAD(ring, 0);
++ ring->write_tail(dev, ring, 0);
+
+ /* Initialize the ring. */
+- I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
+- head = ring->get_head(dev, ring);
++ I915_WRITE_START(ring, obj_priv->gtt_offset);
++ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+
+ /* G45 ring initialization fails to reset head to zero */
+ if (head != 0) {
+ DRM_ERROR("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+- I915_READ(ring->regs.ctl),
+- I915_READ(ring->regs.head),
+- I915_READ(ring->regs.tail),
+- I915_READ(ring->regs.start));
++ I915_READ_CTL(ring),
++ I915_READ_HEAD(ring),
++ I915_READ_TAIL(ring),
++ I915_READ_START(ring));
+
+- I915_WRITE(ring->regs.head, 0);
++ I915_WRITE_HEAD(ring, 0);
+
+ DRM_ERROR("%s head forced to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+- I915_READ(ring->regs.ctl),
+- I915_READ(ring->regs.head),
+- I915_READ(ring->regs.tail),
+- I915_READ(ring->regs.start));
++ I915_READ_CTL(ring),
++ I915_READ_HEAD(ring),
++ I915_READ_TAIL(ring),
++ I915_READ_START(ring));
+ }
+
+- I915_WRITE(ring->regs.ctl,
++ I915_WRITE_CTL(ring,
+ ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+- | RING_NO_REPORT | RING_VALID);
++ | RING_REPORT_64K | RING_VALID);
+
+- head = I915_READ(ring->regs.head) & HEAD_ADDR;
++ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ /* If the head is still not zero, the ring is dead */
+ if (head != 0) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+- I915_READ(ring->regs.ctl),
+- I915_READ(ring->regs.head),
+- I915_READ(ring->regs.tail),
+- I915_READ(ring->regs.start));
++ I915_READ_CTL(ring),
++ I915_READ_HEAD(ring),
++ I915_READ_TAIL(ring),
++ I915_READ_START(ring));
+ return -EIO;
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+- ring->head = ring->get_head(dev, ring);
+- ring->tail = ring->get_tail(dev, ring);
++ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
++ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+@@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev,
+ }
+
+ static int init_render_ring(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = init_ring_common(dev, ring);
+ int mode;
+
+- if (IS_I9XX(dev) && !IS_GEN3(dev)) {
++ if (INTEL_INFO(dev)->gen > 3) {
+ mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ if (IS_GEN6(dev))
+ mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+@@ -250,9 +239,8 @@ do { \
+ */
+ static u32
+ render_ring_add_request(struct drm_device *dev,
+- struct intel_ring_buffer *ring,
+- struct drm_file *file_priv,
+- u32 flush_domains)
++ struct intel_ring_buffer *ring,
++ u32 flush_domains)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 seqno;
+@@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev,
+ }
+
+ static u32
+-render_ring_get_gem_seqno(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++render_ring_get_seqno(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ if (HAS_PIPE_CONTROL(dev))
+@@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
+
+ static void
+ render_ring_get_user_irq(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+@@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
+
+ static void
+ render_ring_put_user_irq(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+@@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev,
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ }
+
+-static void render_setup_status_page(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++void intel_ring_setup_status_page(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (IS_GEN6(dev)) {
+- I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
+- I915_READ(HWS_PGA_GEN6); /* posting read */
++ I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
++ ring->status_page.gfx_addr);
++ I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
+ } else {
+- I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+- I915_READ(HWS_PGA); /* posting read */
++ I915_WRITE(RING_HWS_PGA(ring->mmio_base),
++ ring->status_page.gfx_addr);
++ I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
+ }
+
+ }
+
+-void
++static void
+ bsd_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+@@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev,
+ intel_ring_advance(dev, ring);
+ }
+
+-static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
+-}
+-
+-static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
+-}
+-
+-static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- return I915_READ(BSD_RING_ACTHD);
+-}
+-
+-static inline void bsd_ring_advance_ring(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- I915_WRITE(BSD_RING_TAIL, ring->tail);
+-}
+-
+ static int init_bsd_ring(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ return init_ring_common(dev, ring);
+ }
+
+ static u32
+-bsd_ring_add_request(struct drm_device *dev,
+- struct intel_ring_buffer *ring,
+- struct drm_file *file_priv,
+- u32 flush_domains)
++ring_add_request(struct drm_device *dev,
++ struct intel_ring_buffer *ring,
++ u32 flush_domains)
+ {
+ u32 seqno;
+
+@@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
+ return seqno;
+ }
+
+-static void bsd_setup_status_page(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
+- I915_READ(BSD_HWS_PGA);
+-}
+-
+ static void
+ bsd_ring_get_user_irq(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ /* do nothing */
+ }
+ static void
+ bsd_ring_put_user_irq(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ /* do nothing */
+ }
+
+ static u32
+-bsd_ring_get_gem_seqno(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ring_status_page_get_seqno(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
+ {
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ }
+
+ static int
+-bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+- struct intel_ring_buffer *ring,
+- struct drm_i915_gem_execbuffer2 *exec,
+- struct drm_clip_rect *cliprects,
+- uint64_t exec_offset)
++ring_dispatch_gem_execbuffer(struct drm_device *dev,
++ struct intel_ring_buffer *ring,
++ struct drm_i915_gem_execbuffer2 *exec,
++ struct drm_clip_rect *cliprects,
++ uint64_t exec_offset)
+ {
+ uint32_t exec_start;
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+@@ -488,13 +441,12 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ return 0;
+ }
+
+-
+ static int
+ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+- struct intel_ring_buffer *ring,
+- struct drm_i915_gem_execbuffer2 *exec,
+- struct drm_clip_rect *cliprects,
+- uint64_t exec_offset)
++ struct intel_ring_buffer *ring,
++ struct drm_i915_gem_execbuffer2 *exec,
++ struct drm_clip_rect *cliprects,
++ uint64_t exec_offset)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int nbox = exec->num_cliprects;
+@@ -523,8 +475,8 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+ intel_ring_emit(dev, ring, 0);
+ } else {
+- intel_ring_begin(dev, ring, 4);
+- if (IS_I965G(dev)) {
++ intel_ring_begin(dev, ring, 2);
++ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_ring_emit(dev, ring,
+ MI_BATCH_BUFFER_START | (2 << 6)
+ | MI_BATCH_NON_SECURE_I965);
+@@ -539,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ intel_ring_advance(dev, ring);
+ }
+
+- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
++ if (IS_G4X(dev) || IS_GEN5(dev)) {
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring, MI_FLUSH |
+ MI_NO_WRITE_FLUSH |
+@@ -553,7 +505,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ }
+
+ static void cleanup_status_page(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+@@ -573,7 +525,7 @@ static void cleanup_status_page(struct drm_device *dev,
+ }
+
+ static int init_status_page(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+@@ -603,7 +555,7 @@ static int init_status_page(struct drm_device *dev,
+ ring->status_page.obj = obj;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+- ring->setup_status_page(dev, ring);
++ intel_ring_setup_status_page(dev, ring);
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ ring->name, ring->status_page.gfx_addr);
+
+@@ -617,15 +569,18 @@ err:
+ return ret;
+ }
+
+-
+ int intel_init_ring_buffer(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ int ret;
+
+ ring->dev = dev;
++ INIT_LIST_HEAD(&ring->active_list);
++ INIT_LIST_HEAD(&ring->request_list);
++ INIT_LIST_HEAD(&ring->gpu_write_list);
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ ret = init_status_page(dev, ring);
+@@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
+
+ ring->gem_object = obj;
+
+- ret = i915_gem_object_pin(obj, ring->alignment);
++ ret = i915_gem_object_pin(obj, PAGE_SIZE);
+ if (ret)
+ goto err_unref;
+
+@@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+- ring->head = ring->get_head(dev, ring);
+- ring->tail = ring->get_tail(dev, ring);
++ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
++ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ }
+- INIT_LIST_HEAD(&ring->active_list);
+- INIT_LIST_HEAD(&ring->request_list);
+ return ret;
+
+ err_unmap:
+@@ -691,7 +644,7 @@ err_hws:
+ }
+
+ void intel_cleanup_ring_buffer(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ if (ring->gem_object == NULL)
+ return;
+@@ -701,11 +654,15 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
+ i915_gem_object_unpin(ring->gem_object);
+ drm_gem_object_unreference(ring->gem_object);
+ ring->gem_object = NULL;
++
++ if (ring->cleanup)
++ ring->cleanup(ring);
++
+ cleanup_status_page(dev, ring);
+ }
+
+-int intel_wrap_ring_buffer(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++static int intel_wrap_ring_buffer(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
+ {
+ unsigned int *virt;
+ int rem;
+@@ -731,14 +688,26 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
+ }
+
+ int intel_wait_ring_buffer(struct drm_device *dev,
+- struct intel_ring_buffer *ring, int n)
++ struct intel_ring_buffer *ring, int n)
+ {
+ unsigned long end;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ u32 head;
++
++ head = intel_read_status_page(ring, 4);
++ if (head) {
++ ring->head = head & HEAD_ADDR;
++ ring->space = ring->head - (ring->tail + 8);
++ if (ring->space < 0)
++ ring->space += ring->size;
++ if (ring->space >= n)
++ return 0;
++ }
+
+ trace_i915_ring_wait_begin (dev);
+ end = jiffies + 3 * HZ;
+ do {
+- ring->head = ring->get_head(dev, ring);
++ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+@@ -753,14 +722,15 @@ int intel_wait_ring_buffer(struct drm_device *dev,
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+
+- yield();
++ msleep(1);
+ } while (!time_after(jiffies, end));
+ trace_i915_ring_wait_end (dev);
+ return -EBUSY;
+ }
+
+ void intel_ring_begin(struct drm_device *dev,
+- struct intel_ring_buffer *ring, int num_dwords)
++ struct intel_ring_buffer *ring,
++ int num_dwords)
+ {
+ int n = 4*num_dwords;
+ if (unlikely(ring->tail + n > ring->size))
+@@ -772,97 +742,287 @@ void intel_ring_begin(struct drm_device *dev,
+ }
+
+ void intel_ring_advance(struct drm_device *dev,
+- struct intel_ring_buffer *ring)
++ struct intel_ring_buffer *ring)
+ {
+ ring->tail &= ring->size - 1;
+- ring->advance_ring(dev, ring);
++ ring->write_tail(dev, ring, ring->tail);
+ }
+
+-void intel_fill_struct(struct drm_device *dev,
+- struct intel_ring_buffer *ring,
+- void *data,
+- unsigned int len)
+-{
+- unsigned int *virt = ring->virtual_start + ring->tail;
+- BUG_ON((len&~(4-1)) != 0);
+- intel_ring_begin(dev, ring, len/4);
+- memcpy(virt, data, len);
+- ring->tail += len;
+- ring->tail &= ring->size - 1;
+- ring->space -= len;
+- intel_ring_advance(dev, ring);
+-}
+-
+-struct intel_ring_buffer render_ring = {
++static const struct intel_ring_buffer render_ring = {
+ .name = "render ring",
+- .regs = {
+- .ctl = PRB0_CTL,
+- .head = PRB0_HEAD,
+- .tail = PRB0_TAIL,
+- .start = PRB0_START
+- },
+- .ring_flag = I915_EXEC_RENDER,
++ .id = RING_RENDER,
++ .mmio_base = RENDER_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+- .alignment = PAGE_SIZE,
+- .virtual_start = NULL,
+- .dev = NULL,
+- .gem_object = NULL,
+- .head = 0,
+- .tail = 0,
+- .space = 0,
+- .user_irq_refcount = 0,
+- .irq_gem_seqno = 0,
+- .waiting_gem_seqno = 0,
+- .setup_status_page = render_setup_status_page,
+ .init = init_render_ring,
+- .get_head = render_ring_get_head,
+- .get_tail = render_ring_get_tail,
+- .get_active_head = render_ring_get_active_head,
+- .advance_ring = render_ring_advance_ring,
++ .write_tail = ring_write_tail,
+ .flush = render_ring_flush,
+ .add_request = render_ring_add_request,
+- .get_gem_seqno = render_ring_get_gem_seqno,
++ .get_seqno = render_ring_get_seqno,
+ .user_irq_get = render_ring_get_user_irq,
+ .user_irq_put = render_ring_put_user_irq,
+ .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+- .status_page = {NULL, 0, NULL},
+- .map = {0,}
+ };
+
+ /* ring buffer for bit-stream decoder */
+
+-struct intel_ring_buffer bsd_ring = {
++static const struct intel_ring_buffer bsd_ring = {
+ .name = "bsd ring",
+- .regs = {
+- .ctl = BSD_RING_CTL,
+- .head = BSD_RING_HEAD,
+- .tail = BSD_RING_TAIL,
+- .start = BSD_RING_START
+- },
+- .ring_flag = I915_EXEC_BSD,
++ .id = RING_BSD,
++ .mmio_base = BSD_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+- .alignment = PAGE_SIZE,
+- .virtual_start = NULL,
+- .dev = NULL,
+- .gem_object = NULL,
+- .head = 0,
+- .tail = 0,
+- .space = 0,
+- .user_irq_refcount = 0,
+- .irq_gem_seqno = 0,
+- .waiting_gem_seqno = 0,
+- .setup_status_page = bsd_setup_status_page,
+ .init = init_bsd_ring,
+- .get_head = bsd_ring_get_head,
+- .get_tail = bsd_ring_get_tail,
+- .get_active_head = bsd_ring_get_active_head,
+- .advance_ring = bsd_ring_advance_ring,
++ .write_tail = ring_write_tail,
+ .flush = bsd_ring_flush,
+- .add_request = bsd_ring_add_request,
+- .get_gem_seqno = bsd_ring_get_gem_seqno,
++ .add_request = ring_add_request,
++ .get_seqno = ring_status_page_get_seqno,
+ .user_irq_get = bsd_ring_get_user_irq,
+ .user_irq_put = bsd_ring_put_user_irq,
+- .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
+- .status_page = {NULL, 0, NULL},
+- .map = {0,}
++ .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
++};
++
++
++static void gen6_bsd_ring_write_tail(struct drm_device *dev,
++ struct intel_ring_buffer *ring,
++ u32 value)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ /* Every tail move must follow the sequence below */
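++	/* That is: disable the PSMI sleep message, wait for the ring's
++	 * idle indicator, write the new tail, then re-enable the sleep
++	 * message. */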
++ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
++ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
++ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
++ I915_WRITE(GEN6_BSD_RNCID, 0x0);
++
++ if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
++ GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
++ 50))
++ DRM_ERROR("timed out waiting for IDLE Indicator\n");
++
++ I915_WRITE_TAIL(ring, value);
++ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
++ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
++ GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
++}
++
++static void gen6_ring_flush(struct drm_device *dev,
++ struct intel_ring_buffer *ring,
++ u32 invalidate_domains,
++ u32 flush_domains)
++{
++ intel_ring_begin(dev, ring, 4);
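++	/* MI_FLUSH_DW followed by three zero dwords; only the flush
++	 * itself is needed here. */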
++ intel_ring_emit(dev, ring, MI_FLUSH_DW);
++ intel_ring_emit(dev, ring, 0);
++ intel_ring_emit(dev, ring, 0);
++ intel_ring_emit(dev, ring, 0);
++ intel_ring_advance(dev, ring);
++}
++
++static int
++gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
++ struct intel_ring_buffer *ring,
++ struct drm_i915_gem_execbuffer2 *exec,
++ struct drm_clip_rect *cliprects,
++ uint64_t exec_offset)
++{
++ uint32_t exec_start;
++
++ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
++
++ intel_ring_begin(dev, ring, 2);
++ intel_ring_emit(dev, ring,
++ MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
++	/* bits 0-7 are the length field on GEN6+ */
++ intel_ring_emit(dev, ring, exec_start);
++ intel_ring_advance(dev, ring);
++
++ return 0;
++}
++
++/* ring buffer for the video codec on Gen6+ */
++static const struct intel_ring_buffer gen6_bsd_ring = {
++ .name = "gen6 bsd ring",
++ .id = RING_BSD,
++ .mmio_base = GEN6_BSD_RING_BASE,
++ .size = 32 * PAGE_SIZE,
++ .init = init_bsd_ring,
++ .write_tail = gen6_bsd_ring_write_tail,
++ .flush = gen6_ring_flush,
++ .add_request = ring_add_request,
++ .get_seqno = ring_status_page_get_seqno,
++ .user_irq_get = bsd_ring_get_user_irq,
++ .user_irq_put = bsd_ring_put_user_irq,
++ .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
++};
++
++/* Blitter support (SandyBridge+) */
++
++static void
++blt_ring_get_user_irq(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
++{
++ /* do nothing */
++}
++static void
++blt_ring_put_user_irq(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
++{
++ /* do nothing */
++}
++
++
++/* Workaround for some steppings of SNB: each time the BLT engine's
++ * ring tail is moved, the first command parsed from the ring must be
++ * MI_BATCH_BUFFER_START.
++ */
++#define NEED_BLT_WORKAROUND(dev) \
++ (IS_GEN6(dev) && (dev->pdev->revision < 8))
++
++static inline struct drm_i915_gem_object *
++to_blt_workaround(struct intel_ring_buffer *ring)
++{
++ return ring->private;
++}
++
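++/* For the affected SNB steppings, keep a pinned 4k object holding a
++ * single MI_BATCH_BUFFER_END so that blt_ring_begin() can emit an
++ * MI_BATCH_BUFFER_START pointing at it before every command sequence. */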
++static int blt_ring_init(struct drm_device *dev,
++ struct intel_ring_buffer *ring)
++{
++ if (NEED_BLT_WORKAROUND(dev)) {
++ struct drm_i915_gem_object *obj;
++ u32 __iomem *ptr;
++ int ret;
++
++ obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
++ if (obj == NULL)
++ return -ENOMEM;
++
++ ret = i915_gem_object_pin(&obj->base, 4096);
++ if (ret) {
++ drm_gem_object_unreference(&obj->base);
++ return ret;
++ }
++
++ ptr = kmap(obj->pages[0]);
++ iowrite32(MI_BATCH_BUFFER_END, ptr);
++ iowrite32(MI_NOOP, ptr+1);
++ kunmap(obj->pages[0]);
++
++ ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
++ if (ret) {
++ i915_gem_object_unpin(&obj->base);
++ drm_gem_object_unreference(&obj->base);
++ return ret;
++ }
++
++ ring->private = obj;
++ }
++
++ return init_ring_common(dev, ring);
++}
++
++static void blt_ring_begin(struct drm_device *dev,
++ struct intel_ring_buffer *ring,
++ int num_dwords)
++{
++ if (ring->private) {
++ intel_ring_begin(dev, ring, num_dwords+2);
++ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
++ intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
++ } else
++ intel_ring_begin(dev, ring, 4);
++}
++
++static void blt_ring_flush(struct drm_device *dev,
++ struct intel_ring_buffer *ring,
++ u32 invalidate_domains,
++ u32 flush_domains)
++{
++ blt_ring_begin(dev, ring, 4);
++ intel_ring_emit(dev, ring, MI_FLUSH_DW);
++ intel_ring_emit(dev, ring, 0);
++ intel_ring_emit(dev, ring, 0);
++ intel_ring_emit(dev, ring, 0);
++ intel_ring_advance(dev, ring);
++}
++
++static u32
++blt_ring_add_request(struct drm_device *dev,
++ struct intel_ring_buffer *ring,
++ u32 flush_domains)
++{
++ u32 seqno = i915_gem_get_seqno(dev);
++
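++	/* Write the seqno into the hardware status page, then raise a
++	 * user interrupt so any waiters re-sample it. */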
++ blt_ring_begin(dev, ring, 4);
++ intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
++ intel_ring_emit(dev, ring,
++ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
++ intel_ring_emit(dev, ring, seqno);
++ intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
++ intel_ring_advance(dev, ring);
++
++ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
++ return seqno;
++}
++
++static void blt_ring_cleanup(struct intel_ring_buffer *ring)
++{
++ if (!ring->private)
++ return;
++
++ i915_gem_object_unpin(ring->private);
++ drm_gem_object_unreference(ring->private);
++ ring->private = NULL;
++}
++
++static const struct intel_ring_buffer gen6_blt_ring = {
++ .name = "blt ring",
++ .id = RING_BLT,
++ .mmio_base = BLT_RING_BASE,
++ .size = 32 * PAGE_SIZE,
++ .init = blt_ring_init,
++ .write_tail = ring_write_tail,
++ .flush = blt_ring_flush,
++ .add_request = blt_ring_add_request,
++ .get_seqno = ring_status_page_get_seqno,
++ .user_irq_get = blt_ring_get_user_irq,
++ .user_irq_put = blt_ring_put_user_irq,
++ .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
++ .cleanup = blt_ring_cleanup,
+ };
++
++int intel_init_render_ring_buffer(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ dev_priv->render_ring = render_ring;
++
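++	/* Without a GEM-backed status page, reuse the DMA-allocated
++	 * page set up at driver load. */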
++ if (!I915_NEED_GFX_HWS(dev)) {
++ dev_priv->render_ring.status_page.page_addr
++ = dev_priv->status_page_dmah->vaddr;
++ memset(dev_priv->render_ring.status_page.page_addr,
++ 0, PAGE_SIZE);
++ }
++
++ return intel_init_ring_buffer(dev, &dev_priv->render_ring);
++}
++
++int intel_init_bsd_ring_buffer(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ if (IS_GEN6(dev))
++ dev_priv->bsd_ring = gen6_bsd_ring;
++ else
++ dev_priv->bsd_ring = bsd_ring;
++
++ return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
++}
++
++int intel_init_blt_ring_buffer(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ dev_priv->blt_ring = gen6_blt_ring;
++
++ return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
++}
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
+index 525e7d3..3126c26 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
+@@ -7,25 +7,32 @@ struct intel_hw_status_page {
+ struct drm_gem_object *obj;
+ };
+
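++/* Per-ring MMIO register accessors, derived from the ring's mmio_base. */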
++#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
++#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
++#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
++#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
++#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
++#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
++#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
++#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
++
+ struct drm_i915_gem_execbuffer2;
+ struct intel_ring_buffer {
+ const char *name;
+- struct ring_regs {
+- u32 ctl;
+- u32 head;
+- u32 tail;
+- u32 start;
+- } regs;
+- unsigned int ring_flag;
++ enum intel_ring_id {
++ RING_RENDER = 0x1,
++ RING_BSD = 0x2,
++ RING_BLT = 0x4,
++ } id;
++ u32 mmio_base;
+ unsigned long size;
+- unsigned int alignment;
+ void *virtual_start;
+ struct drm_device *dev;
+ struct drm_gem_object *gem_object;
+
+ unsigned int head;
+ unsigned int tail;
+- unsigned int space;
++ int space;
+ struct intel_hw_status_page status_page;
+
+	u32		irq_gem_seqno;		/* last seq seen at irq time */
+@@ -35,35 +42,28 @@ struct intel_ring_buffer {
+ struct intel_ring_buffer *ring);
+ void (*user_irq_put)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+- void (*setup_status_page)(struct drm_device *dev,
+- struct intel_ring_buffer *ring);
+
+ int (*init)(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+- unsigned int (*get_head)(struct drm_device *dev,
+- struct intel_ring_buffer *ring);
+- unsigned int (*get_tail)(struct drm_device *dev,
+- struct intel_ring_buffer *ring);
+- unsigned int (*get_active_head)(struct drm_device *dev,
+- struct intel_ring_buffer *ring);
+- void (*advance_ring)(struct drm_device *dev,
+- struct intel_ring_buffer *ring);
++ void (*write_tail)(struct drm_device *dev,
++ struct intel_ring_buffer *ring,
++ u32 value);
+ void (*flush)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ u32 (*add_request)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+- struct drm_file *file_priv,
+ u32 flush_domains);
+- u32 (*get_gem_seqno)(struct drm_device *dev,
+- struct intel_ring_buffer *ring);
++ u32 (*get_seqno)(struct drm_device *dev,
++ struct intel_ring_buffer *ring);
+ int (*dispatch_gem_execbuffer)(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset);
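++	/* Optional hook to release per-ring private state; invoked from
++	 * intel_cleanup_ring_buffer() when set. */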
++ void (*cleanup)(struct intel_ring_buffer *ring);
+
+ /**
+ * List of objects currently involved in rendering from the
+@@ -83,8 +83,24 @@ struct intel_ring_buffer {
+ */
+ struct list_head request_list;
+
++ /**
++ * List of objects currently pending a GPU write flush.
++ *
++	 * All elements on this list will belong to either the
++	 * active_list or the flushing_list; last_rendering_seqno can
++	 * be used to differentiate between the two.
++ */
++ struct list_head gpu_write_list;
++
++ /**
++ * Do we have some not yet emitted requests outstanding?
++	 * Are there requests outstanding that have not yet been emitted?
++ bool outstanding_lazy_request;
++
+ wait_queue_head_t irq_queue;
+ drm_local_map_t map;
++
++ void *private;
+ };
+
+ static inline u32
+@@ -96,15 +112,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
+ }
+
+ int intel_init_ring_buffer(struct drm_device *dev,
+- struct intel_ring_buffer *ring);
++ struct intel_ring_buffer *ring);
+ void intel_cleanup_ring_buffer(struct drm_device *dev,
+- struct intel_ring_buffer *ring);
++ struct intel_ring_buffer *ring);
+ int intel_wait_ring_buffer(struct drm_device *dev,
+- struct intel_ring_buffer *ring, int n);
+-int intel_wrap_ring_buffer(struct drm_device *dev,
+- struct intel_ring_buffer *ring);
++ struct intel_ring_buffer *ring, int n);
+ void intel_ring_begin(struct drm_device *dev,
+- struct intel_ring_buffer *ring, int n);
++ struct intel_ring_buffer *ring, int n);
+
+ static inline void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+@@ -115,17 +129,19 @@ static inline void intel_ring_emit(struct drm_device *dev,
+ ring->tail += 4;
+ }
+
+-void intel_fill_struct(struct drm_device *dev,
+- struct intel_ring_buffer *ring,
+- void *data,
+- unsigned int len);
+ void intel_ring_advance(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+ u32 intel_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring);
+
+-extern struct intel_ring_buffer render_ring;
+-extern struct intel_ring_buffer bsd_ring;
++int intel_init_render_ring_buffer(struct drm_device *dev);
++int intel_init_bsd_ring_buffer(struct drm_device *dev);
++int intel_init_blt_ring_buffer(struct drm_device *dev);
++
++u32 intel_ring_get_active_head(struct drm_device *dev,
++ struct intel_ring_buffer *ring);
++void intel_ring_setup_status_page(struct drm_device *dev,
++ struct intel_ring_buffer *ring);
+
+ #endif /* _INTEL_RINGBUFFER_H_ */
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index ee73e42..de158b7 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -65,8 +65,11 @@ static const char *tv_format_names[] = {
+ struct intel_sdvo {
+ struct intel_encoder base;
+
++ struct i2c_adapter *i2c;
+ u8 slave_addr;
+
++ struct i2c_adapter ddc;
++
+ /* Register for the SDVO device: SDVOB or SDVOC */
+ int sdvo_reg;
+
+@@ -104,34 +107,24 @@ struct intel_sdvo {
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
++ bool has_audio;
+
+ /**
+- * This is set if we detect output of sdvo device as LVDS.
++	 * This is set if we detect the sdvo device's output as LVDS and
++ * have a valid fixed mode to use with the panel.
+ */
+ bool is_lvds;
+
+ /**
+- * This is sdvo flags for input timing.
+- */
+- uint8_t sdvo_flags;
+-
+- /**
+	 * This is the sdvo fixed panel mode pointer
+ */
+ struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+- /*
+- * supported encoding mode, used to determine whether HDMI is
+- * supported
+- */
+- struct intel_sdvo_encode encode;
+-
+ /* DDC bus used by this SDVO encoder */
+ uint8_t ddc_bus;
+
+- /* Mac mini hack -- use the same DDC as the analog connector */
+- struct i2c_adapter *analog_ddc_bus;
+-
++ /* Input timings for adjusted_mode */
++ struct intel_sdvo_dtd input_dtd;
+ };
+
+ struct intel_sdvo_connector {
+@@ -140,11 +133,15 @@ struct intel_sdvo_connector {
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
++ int force_audio;
++
+ /* This contains all current supported TV format */
+ u8 tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format;
+
++ struct drm_property *force_audio_property;
++
+ /* add the property for the SDVO-TV */
+ struct drm_property *left;
+ struct drm_property *right;
+@@ -186,9 +183,15 @@ struct intel_sdvo_connector {
+ u32 cur_dot_crawl, max_dot_crawl;
+ };
+
+-static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
++static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
++{
++ return container_of(encoder, struct intel_sdvo, base.base);
++}
++
++static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+ {
+- return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
++ return container_of(intel_attached_encoder(connector),
++ struct intel_sdvo, base);
+ }
+
+ static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
+@@ -213,7 +216,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ */
+ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+ {
+- struct drm_device *dev = intel_sdvo->base.enc.dev;
++ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 bval = val, cval = val;
+ int i;
+@@ -245,49 +248,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+
+ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
+ {
+- u8 out_buf[2] = { addr, 0 };
+- u8 buf[2];
+ struct i2c_msg msgs[] = {
+ {
+- .addr = intel_sdvo->slave_addr >> 1,
++ .addr = intel_sdvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+- .buf = out_buf,
++ .buf = &addr,
+ },
+ {
+- .addr = intel_sdvo->slave_addr >> 1,
++ .addr = intel_sdvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+- .buf = buf,
++ .buf = ch,
+ }
+ };
+ int ret;
+
+- if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
+- {
+- *ch = buf[0];
++ if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
+ return true;
+- }
+
+ DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+ return false;
+ }
+
+-static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
+-{
+- u8 out_buf[2] = { addr, ch };
+- struct i2c_msg msgs[] = {
+- {
+- .addr = intel_sdvo->slave_addr >> 1,
+- .flags = 0,
+- .len = 2,
+- .buf = out_buf,
+- }
+- };
+-
+- return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
+-}
+-
+ #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+ /** Mapping of command numbers to names, for debug output */
+ static const struct _sdvo_cmd_name {
+@@ -432,22 +415,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+ DRM_LOG_KMS("\n");
+ }
+
+-static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+- const void *args, int args_len)
+-{
+- int i;
+-
+- intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+-
+- for (i = 0; i < args_len; i++) {
+- if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
+- ((u8*)args)[i]))
+- return false;
+- }
+-
+- return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
+-}
+-
+ static const char *cmd_status_names[] = {
+ "Power on",
+ "Success",
+@@ -458,54 +425,115 @@ static const char *cmd_status_names[] = {
+ "Scaling not supported"
+ };
+
+-static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
+- void *response, int response_len,
+- u8 status)
++static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
++ const void *args, int args_len)
+ {
+- int i;
++ u8 buf[args_len*2 + 2], status;
++ struct i2c_msg msgs[args_len + 3];
++ int i, ret;
+
+- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+- for (i = 0; i < response_len; i++)
+- DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
+- for (; i < 8; i++)
+- DRM_LOG_KMS(" ");
+- if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+- DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+- else
+- DRM_LOG_KMS("(??? %d)", status);
+- DRM_LOG_KMS("\n");
++ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
++
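++	/* msgs[0..args_len-1] each write one argument register,
++	 * msgs[args_len] writes the opcode, and the final two poll
++	 * the command status byte. */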
++ for (i = 0; i < args_len; i++) {
++ msgs[i].addr = intel_sdvo->slave_addr;
++ msgs[i].flags = 0;
++ msgs[i].len = 2;
++		msgs[i].buf = buf + 2*i;
++ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
++ buf[2*i + 1] = ((u8*)args)[i];
++ }
++ msgs[i].addr = intel_sdvo->slave_addr;
++ msgs[i].flags = 0;
++ msgs[i].len = 2;
++ msgs[i].buf = buf + 2*i;
++ buf[2*i + 0] = SDVO_I2C_OPCODE;
++ buf[2*i + 1] = cmd;
++
++ /* the following two are to read the response */
++ status = SDVO_I2C_CMD_STATUS;
++ msgs[i+1].addr = intel_sdvo->slave_addr;
++ msgs[i+1].flags = 0;
++ msgs[i+1].len = 1;
++ msgs[i+1].buf = &status;
++
++ msgs[i+2].addr = intel_sdvo->slave_addr;
++ msgs[i+2].flags = I2C_M_RD;
++ msgs[i+2].len = 1;
++ msgs[i+2].buf = &status;
++
++ ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
++ if (ret < 0) {
++		DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
++ return false;
++ }
++ if (ret != i+3) {
++ /* failure in I2C transfer */
++		DRM_DEBUG_KMS("i2c transfer returned %d/%d\n", ret, i+3);
++ return false;
++ }
++
++ i = 3;
++ while (status == SDVO_CMD_STATUS_PENDING && i--) {
++ if (!intel_sdvo_read_byte(intel_sdvo,
++ SDVO_I2C_CMD_STATUS,
++ &status))
++ return false;
++ }
++ if (status != SDVO_CMD_STATUS_SUCCESS) {
++ DRM_DEBUG_KMS("command returns response %s [%d]\n",
++ status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
++ status);
++ return false;
++ }
++
++ return true;
+ }
+
+ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+ void *response, int response_len)
+ {
+- int i;
++ u8 retry = 5;
+ u8 status;
+- u8 retry = 50;
+-
+- while (retry--) {
+- /* Read the command response */
+- for (i = 0; i < response_len; i++) {
+- if (!intel_sdvo_read_byte(intel_sdvo,
+- SDVO_I2C_RETURN_0 + i,
+- &((u8 *)response)[i]))
+- return false;
+- }
++ int i;
+
+- /* read the return status */
+- if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
++ /*
++ * The documentation states that all commands will be
++ * processed within 15µs, and that we need only poll
++ * the status byte a maximum of 3 times in order for the
++ * command to be complete.
++ *
++ * Check 5 times in case the hardware failed to read the docs.
++ */
++ do {
++ if (!intel_sdvo_read_byte(intel_sdvo,
++ SDVO_I2C_CMD_STATUS,
+ &status))
+ return false;
++ } while (status == SDVO_CMD_STATUS_PENDING && --retry);
+
+- intel_sdvo_debug_response(intel_sdvo, response, response_len,
+- status);
+- if (status != SDVO_CMD_STATUS_PENDING)
+- break;
++ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
++ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
++ DRM_LOG_KMS("(%s)", cmd_status_names[status]);
++ else
++ DRM_LOG_KMS("(??? %d)", status);
+
+- mdelay(50);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ goto log_fail;
++
++ /* Read the command response */
++ for (i = 0; i < response_len; i++) {
++ if (!intel_sdvo_read_byte(intel_sdvo,
++ SDVO_I2C_RETURN_0 + i,
++ &((u8 *)response)[i]))
++ goto log_fail;
++ DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+ }
++ DRM_LOG_KMS("\n");
++ return true;
+
+- return status == SDVO_CMD_STATUS_SUCCESS;
++log_fail:
++ DRM_LOG_KMS("\n");
++ return false;
+ }
+
+ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+@@ -518,71 +546,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+ return 4;
+ }
+
+-/**
+- * Try to read the response after issuie the DDC switch command. But it
+- * is noted that we must do the action of reading response and issuing DDC
+- * switch command in one I2C transaction. Otherwise when we try to start
+- * another I2C transaction after issuing the DDC bus switch, it will be
+- * switched to the internal SDVO register.
+- */
+-static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+- u8 target)
++static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
++ u8 ddc_bus)
+ {
+- u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
+- struct i2c_msg msgs[] = {
+- {
+- .addr = intel_sdvo->slave_addr >> 1,
+- .flags = 0,
+- .len = 2,
+- .buf = out_buf,
+- },
+- /* the following two are to read the response */
+- {
+- .addr = intel_sdvo->slave_addr >> 1,
+- .flags = 0,
+- .len = 1,
+- .buf = cmd_buf,
+- },
+- {
+- .addr = intel_sdvo->slave_addr >> 1,
+- .flags = I2C_M_RD,
+- .len = 1,
+- .buf = ret_value,
+- },
+- };
+-
+- intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+- &target, 1);
+- /* write the DDC switch command argument */
+- intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
+-
+- out_buf[0] = SDVO_I2C_OPCODE;
+- out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
+- cmd_buf[0] = SDVO_I2C_CMD_STATUS;
+- cmd_buf[1] = 0;
+- ret_value[0] = 0;
+- ret_value[1] = 0;
+-
+- ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
+- if (ret != 3) {
+- /* failure in I2C transfer */
+- DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+- return;
+- }
+- if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
+- DRM_DEBUG_KMS("DDC switch command returns response %d\n",
+- ret_value[0]);
+- return;
+- }
+- return;
++ return intel_sdvo_write_cmd(intel_sdvo,
++ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
++ &ddc_bus, 1);
+ }
+
+ static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+ {
+- if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+- return false;
+-
+- return intel_sdvo_read_response(intel_sdvo, NULL, 0);
++ return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
+ }
+
+ static bool
+@@ -819,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ }
+
+-static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
+- struct intel_sdvo_encode *encode)
++static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
+ {
+- if (intel_sdvo_get_value(intel_sdvo,
+- SDVO_CMD_GET_SUPP_ENCODE,
+- encode, sizeof(*encode)))
+- return true;
++ struct intel_sdvo_encode encode;
+
+- /* non-support means DVI */
+- memset(encode, 0, sizeof(*encode));
+- return false;
++ return intel_sdvo_get_value(intel_sdvo,
++ SDVO_CMD_GET_SUPP_ENCODE,
++ &encode, sizeof(encode));
+ }
+
+ static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+@@ -874,115 +844,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+ }
+ #endif
+
+-static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
+- int index,
+- uint8_t *data, int8_t size, uint8_t tx_rate)
+-{
+- uint8_t set_buf_index[2];
+-
+- set_buf_index[0] = index;
+- set_buf_index[1] = 0;
+-
+- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+- set_buf_index, 2))
+- return false;
+-
+- for (; size > 0; size -= 8) {
+- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
+- return false;
+-
+- data += 8;
+- }
+-
+- return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+-}
+-
+-static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
+-{
+- uint8_t csum = 0;
+- int i;
+-
+- for (i = 0; i < size; i++)
+- csum += data[i];
+-
+- return 0x100 - csum;
+-}
+-
+-#define DIP_TYPE_AVI 0x82
+-#define DIP_VERSION_AVI 0x2
+-#define DIP_LEN_AVI 13
+-
+-struct dip_infoframe {
+- uint8_t type;
+- uint8_t version;
+- uint8_t len;
+- uint8_t checksum;
+- union {
+- struct {
+- /* Packet Byte #1 */
+- uint8_t S:2;
+- uint8_t B:2;
+- uint8_t A:1;
+- uint8_t Y:2;
+- uint8_t rsvd1:1;
+- /* Packet Byte #2 */
+- uint8_t R:4;
+- uint8_t M:2;
+- uint8_t C:2;
+- /* Packet Byte #3 */
+- uint8_t SC:2;
+- uint8_t Q:2;
+- uint8_t EC:3;
+- uint8_t ITC:1;
+- /* Packet Byte #4 */
+- uint8_t VIC:7;
+- uint8_t rsvd2:1;
+- /* Packet Byte #5 */
+- uint8_t PR:4;
+- uint8_t rsvd3:4;
+- /* Packet Byte #6~13 */
+- uint16_t top_bar_end;
+- uint16_t bottom_bar_start;
+- uint16_t left_bar_end;
+- uint16_t right_bar_start;
+- } avi;
+- struct {
+- /* Packet Byte #1 */
+- uint8_t channel_count:3;
+- uint8_t rsvd1:1;
+- uint8_t coding_type:4;
+- /* Packet Byte #2 */
+- uint8_t sample_size:2; /* SS0, SS1 */
+- uint8_t sample_frequency:3;
+- uint8_t rsvd2:3;
+- /* Packet Byte #3 */
+- uint8_t coding_type_private:5;
+- uint8_t rsvd3:3;
+- /* Packet Byte #4 */
+- uint8_t channel_allocation;
+- /* Packet Byte #5 */
+- uint8_t rsvd4:3;
+- uint8_t level_shift:4;
+- uint8_t downmix_inhibit:1;
+- } audio;
+- uint8_t payload[28];
+- } __attribute__ ((packed)) u;
+-} __attribute__((packed));
+-
+-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+- struct drm_display_mode * mode)
++static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
+ {
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+- .version = DIP_VERSION_AVI,
++ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
++ uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
++ uint8_t set_buf_index[2] = { 1, 0 };
++ uint64_t *data = (uint64_t *)&avi_if;
++ unsigned i;
++
++ intel_dip_infoframe_csum(&avi_if);
++
++ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
++ set_buf_index, 2))
++ return false;
+
+- avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
+- 4 + avi_if.len);
+- return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
+- 4 + avi_if.len,
+- SDVO_HBUF_TX_VSYNC);
++ for (i = 0; i < sizeof(avi_if); i += 8) {
++ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
++ data, 8))
++ return false;
++ data++;
++ }
++
++ return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
++ &tx_rate, 1);
+ }
+
+ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+@@ -1022,8 +910,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+ {
+- struct intel_sdvo_dtd input_dtd;
+-
+ /* Reset the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return false;
+@@ -1035,14 +921,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
+ return false;
+
+ if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+- &input_dtd))
++ &intel_sdvo->input_dtd))
+ return false;
+
+- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+- intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
++ intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+- mode->clock = adjusted_mode->clock;
+ return true;
+ }
+
+@@ -1050,7 +934,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+ {
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
++ int multiplier;
+
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+@@ -1065,10 +950,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ mode,
+ adjusted_mode);
+ } else if (intel_sdvo->is_lvds) {
+- drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
+-
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+- intel_sdvo->sdvo_lvds_fixed_mode))
++ intel_sdvo->sdvo_lvds_fixed_mode))
+ return false;
+
+ (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+@@ -1077,9 +960,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ }
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+- * SDVO device will be told of the multiplier during mode_set.
++ * SDVO device will factor out the multiplier during mode_set.
+ */
+- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
++ multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
++ intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+
+ return true;
+ }
+@@ -1092,11 +976,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+- u32 sdvox = 0;
+- int sdvo_pixel_multiply, rate;
++ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
++ u32 sdvox;
+ struct intel_sdvo_in_out_map in_out;
+ struct intel_sdvo_dtd input_dtd;
++ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
++ int rate;
+
+ if (!mode)
+ return;
+@@ -1114,28 +999,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+
+- if (intel_sdvo->is_hdmi) {
+- if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
+- return;
+-
+- sdvox |= SDVO_AUDIO_ENABLE;
+- }
++ /* Set the output timings to the screen */
++ if (!intel_sdvo_set_target_output(intel_sdvo,
++ intel_sdvo->attached_output))
++ return;
+
+ /* We have tried to get input timing in mode_fixup, and filled into
+- adjusted_mode */
+- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+- if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+- input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
+-
+- /* If it's a TV, we already set the output timing in mode_fixup.
+- * Otherwise, the output timing is equal to the input timing.
++ * adjusted_mode.
+ */
+- if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
++ if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
++ input_dtd = intel_sdvo->input_dtd;
++ } else {
+ /* Set the output timing to the screen */
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
+
++ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
+ }
+
+@@ -1143,31 +1023,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return;
+
+- if (intel_sdvo->is_tv) {
+- if (!intel_sdvo_set_tv_format(intel_sdvo))
+- return;
+- }
++ if (intel_sdvo->is_hdmi &&
++ !intel_sdvo_set_avi_infoframe(intel_sdvo))
++ return;
+
+- /* We would like to use intel_sdvo_create_preferred_input_timing() to
+- * provide the device with a timing it can support, if it supports that
+- * feature. However, presumably we would need to adjust the CRTC to
+- * output the preferred timing, and we don't support that currently.
+- */
+-#if 0
+- success = intel_sdvo_create_preferred_input_timing(encoder, clock,
+- width, height);
+- if (success) {
+- struct intel_sdvo_dtd *input_dtd;
++ if (intel_sdvo->is_tv &&
++ !intel_sdvo_set_tv_format(intel_sdvo))
++ return;
+
+- intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
+- intel_sdvo_set_input_timing(encoder, &input_dtd);
+- }
+-#else
+ (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
+-#endif
+
+- sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
+- switch (sdvo_pixel_multiply) {
++ switch (pixel_multiplier) {
++ default:
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+@@ -1176,14 +1043,14 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ return;
+
+ /* Set the SDVO control regs. */
+- if (IS_I965G(dev)) {
+- sdvox |= SDVO_BORDER_ENABLE;
++ if (INTEL_INFO(dev)->gen >= 4) {
++ sdvox = SDVO_BORDER_ENABLE;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+ } else {
+- sdvox |= I915_READ(intel_sdvo->sdvo_reg);
++ sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ switch (intel_sdvo->sdvo_reg) {
+ case SDVOB:
+ sdvox &= SDVOB_PRESERVE_MASK;
+@@ -1196,16 +1063,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ }
+ if (intel_crtc->pipe == 1)
+ sdvox |= SDVO_PIPE_B_SELECT;
++ if (intel_sdvo->has_audio)
++ sdvox |= SDVO_AUDIO_ENABLE;
+
+- if (IS_I965G(dev)) {
++ if (INTEL_INFO(dev)->gen >= 4) {
+ /* done in crtc_mode_set as the dpll_md reg must be written early */
+ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ /* done in crtc_mode_set as it lives inside the dpll register */
+ } else {
+- sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
++ sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ }
+
+- if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
++ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+ sdvox |= SDVO_STALL_SELECT;
+ intel_sdvo_write_sdvox(intel_sdvo, sdvox);
+ }
+@@ -1214,7 +1083,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+ {
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 temp;
+
+@@ -1260,8 +1129,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+ static int intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+@@ -1285,7 +1153,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
+
+ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
+ {
+- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
++ if (!intel_sdvo_get_value(intel_sdvo,
++ SDVO_CMD_GET_DEVICE_CAPS,
++ caps, sizeof(*caps)))
++ return false;
++
++ DRM_DEBUG_KMS("SDVO capabilities:\n"
++ " vendor_id: %d\n"
++ " device_id: %d\n"
++ " device_rev_id: %d\n"
++ " sdvo_version_major: %d\n"
++ " sdvo_version_minor: %d\n"
++ " sdvo_inputs_mask: %d\n"
++ " smooth_scaling: %d\n"
++ " sharp_scaling: %d\n"
++ " up_scaling: %d\n"
++ " down_scaling: %d\n"
++ " stall_support: %d\n"
++ " output_flags: %d\n",
++ caps->vendor_id,
++ caps->device_id,
++ caps->device_rev_id,
++ caps->sdvo_version_major,
++ caps->sdvo_version_minor,
++ caps->sdvo_inputs_mask,
++ caps->smooth_scaling,
++ caps->sharp_scaling,
++ caps->up_scaling,
++ caps->down_scaling,
++ caps->stall_support,
++ caps->output_flags);
++
++ return true;
+ }
+
+ /* No use! */
+@@ -1389,22 +1288,33 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
+ return (caps > 1);
+ }
+
++static struct edid *
++intel_sdvo_get_edid(struct drm_connector *connector)
++{
++ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
++ return drm_get_edid(connector, &sdvo->ddc);
++}
++
+ static struct drm_connector *
+ intel_find_analog_connector(struct drm_device *dev)
+ {
+ struct drm_connector *connector;
+- struct drm_encoder *encoder;
+- struct intel_sdvo *intel_sdvo;
+-
+- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+- intel_sdvo = enc_to_intel_sdvo(encoder);
+- if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
+- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+- if (encoder == intel_attached_encoder(connector))
++ struct intel_sdvo *encoder;
++
++ list_for_each_entry(encoder,
++ &dev->mode_config.encoder_list,
++ base.base.head) {
++ if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
++ list_for_each_entry(connector,
++ &dev->mode_config.connector_list,
++ head) {
++ if (&encoder->base ==
++ intel_attached_encoder(connector))
+ return connector;
+ }
+ }
+ }
++
+ return NULL;
+ }
+
+@@ -1424,64 +1334,72 @@ intel_analog_is_connected(struct drm_device *dev)
+ return true;
+ }
+
++/* Mac mini hack -- use the same DDC as the analog connector */
++static struct edid *
++intel_sdvo_get_analog_edid(struct drm_connector *connector)
++{
++ struct drm_i915_private *dev_priv = connector->dev->dev_private;
++
++ if (!intel_analog_is_connected(connector->dev))
++ return NULL;
++
++ return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
++}
++
+ enum drm_connector_status
+ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+- enum drm_connector_status status = connector_status_connected;
+- struct edid *edid = NULL;
++ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
++ enum drm_connector_status status;
++ struct edid *edid;
+
+- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
++ edid = intel_sdvo_get_edid(connector);
+
+- /* This is only applied to SDVO cards with multiple outputs */
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+- uint8_t saved_ddc, temp_ddc;
+- saved_ddc = intel_sdvo->ddc_bus;
+- temp_ddc = intel_sdvo->ddc_bus >> 1;
++ u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
++
+ /*
+		 * Don't use 1 as the argument of the DDC bus switch to get
+		 * the EDID; that bus is used for the SDVO SPD ROM.
+ */
+- while(temp_ddc > 1) {
+- intel_sdvo->ddc_bus = temp_ddc;
+- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+- if (edid) {
+- /*
+- * When we can get the EDID, maybe it is the
+- * correct DDC bus. Update it.
+- */
+- intel_sdvo->ddc_bus = temp_ddc;
++ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
++ intel_sdvo->ddc_bus = ddc;
++ edid = intel_sdvo_get_edid(connector);
++ if (edid)
+ break;
+- }
+- temp_ddc >>= 1;
+ }
++ /*
++ * If we found the EDID on the other bus,
++ * assume that is the correct DDC bus.
++ */
+ if (edid == NULL)
+ intel_sdvo->ddc_bus = saved_ddc;
+ }
+- /* when there is no edid and no monitor is connected with VGA
+- * port, try to use the CRT ddc to read the EDID for DVI-connector
++
++ /*
++	 * When there is no EDID and no monitor is connected to the VGA
++	 * port, try the CRT DDC to read the EDID for the DVI connector.
+ */
+- if (edid == NULL && intel_sdvo->analog_ddc_bus &&
+- !intel_analog_is_connected(connector->dev))
+- edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
++ if (edid == NULL)
++ edid = intel_sdvo_get_analog_edid(connector);
+
++ status = connector_status_unknown;
+ if (edid != NULL) {
+- bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+- bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
+-
+ /* DDC bus is shared, match EDID to connector type */
+- if (is_digital && need_digital)
++ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
++ status = connector_status_connected;
+ intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
+- else if (is_digital != need_digital)
+- status = connector_status_disconnected;
+-
++ intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
++ }
+ connector->display_info.raw_edid = NULL;
+- } else
+- status = connector_status_disconnected;
+-
+- kfree(edid);
++ kfree(edid);
++ }
++
++ if (status == connector_status_connected) {
++ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
++ if (intel_sdvo_connector->force_audio)
++ intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
++ }
+
+ return status;
+ }
+@@ -1490,13 +1408,12 @@ static enum drm_connector_status
+ intel_sdvo_detect(struct drm_connector *connector, bool force)
+ {
+ uint16_t response;
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ enum drm_connector_status ret;
+
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
++ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ return connector_status_unknown;
+ if (intel_sdvo->is_tv) {
+ /* add 30ms delay when the output type is SDVO-TV */
+@@ -1505,7 +1422,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
+ if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+ return connector_status_unknown;
+
+- DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
++ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
++ response & 0xff, response >> 8,
++ intel_sdvo_connector->output_flag);
+
+ if (response == 0)
+ return connector_status_disconnected;
+@@ -1538,12 +1457,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
+
+ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+- int num_modes;
++ struct edid *edid;
+
+ /* set the bus switch and get the modes */
+- num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
++ edid = intel_sdvo_get_edid(connector);
+
+ /*
+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
+@@ -1551,12 +1468,14 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+ * DDC fails, check to see if the analog output is disconnected, in
+ * which case we'll look there for the digital DDC data.
+ */
+- if (num_modes == 0 &&
+- intel_sdvo->analog_ddc_bus &&
+- !intel_analog_is_connected(connector->dev)) {
+- /* Switch to the analog ddc bus and try that
+- */
+- (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
++ if (edid == NULL)
++ edid = intel_sdvo_get_analog_edid(connector);
++
++ if (edid != NULL) {
++ drm_mode_connector_update_edid_property(connector, edid);
++ drm_add_edid_modes(connector, edid);
++ connector->display_info.raw_edid = NULL;
++ kfree(edid);
+ }
+ }
+
+@@ -1627,8 +1546,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
+
+ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_sdtv_resolution_request tv_res;
+ uint32_t reply = 0, format_map = 0;
+ int i;
+@@ -1644,7 +1562,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+ return;
+
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
+- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
++ if (!intel_sdvo_write_cmd(intel_sdvo,
++ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
+ if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
+@@ -1662,8 +1581,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+
+ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_display_mode *newmode;
+
+@@ -1672,7 +1590,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+- intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
++ intel_ddc_get_modes(connector, intel_sdvo->i2c);
+ if (list_empty(&connector->probed_modes) == false)
+ goto end;
+
+@@ -1693,6 +1611,10 @@ end:
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_sdvo->sdvo_lvds_fixed_mode =
+ drm_mode_duplicate(connector->dev, newmode);
++
++ drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
++ 0);
++
+ intel_sdvo->is_lvds = true;
+ break;
+ }
+@@ -1775,8 +1697,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
++ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ uint16_t temp_value;
+ uint8_t cmd;
+@@ -1786,6 +1707,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
+ if (ret)
+ return ret;
+
++ if (property == intel_sdvo_connector->force_audio_property) {
++ if (val == intel_sdvo_connector->force_audio)
++ return 0;
++
++ intel_sdvo_connector->force_audio = val;
++
++ if (val > 0 && intel_sdvo->has_audio)
++ return 0;
++ if (val < 0 && !intel_sdvo->has_audio)
++ return 0;
++
++ intel_sdvo->has_audio = val > 0;
++ goto done;
++ }
++
+ #define CHECK_PROPERTY(name, NAME) \
+ if (intel_sdvo_connector->name == property) { \
+ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+@@ -1879,9 +1815,8 @@ set_value:
+
+
+ done:
+- if (encoder->crtc) {
+- struct drm_crtc *crtc = encoder->crtc;
+-
++ if (intel_sdvo->base.base.crtc) {
++ struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+ crtc->y, crtc->fb);
+ }
+@@ -1909,20 +1844,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
+ .get_modes = intel_sdvo_get_modes,
+ .mode_valid = intel_sdvo_mode_valid,
+- .best_encoder = intel_attached_encoder,
++ .best_encoder = intel_best_encoder,
+ };
+
+ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+ {
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+-
+- if (intel_sdvo->analog_ddc_bus)
+- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
++ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+
+ if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ intel_sdvo->sdvo_lvds_fixed_mode);
+
++ i2c_del_adapter(&intel_sdvo->ddc);
+ intel_encoder_destroy(encoder);
+ }
+
+@@ -1990,53 +1923,48 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+ intel_sdvo_guess_ddc_bus(sdvo);
+ }
+
+-static bool
+-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
++static void
++intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
++ struct intel_sdvo *sdvo, u32 reg)
+ {
+- return intel_sdvo_set_target_output(intel_sdvo,
+- device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
+- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+- &intel_sdvo->is_hdmi, 1);
+-}
++ struct sdvo_device_mapping *mapping;
++ u8 pin, speed;
+
+-static struct intel_sdvo *
+-intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
+-{
+- struct drm_device *dev = chan->drm_dev;
+- struct drm_encoder *encoder;
++ if (IS_SDVOB(reg))
++ mapping = &dev_priv->sdvo_mappings[0];
++ else
++ mapping = &dev_priv->sdvo_mappings[1];
+
+- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+- if (intel_sdvo->base.ddc_bus == &chan->adapter)
+- return intel_sdvo;
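++	/* Default to the DPB pin at 1 MHz unless the BIOS provided a
++	 * mapping, then force the adapter into bit-banging mode. */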
++ pin = GMBUS_PORT_DPB;
++ speed = GMBUS_RATE_1MHZ >> 8;
++ if (mapping->initialized) {
++ pin = mapping->i2c_pin;
++ speed = mapping->i2c_speed;
+ }
+
+- return NULL;
++ sdvo->i2c = &dev_priv->gmbus[pin].adapter;
++ intel_gmbus_set_speed(sdvo->i2c, speed);
++ intel_gmbus_force_bit(sdvo->i2c, true);
+ }
+
+-static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
+- struct i2c_msg msgs[], int num)
++static bool
++intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
+ {
+- struct intel_sdvo *intel_sdvo;
+- struct i2c_algo_bit_data *algo_data;
+- const struct i2c_algorithm *algo;
++ int is_hdmi;
+
+- algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
+- intel_sdvo =
+- intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
+- (algo_data->data));
+- if (intel_sdvo == NULL)
+- return -EINVAL;
++ if (!intel_sdvo_check_supp_encode(intel_sdvo))
++ return false;
+
+- algo = intel_sdvo->base.i2c_bus->algo;
++ if (!intel_sdvo_set_target_output(intel_sdvo,
++ device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
++ return false;
+
+- intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
+- return algo->master_xfer(i2c_adap, msgs, num);
+-}
++ is_hdmi = 0;
++ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
++ return false;
+
+-static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
+- .master_xfer = intel_sdvo_master_xfer,
+-};
++ return !!is_hdmi;
++}
+
+ static u8
+ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+@@ -2076,26 +2004,44 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+ }
+
+ static void
+-intel_sdvo_connector_init(struct drm_encoder *encoder,
+- struct drm_connector *connector)
++intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
++ struct intel_sdvo *encoder)
+ {
+- drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+- connector->connector_type);
++ drm_connector_init(encoder->base.base.dev,
++ &connector->base.base,
++ &intel_sdvo_connector_funcs,
++ connector->base.base.connector_type);
++
++ drm_connector_helper_add(&connector->base.base,
++ &intel_sdvo_connector_helper_funcs);
++
++ connector->base.base.interlace_allowed = 0;
++ connector->base.base.doublescan_allowed = 0;
++ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+
+- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
++ intel_connector_attach_encoder(&connector->base, &encoder->base);
++ drm_sysfs_connector_add(&connector->base.base);
++}
+
+- connector->interlace_allowed = 0;
+- connector->doublescan_allowed = 0;
+- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++static void
++intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
++{
++ struct drm_device *dev = connector->base.base.dev;
+
+- drm_mode_connector_attach_encoder(connector, encoder);
+- drm_sysfs_connector_add(connector);
++ connector->force_audio_property =
++ drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
++ if (connector->force_audio_property) {
++ connector->force_audio_property->values[0] = -1;
++ connector->force_audio_property->values[1] = 1;
++ drm_connector_attach_property(&connector->base.base,
++ connector->force_audio_property, 0);
++ }
+ }
+
+ static bool
+ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+ {
+- struct drm_encoder *encoder = &intel_sdvo->base.enc;
++ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+@@ -2118,19 +2064,20 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+- if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
+- && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
+- && intel_sdvo->is_hdmi) {
++ if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ /* enable hdmi encoding mode if supported */
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_sdvo,
+ SDVO_COLORIMETRY_RGB256);
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
++ intel_sdvo->is_hdmi = true;
+ }
+ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+- intel_sdvo_connector_init(encoder, connector);
++ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
++
++ intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+
+ return true;
+ }
+@@ -2138,36 +2085,36 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+ static bool
+ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+ {
+- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+- struct drm_connector *connector;
+- struct intel_connector *intel_connector;
+- struct intel_sdvo_connector *intel_sdvo_connector;
++ struct drm_encoder *encoder = &intel_sdvo->base.base;
++ struct drm_connector *connector;
++ struct intel_connector *intel_connector;
++ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+- connector = &intel_connector->base;
+- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
++ connector = &intel_connector->base;
++ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
++ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+- intel_sdvo->controlled_output |= type;
+- intel_sdvo_connector->output_flag = type;
++ intel_sdvo->controlled_output |= type;
++ intel_sdvo_connector->output_flag = type;
+
+- intel_sdvo->is_tv = true;
+- intel_sdvo->base.needs_tv_clock = true;
+- intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
++ intel_sdvo->is_tv = true;
++ intel_sdvo->base.needs_tv_clock = true;
++ intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+- intel_sdvo_connector_init(encoder, connector);
++ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+
+- if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
++ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ goto err;
+
+- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
++ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+- return true;
++ return true;
+
+ err:
+ intel_sdvo_destroy(connector);
+@@ -2177,43 +2124,44 @@ err:
+ static bool
+ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+ {
+- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+- struct drm_connector *connector;
+- struct intel_connector *intel_connector;
+- struct intel_sdvo_connector *intel_sdvo_connector;
++ struct drm_encoder *encoder = &intel_sdvo->base.base;
++ struct drm_connector *connector;
++ struct intel_connector *intel_connector;
++ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+- connector = &intel_connector->base;
++ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+-
+- if (device == 0) {
+- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+- } else if (device == 1) {
+- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+- }
+-
+- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
++ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
++ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
++
++ if (device == 0) {
++ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
++ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
++ } else if (device == 1) {
++ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
++ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
++ }
++
++ intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+- intel_sdvo_connector_init(encoder, connector);
+- return true;
++ intel_sdvo_connector_init(intel_sdvo_connector,
++ intel_sdvo);
++ return true;
+ }
+
+ static bool
+ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+ {
+- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+- struct drm_connector *connector;
+- struct intel_connector *intel_connector;
+- struct intel_sdvo_connector *intel_sdvo_connector;
++ struct drm_encoder *encoder = &intel_sdvo->base.base;
++ struct drm_connector *connector;
++ struct intel_connector *intel_connector;
++ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+@@ -2221,22 +2169,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+-
+- if (device == 0) {
+- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+- } else if (device == 1) {
+- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+- }
+-
+- intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
++ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
++ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
++
++ if (device == 0) {
++ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
++ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
++ } else if (device == 1) {
++ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
++ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
++ }
++
++ intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+
+- intel_sdvo_connector_init(encoder, connector);
+- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
++ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
++ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+@@ -2307,7 +2255,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type)
+ {
+- struct drm_device *dev = intel_sdvo->base.enc.dev;
++ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct intel_sdvo_tv_format format;
+ uint32_t format_map, i;
+
+@@ -2373,7 +2321,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+ {
+- struct drm_device *dev = intel_sdvo->base.enc.dev;
++ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+@@ -2502,7 +2450,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+ {
+- struct drm_device *dev = intel_sdvo->base.enc.dev;
++ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+@@ -2535,7 +2483,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
++}
++
++static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
++ struct i2c_msg *msgs,
++ int num)
++{
++ struct intel_sdvo *sdvo = adapter->algo_data;
+
++ if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
++ return -EIO;
++
++ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
++}
++
++static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
++{
++ struct intel_sdvo *sdvo = adapter->algo_data;
++ return sdvo->i2c->algo->functionality(sdvo->i2c);
++}
++
++static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
++ .master_xfer = intel_sdvo_ddc_proxy_xfer,
++ .functionality = intel_sdvo_ddc_proxy_func
++};
++
++static bool
++intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
++ struct drm_device *dev)
++{
++ sdvo->ddc.owner = THIS_MODULE;
++ sdvo->ddc.class = I2C_CLASS_DDC;
++ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
++ sdvo->ddc.dev.parent = &dev->pdev->dev;
++ sdvo->ddc.algo_data = sdvo;
++ sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
++
++ return i2c_add_adapter(&sdvo->ddc) == 0;
+ }
+
+ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+@@ -2543,95 +2527,66 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
+- u8 ch[0x40];
+ int i;
+- u32 i2c_reg, ddc_reg, analog_ddc_reg;
+
+ intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+ if (!intel_sdvo)
+ return false;
+
++ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
++ kfree(intel_sdvo);
++ return false;
++ }
++
+ intel_sdvo->sdvo_reg = sdvo_reg;
+
+ intel_encoder = &intel_sdvo->base;
+ intel_encoder->type = INTEL_OUTPUT_SDVO;
++ /* encoder type will be decided later */
++ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
+
+- if (HAS_PCH_SPLIT(dev)) {
+- i2c_reg = PCH_GPIOE;
+- ddc_reg = PCH_GPIOE;
+- analog_ddc_reg = PCH_GPIOA;
+- } else {
+- i2c_reg = GPIOE;
+- ddc_reg = GPIOE;
+- analog_ddc_reg = GPIOA;
+- }
+-
+- /* setup the DDC bus. */
+- if (IS_SDVOB(sdvo_reg))
+- intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
+- else
+- intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
+-
+- if (!intel_encoder->i2c_bus)
+- goto err_inteloutput;
+-
+- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
+-
+- /* Save the bit-banging i2c functionality for use by the DDC wrapper */
+- intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
++ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
++ intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
+
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+- if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
++ u8 byte;
++
++ if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
+ DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+- goto err_i2c;
++ goto err;
+ }
+ }
+
+- /* setup the DDC bus. */
+- if (IS_SDVOB(sdvo_reg)) {
+- intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+- "SDVOB/VGA DDC BUS");
++ if (IS_SDVOB(sdvo_reg))
+ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+- } else {
+- intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+- "SDVOC/VGA DDC BUS");
++ else
+ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+- }
+- if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
+- goto err_i2c;
+
+- /* Wrap with our custom algo which switches to DDC mode */
+- intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+-
+- /* encoder type will be decided later */
+- drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+- drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
++ drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+
+ /* In default case sdvo lvds is false */
+ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+- goto err_enc;
++ goto err;
+
+ if (intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags) != true) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+- goto err_enc;
++ goto err;
+ }
+
+ intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+- goto err_enc;
++ goto err;
+
+ if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ &intel_sdvo->pixel_clock_min,
+ &intel_sdvo->pixel_clock_max))
+- goto err_enc;
++ goto err;
+
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+@@ -2651,16 +2606,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
+-err_enc:
+- drm_encoder_cleanup(&intel_encoder->enc);
+-err_i2c:
+- if (intel_sdvo->analog_ddc_bus != NULL)
+- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+- if (intel_encoder->ddc_bus != NULL)
+- intel_i2c_destroy(intel_encoder->ddc_bus);
+- if (intel_encoder->i2c_bus != NULL)
+- intel_i2c_destroy(intel_encoder->i2c_bus);
+-err_inteloutput:
++err:
++ drm_encoder_cleanup(&intel_encoder->base);
++ i2c_del_adapter(&intel_sdvo->ddc);
+ kfree(intel_sdvo);
+
+ return false;
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index 4a117e3..2f76819 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -48,7 +48,7 @@ struct intel_tv {
+ struct intel_encoder base;
+
+ int type;
+- char *tv_format;
++ const char *tv_format;
+ int margin[4];
+ u32 save_TV_H_CTL_1;
+ u32 save_TV_H_CTL_2;
+@@ -350,7 +350,7 @@ static const struct video_levels component_levels = {
+
+
+ struct tv_mode {
+- char *name;
++ const char *name;
+ int clock;
+ int refresh; /* in millihertz (for precision) */
+ u32 oversample;
+@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = {
+
+ static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+ {
+- return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
++ return container_of(encoder, struct intel_tv, base.base);
++}
++
++static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
++{
++ return container_of(intel_attached_encoder(connector),
++ struct intel_tv,
++ base);
+ }
+
+ static void
+@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
+ }
+
+ static const struct tv_mode *
+-intel_tv_mode_lookup (char *tv_format)
++intel_tv_mode_lookup(const char *tv_format)
+ {
+ int i;
+
+@@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format)
+ }
+
+ static const struct tv_mode *
+-intel_tv_mode_find (struct intel_tv *intel_tv)
++intel_tv_mode_find(struct intel_tv *intel_tv)
+ {
+ return intel_tv_mode_lookup(intel_tv->tv_format);
+ }
+
+ static enum drm_mode_status
+-intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
++intel_tv_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
++ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+ /* Ensure TV refresh is close to desired refresh */
+ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
+ < 1000)
+ return MODE_OK;
++
+ return MODE_CLOCK_RANGE;
+ }
+
+@@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ color_conversion->av);
+ }
+
+- if (IS_I965G(dev))
++ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+ else
+ I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+@@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+
+ /* Wait for vblank for the disable to take effect */
+- if (!IS_I9XX(dev))
++ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
++ I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
+ /* Wait for vblank for the disable to take effect. */
+- intel_wait_for_vblank(dev, intel_crtc->pipe);
++ intel_wait_for_pipe_off(dev, intel_crtc->pipe);
+
+ /* Filter ctl must be set before TV_WIN_SIZE */
+ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+@@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+- I915_WRITE(TV_DAC, 0);
++ I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
+ I915_WRITE(TV_CTL, tv_ctl);
+ }
+
+@@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = {
+ static int
+ intel_tv_detect_type (struct intel_tv *intel_tv)
+ {
+- struct drm_encoder *encoder = &intel_tv->base.enc;
++ struct drm_encoder *encoder = &intel_tv->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ u32 tv_ctl, save_tv_ctl;
+ u32 tv_dac, save_tv_dac;
+- int type = DRM_MODE_CONNECTOR_Unknown;
+-
+- tv_dac = I915_READ(TV_DAC);
++ int type;
+
+ /* Disable TV interrupts around load detect or we'll recurse */
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+@@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+- /*
+- * Detect TV by polling)
+- */
+- save_tv_dac = tv_dac;
+- tv_ctl = I915_READ(TV_CTL);
+- save_tv_ctl = tv_ctl;
+- tv_ctl &= ~TV_ENC_ENABLE;
+- tv_ctl &= ~TV_TEST_MODE_MASK;
++ save_tv_dac = tv_dac = I915_READ(TV_DAC);
++ save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
++
++ /* Poll for TV detection */
++ tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+- tv_dac &= ~TVDAC_SENSE_MASK;
+- tv_dac &= ~DAC_A_MASK;
+- tv_dac &= ~DAC_B_MASK;
+- tv_dac &= ~DAC_C_MASK;
++
++ tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
+ tv_dac |= (TVDAC_STATE_CHG_EN |
+ TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL |
+@@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
+ DAC_A_0_7_V |
+ DAC_B_0_7_V |
+ DAC_C_0_7_V);
++
+ I915_WRITE(TV_CTL, tv_ctl);
+ I915_WRITE(TV_DAC, tv_dac);
+ POSTING_READ(TV_DAC);
+- msleep(20);
+
+- tv_dac = I915_READ(TV_DAC);
+- I915_WRITE(TV_DAC, save_tv_dac);
+- I915_WRITE(TV_CTL, save_tv_ctl);
+- POSTING_READ(TV_CTL);
+- msleep(20);
++ intel_wait_for_vblank(intel_tv->base.base.dev,
++ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+- /*
+- * A B C
+- * 0 1 1 Composite
+- * 1 0 X svideo
+- * 0 0 0 Component
+- */
+- if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+- DRM_DEBUG_KMS("Detected Composite TV connection\n");
+- type = DRM_MODE_CONNECTOR_Composite;
+- } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+- DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+- type = DRM_MODE_CONNECTOR_SVIDEO;
+- } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+- DRM_DEBUG_KMS("Detected Component TV connection\n");
+- type = DRM_MODE_CONNECTOR_Component;
+- } else {
+- DRM_DEBUG_KMS("No TV connection detected\n");
+- type = -1;
++ type = -1;
++ if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
++ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
++ /*
++ * A B C
++ * 0 1 1 Composite
++ * 1 0 X svideo
++ * 0 0 0 Component
++ */
++ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
++ DRM_DEBUG_KMS("Detected Composite TV connection\n");
++ type = DRM_MODE_CONNECTOR_Composite;
++ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
++ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
++ type = DRM_MODE_CONNECTOR_SVIDEO;
++ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
++ DRM_DEBUG_KMS("Detected Component TV connection\n");
++ type = DRM_MODE_CONNECTOR_Component;
++ } else {
++ DRM_DEBUG_KMS("Unrecognised TV connection\n");
++ }
+ }
+
++ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
++ I915_WRITE(TV_CTL, save_tv_ctl);
++
+ /* Restore interrupt config */
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+@@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
+ */
+ static void intel_tv_find_better_format(struct drm_connector *connector)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
++ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ int i;
+
+@@ -1344,14 +1347,13 @@ static enum drm_connector_status
+ intel_tv_detect(struct drm_connector *connector, bool force)
+ {
+ struct drm_display_mode mode;
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
++ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ int type;
+
+ mode = reported_modes[0];
+ drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
+
+- if (encoder->crtc && encoder->crtc->enabled) {
++ if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
+ type = intel_tv_detect_type(intel_tv);
+ } else if (force) {
+ struct drm_crtc *crtc;
+@@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
+ return connector_status_connected;
+ }
+
+-static struct input_res {
+- char *name;
++static const struct input_res {
++ const char *name;
+ int w, h;
+-} input_res_table[] =
+-{
++} input_res_table[] = {
+ {"640x480", 640, 480},
+ {"800x600", 800, 600},
+ {"1024x768", 1024, 768},
+@@ -1396,8 +1397,7 @@ static void
+ intel_tv_chose_preferred_modes(struct drm_connector *connector,
+ struct drm_display_mode *mode_ptr)
+ {
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
++ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+ if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
+@@ -1422,15 +1422,14 @@ static int
+ intel_tv_get_modes(struct drm_connector *connector)
+ {
+ struct drm_display_mode *mode_ptr;
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
++ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ int j, count = 0;
+ u64 tmp;
+
+ for (j = 0; j < ARRAY_SIZE(input_res_table);
+ j++) {
+- struct input_res *input = &input_res_table[j];
++ const struct input_res *input = &input_res_table[j];
+ unsigned int hactive_s = input->w;
+ unsigned int vactive_s = input->h;
+
+@@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
+ uint64_t val)
+ {
+ struct drm_device *dev = connector->dev;
+- struct drm_encoder *encoder = intel_attached_encoder(connector);
+- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+- struct drm_crtc *crtc = encoder->crtc;
++ struct intel_tv *intel_tv = intel_attached_tv(connector);
++ struct drm_crtc *crtc = intel_tv->base.base.crtc;
+ int ret = 0;
+ bool changed = false;
+
+@@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
+ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+ .mode_valid = intel_tv_mode_valid,
+ .get_modes = intel_tv_get_modes,
+- .best_encoder = intel_attached_encoder,
++ .best_encoder = intel_best_encoder,
+ };
+
+ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+@@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev)
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ u32 tv_dac_on, tv_dac_off, save_tv_dac;
+- char **tv_format_names;
++ char *tv_format_names[ARRAY_SIZE(tv_modes)];
+ int i, initial_mode = 0;
+
+ if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+@@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev)
+ drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+ DRM_MODE_CONNECTOR_SVIDEO);
+
+- drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
++ drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
+ DRM_MODE_ENCODER_TVDAC);
+
+- drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
++ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+- intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
+- intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
++ intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
++ intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
+
+ /* BIOS margin values */
+@@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev)
+ intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+ intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
+
+- intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
++ intel_tv->tv_format = tv_modes[initial_mode].name;
+
+- drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
++ drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
+ drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /* Create TV properties then attach current values */
+- tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
+- GFP_KERNEL);
+- if (!tv_format_names)
+- goto out;
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
+- tv_format_names[i] = tv_modes[i].name;
+- drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
++ tv_format_names[i] = (char *)tv_modes[i].name;
++ drm_mode_create_tv_properties(dev,
++ ARRAY_SIZE(tv_modes),
++ tv_format_names);
+
+ drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+ initial_mode);
+@@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev)
+ drm_connector_attach_property(connector,
+ dev->mode_config.tv_bottom_margin_property,
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
+-out:
+ drm_sysfs_connector_add(connector);
+ }
+diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
+index 3e5a51a..a4c66f6 100644
+--- a/include/drm/drm_crtc.h
++++ b/include/drm/drm_crtc.h
+@@ -762,6 +762,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+ extern bool drm_detect_hdmi_monitor(struct edid *edid);
++extern bool drm_detect_monitor_audio(struct edid *edid);
+ extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+ extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
+diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
+index a49e791..83a389e 100644
+--- a/include/drm/drm_dp_helper.h
++++ b/include/drm/drm_dp_helper.h
+@@ -23,6 +23,9 @@
+ #ifndef _DRM_DP_HELPER_H_
+ #define _DRM_DP_HELPER_H_
+
++#include <linux/types.h>
++#include <linux/i2c.h>
++
+ /* From the VESA DisplayPort spec */
+
+ #define AUX_NATIVE_WRITE 0x8
+diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
+index e41c74f..8c641be 100644
+--- a/include/drm/i915_drm.h
++++ b/include/drm/i915_drm.h
+@@ -286,6 +286,7 @@ typedef struct drm_i915_irq_wait {
+ #define I915_PARAM_HAS_PAGEFLIPPING 8
+ #define I915_PARAM_HAS_EXECBUF2 9
+ #define I915_PARAM_HAS_BSD 10
++#define I915_PARAM_HAS_BLT 11
+
+ typedef struct drm_i915_getparam {
+ int param;
+@@ -627,8 +628,11 @@ struct drm_i915_gem_execbuffer2 {
+ __u32 num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
++#define I915_EXEC_RING_MASK (7<<0)
++#define I915_EXEC_DEFAULT (0<<0)
+ #define I915_EXEC_RENDER (1<<0)
+-#define I915_EXEC_BSD (1<<1)
++#define I915_EXEC_BSD (2<<0)
++#define I915_EXEC_BLT (3<<0)
+ __u64 flags;
+ __u64 rsvd1;
+ __u64 rsvd2;
+diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
+new file mode 100644
+index 0000000..d3c8194
+--- /dev/null
++++ b/include/drm/intel-gtt.h
+@@ -0,0 +1,18 @@
++/* Common header for intel-gtt.ko and i915.ko */
++
++#ifndef _DRM_INTEL_GTT_H
++#define _DRM_INTEL_GTT_H
++struct intel_gtt {
++ /* Number of stolen gtt entries at the beginning. */
++ unsigned int gtt_stolen_entries;
++ /* Total number of gtt entries. */
++ unsigned int gtt_total_entries;
++ /* Part of the gtt that is mappable by the cpu, for those chips where
++ * this is not the full gtt. */
++ unsigned int gtt_mappable_entries;
++};
++
++struct intel_gtt *intel_gtt_get(void);
++
++#endif
++
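For context, the rebase above replaces the old intel_i2c bit-banging DDC wrapper with a proxy i2c_adapter: every transfer first switches the SDVO control bus to the DDC pins, then is forwarded to the GMBUS adapter picked by intel_sdvo_select_i2c_bus(). Below is a minimal, self-contained sketch of that proxy pattern; the ddc_proxy struct and its select_bus() hook are hypothetical stand-ins for struct intel_sdvo and intel_sdvo_set_control_bus_switch(), not the in-tree API.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>

struct ddc_proxy {
	struct i2c_adapter ddc;		/* adapter exposed to EDID users */
	struct i2c_adapter *real;	/* underlying bus, e.g. GMBUS */
	bool (*select_bus)(struct ddc_proxy *proxy);	/* mux to DDC pins */
};

static int ddc_proxy_xfer(struct i2c_adapter *adapter,
			  struct i2c_msg *msgs, int num)
{
	struct ddc_proxy *proxy = adapter->algo_data;

	/* Point the device's mux at the DDC pins before each transfer. */
	if (!proxy->select_bus(proxy))
		return -EIO;

	return i2c_transfer(proxy->real, msgs, num);
}

static u32 ddc_proxy_func(struct i2c_adapter *adapter)
{
	struct ddc_proxy *proxy = adapter->algo_data;

	/* Advertise whatever the underlying adapter supports. */
	return i2c_get_functionality(proxy->real);
}

static const struct i2c_algorithm ddc_proxy_algo = {
	.master_xfer	= ddc_proxy_xfer,
	.functionality	= ddc_proxy_func,
};

static int ddc_proxy_register(struct ddc_proxy *proxy, struct device *parent)
{
	proxy->ddc.owner = THIS_MODULE;
	proxy->ddc.class = I2C_CLASS_DDC;
	snprintf(proxy->ddc.name, I2C_NAME_SIZE, "DDC proxy");
	proxy->ddc.dev.parent = parent;
	proxy->ddc.algo_data = proxy;
	proxy->ddc.algo = &ddc_proxy_algo;

	return i2c_add_adapter(&proxy->ddc);	/* 0 on success */
}

One deliberate difference: the sketch routes through i2c_transfer(), which takes the destination adapter's lock, whereas the patch above calls sdvo->i2c->algo->master_xfer() directly; both rely on the caller serialising access to the proxy.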
diff --git a/drm-intel-big-hammer.patch b/drm-intel-big-hammer.patch
index 63dc016b..0d7f7f08 100644
--- a/drm-intel-big-hammer.patch
+++ b/drm-intel-big-hammer.patch
@@ -1,16 +1,16 @@
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 37427e4..08af9db 100644
+index 6da2c6d..f508b86 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -2553,6 +2553,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
-
- mutex_lock(&dev->struct_mutex);
+@@ -3738,6 +3738,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ if (ret)
+ goto pre_mutex_err;
+ /* We don't get the flushing right for these chipsets, use the
-+ * big hamer for now to avoid random crashiness. */
++ * big hammer for now to avoid random crashiness. */
+ if (IS_I85X(dev) || IS_I865G(dev))
+ wbinvd();
+
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- if (dev_priv->mm.wedged) {
+ if (dev_priv->mm.suspended) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -EBUSY;
diff --git a/drm-intel-make-lvds-work.patch b/drm-intel-make-lvds-work.patch
index 5ca0152d..6c089b89 100644
--- a/drm-intel-make-lvds-work.patch
+++ b/drm-intel-make-lvds-work.patch
@@ -1,19 +1,20 @@
-diff -up linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c
---- linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c.orig 2010-03-31 16:59:39.901995671 -0400
-+++ linux-2.6.33.noarch/drivers/gpu/drm/i915/intel_display.c 2010-03-31 17:01:05.416996744 -0400
-@@ -3757,7 +3757,6 @@ struct drm_crtc *intel_get_load_detect_p
- void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 0cece04..63bbb4b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4580,7 +4580,6 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector, int dpms_mode)
{
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-@@ -3767,7 +3766,6 @@ void intel_release_load_detect_pipe(stru
- intel_encoder->base.encoder = NULL;
+@@ -4590,7 +4589,6 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ connector->encoder = NULL;
intel_encoder->load_detect_temp = false;
crtc->enabled = drm_helper_crtc_in_use(crtc);
- drm_helper_disable_unused_functions(dev);
}
- /* Switch crtc and output back off if necessary */
+ /* Switch crtc and encoder back off if necessary */
diff --git a/kernel.spec b/kernel.spec
index 8f9f84fb..d73047cf 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -51,7 +51,7 @@ Summary: The Linux kernel
# For non-released -rc kernels, this will be prepended with "0.", so
# for example a 3 here will become 0.3
#
-%global baserelease 2
+%global baserelease 3
%global fedora_build %{baserelease}
# base_sublevel is the kernel version we're starting with and patching
@@ -128,7 +128,7 @@ Summary: The Linux kernel
%define doc_build_fail true
%endif
-%define rawhide_skip_docs 1
+%define rawhide_skip_docs 0
%if 0%{?rawhide_skip_docs}
%define with_doc 0
%define doc_build_fail true
@@ -149,7 +149,7 @@ Summary: The Linux kernel
# Set debugbuildsenabled to 1 for production (build separate debug kernels)
# and 0 for rawhide (all kernels are debug kernels).
# See also 'make debug' and 'make release'.
-%define debugbuildsenabled 0
+%define debugbuildsenabled 1
# Want to build a vanilla kernel build without any non-upstream patches?
%define with_vanilla %{?_with_vanilla: 1} %{?!_with_vanilla: 0}
@@ -649,12 +649,10 @@ Patch1555: fix_xen_guest_on_old_EC2.patch
# nouveau + drm fixes
Patch1810: drm-nouveau-updates.patch
+Patch1811: drm-intel-2.6.37-rc2.patch
Patch1819: drm-intel-big-hammer.patch
-# intel drm is all merged upstream
-Patch1824: drm-intel-next.patch
# make sure the lvds comes back on lid open
Patch1825: drm-intel-make-lvds-work.patch
-Patch1826: drm-i915-reprogram-power-monitoring-registers-on-resume.patch
Patch1900: linux-2.6-intel-iommu-igfx.patch
# linux1394 git patches
@@ -1269,10 +1267,9 @@ ApplyPatch fix_xen_guest_on_old_EC2.patch
ApplyOptionalPatch drm-nouveau-updates.patch
# Intel DRM
-ApplyOptionalPatch drm-intel-next.patch
+ApplyPatch drm-intel-2.6.37-rc2.patch
ApplyPatch drm-intel-big-hammer.patch
ApplyPatch drm-intel-make-lvds-work.patch
-ApplyPatch drm-i915-reprogram-power-monitoring-registers-on-resume.patch
ApplyPatch linux-2.6-intel-iommu-igfx.patch
# linux1394 git patches
@@ -1957,6 +1954,11 @@ fi
# || ||
%changelog
+* Tue Nov 16 2010 Kyle McMartin <kyle@redhat.com> 2.6.36-3
+- Rebase drm/intel to 2.6.37-rc2+edp_fixes, which will hopefully sort out
+  most of the issues folks with eDP are having.
+- Switch to release builds and turn on debugging flavours.
+
* Mon Nov 15 2010 Kyle McMartin <kyle@redhat.com>
- rhbz#651019: pull in support for MBA3.