author     Dave Airlie <airlied@redhat.com>  2011-03-14 14:15:13 +1000
committer  Dave Airlie <airlied@redhat.com>  2011-03-14 14:15:13 +1000
commit     34db18abd376b2075c760c38f0b861aed379415d
tree       c4174e39a2f445f17c25ab206d45c66217bbbf85
parent     e73f88af66fcc50083fae4b7e1c39b469179a97a
parent     47ae63e0c2e5fdb582d471dc906eb29be94c732f
Merge remote branch 'intel/drm-intel-next' of ../drm-next into drm-core-next
* 'intel/drm-intel-next' of ../drm-next: (755 commits)
  drm/i915: Only wait on a pending flip if we intend to write to the buffer
  drm/i915/dp: Sanity check eDP existence
  drm/i915: Rebind the buffer if its alignment constraints changes with tiling
  drm/i915: Disable GPU semaphores by default
  drm/i915: Do not overflow the MMADDR write FIFO
  Revert "drm/i915: fix corruptions on i8xx due to relaxed fencing"
  drm/i915: Don't save/restore hardware status page address register
  drm/i915: don't store the reg value for HWS_PGA
  drm/i915: fix memory corruption with GM965 and >4GB RAM
  Linux 2.6.38-rc7
  Revert "TPM: Long default timeout fix"
  drm/i915: Re-enable GPU semaphores for SandyBridge mobile
  drm/i915: Replace vblank PM QoS with "Interrupt-Based AGPBUSY#"
  Revert "drm/i915: Use PM QoS to prevent C-State starvation of gen3 GPU"
  drm/i915: Allow relocation deltas outside of target bo
  drm/i915: Silence an innocuous compiler warning for an unused variable
  fs/block_dev.c: fix new kernel-doc warning
  ACPI: Fix build for CONFIG_NET unset
  mm: <asm-generic/pgtable.h> must include <linux/mm_types.h>
  x86: Use u32 instead of long to set reset vector back to 0
  ...

Conflicts:
	drivers/gpu/drm/i915/i915_gem.c
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 49
-rw-r--r--  drivers/acpi/osl.c | 25
-rw-r--r--  drivers/acpi/video_detect.c | 5
-rw-r--r--  drivers/acpi/wakeup.c | 6
-rw-r--r--  drivers/atm/solos-pci.c | 5
-rw-r--r--  drivers/block/Makefile | 2
-rw-r--r--  drivers/block/aoe/Makefile | 2
-rw-r--r--  drivers/block/cciss.c | 2
-rw-r--r--  drivers/block/floppy.c | 2
-rw-r--r--  drivers/block/loop.c | 3
-rw-r--r--  drivers/block/nbd.c | 3
-rw-r--r--  drivers/bluetooth/ath3k.c | 5
-rw-r--r--  drivers/bluetooth/btusb.c | 12
-rw-r--r--  drivers/cdrom/cdrom.c | 3
-rw-r--r--  drivers/char/Makefile | 1
-rw-r--r--  drivers/char/agp/amd64-agp.c | 9
-rw-r--r--  drivers/char/agp/intel-agp.h | 1
-rw-r--r--  drivers/char/agp/intel-gtt.c | 56
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 12
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 3
-rw-r--r--  drivers/char/pcmcia/ipwireless/main.c | 52
-rw-r--r--  drivers/char/tpm/tpm.c | 10
-rw-r--r--  drivers/char/virtio_console.c (renamed from drivers/tty/hvc/virtio_console.c) | 20
-rw-r--r--  drivers/dma/amba-pl08x.c | 53
-rw-r--r--  drivers/dma/imx-dma.c | 26
-rw-r--r--  drivers/dma/imx-sdma.c | 88
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 50
-rw-r--r--  drivers/edac/amd64_edac.c | 28
-rw-r--r--  drivers/firmware/dmi_scan.c | 11
-rw-r--r--  drivers/gpio/pca953x.c | 28
-rw-r--r--  drivers/gpu/drm/drm_info.c | 9
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 82
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 51
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 137
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 305
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 180
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 218
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 500
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 435
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 301
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 53
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1927
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 191
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 123
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 100
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_shaders.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/mkregtable.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 13
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 47
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r300 | 6
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r420 | 7
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rs600 | 6
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rv515 | 7
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 8
-rw-r--r--  drivers/hwmon/Kconfig | 19
-rw-r--r--  drivers/hwmon/ad7414.c | 1
-rw-r--r--  drivers/hwmon/adt7411.c | 1
-rw-r--r--  drivers/hwmon/emc1403.c | 2
-rw-r--r--  drivers/hwmon/jc42.c | 35
-rw-r--r--  drivers/hwmon/k10temp.c | 5
-rw-r--r--  drivers/hwmon/lm63.c | 59
-rw-r--r--  drivers/hwmon/lm85.c | 23
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 35
-rw-r--r--  drivers/i2c/busses/i2c-stu300.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 32
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 5
-rw-r--r--  drivers/input/gameport/gameport.c | 2
-rw-r--r--  drivers/input/input.c | 37
-rw-r--r--  drivers/input/keyboard/tegra-kbc.c | 62
-rw-r--r--  drivers/input/misc/rotary_encoder.c | 4
-rw-r--r--  drivers/input/mouse/synaptics.h | 23
-rw-r--r--  drivers/input/serio/serio.c | 13
-rw-r--r--  drivers/input/tablet/wacom_sys.c | 2
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 38
-rw-r--r--  drivers/input/touchscreen/wacom_w8001.c | 13
-rw-r--r--  drivers/isdn/hisax/isdnl2.c | 28
-rw-r--r--  drivers/isdn/hysdn/hysdn_defs.h | 2
-rw-r--r--  drivers/isdn/hysdn/hysdn_init.c | 26
-rw-r--r--  drivers/isdn/hysdn/hysdn_net.c | 3
-rw-r--r--  drivers/isdn/hysdn/hysdn_procconf.c | 3
-rw-r--r--  drivers/md/linear.c | 1
-rw-r--r--  drivers/md/md.c | 78
-rw-r--r--  drivers/md/md.h | 4
-rw-r--r--  drivers/md/multipath.c | 1
-rw-r--r--  drivers/md/raid0.c | 42
-rw-r--r--  drivers/md/raid1.c | 6
-rw-r--r--  drivers/md/raid10.c | 13
-rw-r--r--  drivers/md/raid5.c | 61
-rw-r--r--  drivers/memstick/core/memstick.c | 2
-rw-r--r--  drivers/message/fusion/mptbase.h | 4
-rw-r--r--  drivers/message/fusion/mptctl.c | 8
-rw-r--r--  drivers/message/fusion/mptscsih.c | 7
-rw-r--r--  drivers/misc/tifm_core.c | 2
-rw-r--r--  drivers/misc/vmw_balloon.c | 2
-rw-r--r--  drivers/mtd/nand/r852.c | 2
-rw-r--r--  drivers/mtd/sm_ftl.c | 2
-rw-r--r--  drivers/net/benet/be_cmds.c | 5
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 32
-rw-r--r--  drivers/net/can/mcp251x.c | 2
-rw-r--r--  drivers/net/can/mscan/Kconfig | 2
-rw-r--r--  drivers/net/can/pch_can.c | 5
-rw-r--r--  drivers/net/can/softing/Kconfig | 2
-rw-r--r--  drivers/net/can/softing/softing_cs.c | 1
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 80
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c | 2
-rw-r--r--  drivers/net/dm9000.c | 9
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 4
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 1
-rw-r--r--  drivers/net/e1000e/netdev.c | 53
-rw-r--r--  drivers/net/forcedeth.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c | 3
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c | 53
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 22
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_x540.c | 6
-rw-r--r--  drivers/net/pch_gbe/pch_gbe.h | 2
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 116
-rw-r--r--  drivers/net/r8169.c | 44
-rw-r--r--  drivers/net/sfc/ethtool.c | 22
-rw-r--r--  drivers/net/sis900.c | 1
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/tg3.c | 8
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 227
-rw-r--r--  drivers/net/usb/dm9601.c | 4
-rw-r--r--  drivers/net/usb/hso.c | 12
-rw-r--r--  drivers/net/usb/usbnet.c | 4
-rw-r--r--  drivers/net/virtio_net.c | 27
-rw-r--r--  drivers/net/wireless/ath/ath5k/phy.c | 143
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 7
-rw-r--r--  drivers/net/wireless/ath/carl9170/rx.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 67
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 6
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 14
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 6
-rw-r--r--  drivers/net/wireless/wl1251/main.c | 3
-rw-r--r--  drivers/nfc/Kconfig | 2
-rw-r--r--  drivers/nfc/pn544.c | 4
-rw-r--r--  drivers/pci/pci-sysfs.c | 3
-rw-r--r--  drivers/pcmcia/pcmcia_resource.c | 2
-rw-r--r--  drivers/pcmcia/pxa2xx_base.c | 2
-rw-r--r--  drivers/pcmcia/pxa2xx_base.h | 1
-rw-r--r--  drivers/pcmcia/pxa2xx_lubbock.c | 1
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 4
-rw-r--r--  drivers/platform/x86/asus_acpi.c | 8
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 24
-rw-r--r--  drivers/platform/x86/intel_pmic_gpio.c | 116
-rw-r--r--  drivers/platform/x86/tc1100-wmi.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 8
-rw-r--r--  drivers/pps/kapi.c | 2
-rw-r--r--  drivers/rapidio/rio-sysfs.c | 12
-rw-r--r--  drivers/regulator/mc13xxx-regulator-core.c | 2
-rw-r--r--  drivers/regulator/wm831x-dcdc.c | 1
-rw-r--r--  drivers/rtc/Kconfig | 12
-rw-r--r--  drivers/rtc/interface.c | 23
-rw-r--r--  drivers/rtc/rtc-at32ap700x.c | 19
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 20
-rw-r--r--  drivers/rtc/rtc-at91sam9.c | 20
-rw-r--r--  drivers/rtc/rtc-bfin.c | 21
-rw-r--r--  drivers/rtc/rtc-dev.c | 125
-rw-r--r--  drivers/rtc/rtc-ds1286.c | 41
-rw-r--r--  drivers/rtc/rtc-ds1305.c | 43
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 49
-rw-r--r--  drivers/rtc/rtc-ds1374.c | 37
-rw-r--r--  drivers/rtc/rtc-ds3232.c | 14
-rw-r--r--  drivers/rtc/rtc-m41t80.c | 30
-rw-r--r--  drivers/rtc/rtc-m48t59.c | 21
-rw-r--r--  drivers/rtc/rtc-mrst.c | 31
-rw-r--r--  drivers/rtc/rtc-msm6242.c | 2
-rw-r--r--  drivers/rtc/rtc-mv.c | 20
-rw-r--r--  drivers/rtc/rtc-omap.c | 28
-rw-r--r--  drivers/rtc/rtc-proc.c | 6
-rw-r--r--  drivers/rtc/rtc-rp5c01.c | 2
-rw-r--r--  drivers/rtc/rtc-rs5c372.c | 48
-rw-r--r--  drivers/rtc/rtc-sa1100.c | 22
-rw-r--r--  drivers/rtc/rtc-sh.c | 11
-rw-r--r--  drivers/rtc/rtc-test.c | 21
-rw-r--r--  drivers/rtc/rtc-vr41xx.c | 38
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 10
-rw-r--r--  drivers/scsi/scsi_debug.c | 2
-rw-r--r--  drivers/spi/pxa2xx_spi_pci.c | 61
-rw-r--r--  drivers/spi/spi_sh_msiof.c | 6
-rw-r--r--  drivers/ssb/pcmcia.c | 2
-rw-r--r--  drivers/staging/brcm80211/sys/wl_mac80211.c | 12
-rw-r--r--  drivers/staging/brcm80211/sys/wlc_mac80211.c | 13
-rw-r--r--  drivers/staging/brcm80211/sys/wlc_pub.h | 2
-rw-r--r--  drivers/staging/comedi/Kconfig | 5
-rw-r--r--  drivers/staging/comedi/drivers/mite.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/ni_6527.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/ni_65xx.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/ni_660x.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/ni_670x.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/ni_pcidio.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/ni_pcimio.c | 4
-rw-r--r--  drivers/staging/hv/netvsc_drv.c | 1
-rw-r--r--  drivers/staging/intel_sst/intelmid_v2_control.c | 5
-rw-r--r--  drivers/staging/zram/zram_drv.c | 4
-rw-r--r--  drivers/target/Makefile | 3
-rw-r--r--  drivers/target/target_core_configfs.c | 155
-rw-r--r--  drivers/target/target_core_device.c | 13
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 92
-rw-r--r--  drivers/target/target_core_iblock.c | 8
-rw-r--r--  drivers/target/target_core_mib.c | 1078
-rw-r--r--  drivers/target/target_core_mib.h | 28
-rw-r--r--  drivers/target/target_core_pscsi.c | 4
-rw-r--r--  drivers/target/target_core_tpg.c | 29
-rw-r--r--  drivers/target/target_core_transport.c | 48
-rw-r--r--  drivers/thermal/Kconfig | 1
-rw-r--r--  drivers/thermal/thermal_sys.c | 40
-rw-r--r--  drivers/tty/hvc/Makefile | 1
-rw-r--r--  drivers/tty/n_gsm.c | 1
-rw-r--r--  drivers/tty/serial/68328serial.c | 29
-rw-r--r--  drivers/tty/serial/68360serial.c | 1
-rw-r--r--  drivers/tty/serial/bfin_5xx.c | 15
-rw-r--r--  drivers/tty/serial/max3100.c | 2
-rw-r--r--  drivers/tty/serial/max3107.c | 2
-rw-r--r--  drivers/tty/sysrq.c | 17
-rw-r--r--  drivers/usb/class/cdc-acm.c | 1
-rw-r--r--  drivers/usb/core/hcd.c | 2
-rw-r--r--  drivers/usb/core/hub.c | 23
-rw-r--r--  drivers/usb/core/quirks.c | 8
-rw-r--r--  drivers/usb/gadget/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/f_mass_storage.c | 3
-rw-r--r--  drivers/usb/gadget/r8a66597-udc.c | 2
-rw-r--r--  drivers/usb/host/Kconfig | 2
-rw-r--r--  drivers/usb/host/ehci-au1xxx.c | 2
-rw-r--r--  drivers/usb/host/ehci-hub.c | 7
-rw-r--r--  drivers/usb/host/ehci-omap.c | 6
-rw-r--r--  drivers/usb/host/ehci-pci.c | 2
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 1
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 9
-rw-r--r--  drivers/usb/host/xhci-mem.c | 10
-rw-r--r--  drivers/usb/host/xhci-ring.c | 40
-rw-r--r--  drivers/usb/host/xhci.c | 14
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/musb/blackfin.c | 1
-rw-r--r--  drivers/usb/musb/musb_core.c | 11
-rw-r--r--  drivers/usb/musb/musb_core.h | 13
-rw-r--r--  drivers/usb/musb/musb_dma.h | 3
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 71
-rw-r--r--  drivers/usb/musb/musb_gadget.h | 8
-rw-r--r--  drivers/usb/musb/musb_host.c | 11
-rw-r--r--  drivers/usb/musb/musbhsdma.h | 19
-rw-r--r--  drivers/usb/musb/omap2430.c | 1
-rw-r--r--  drivers/usb/otg/Kconfig | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 27
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 12
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 4
-rw-r--r--  drivers/usb/serial/sierra.c | 3
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 2
-rw-r--r--  drivers/usb/serial/usb_wwan.c | 15
-rw-r--r--  drivers/usb/serial/visor.c | 12
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 14
-rw-r--r--  drivers/w1/masters/omap_hdq.c | 28
-rw-r--r--  drivers/watchdog/Kconfig | 6
-rw-r--r--  drivers/watchdog/Makefile | 2
-rw-r--r--  drivers/watchdog/m54xx_wdt.c (renamed from drivers/watchdog/m548x_wdt.c) | 50
-rw-r--r--  drivers/xen/manage.c | 10
310 files changed, 6084 insertions, 5050 deletions
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index e9562a7cb2f9..3b20a3401b64 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -212,37 +212,40 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
 		return_ACPI_STATUS(AE_BAD_PARAMETER);
 	}
 
-	/* Validate wake_device is of type Device */
-
-	device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
-	if (device_node->type != ACPI_TYPE_DEVICE) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
 	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 
 	/* Ensure that we have a valid GPE number */
 
 	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (gpe_event_info) {
-		/*
-		 * If there is no method or handler for this GPE, then the
-		 * wake_device will be notified whenever this GPE fires (aka
-		 * "implicit notify") Note: The GPE is assumed to be
-		 * level-triggered (for windows compatibility).
-		 */
-		if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-		    ACPI_GPE_DISPATCH_NONE) {
-			gpe_event_info->flags =
-			    (ACPI_GPE_DISPATCH_NOTIFY |
-			     ACPI_GPE_LEVEL_TRIGGERED);
-			gpe_event_info->dispatch.device_node = device_node;
-		}
+	if (!gpe_event_info) {
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * If there is no method or handler for this GPE, then the
+	 * wake_device will be notified whenever this GPE fires (aka
+	 * "implicit notify") Note: The GPE is assumed to be
+	 * level-triggered (for windows compatibility).
+	 */
+	if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+	      ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
 
-		gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
-		status = AE_OK;
+		/* Validate wake_device is of type Device */
+
+		device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
+					    wake_device);
+		if (device_node->type != ACPI_TYPE_DEVICE) {
+			goto unlock_and_exit;
+		}
+		gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
+					 ACPI_GPE_LEVEL_TRIGGERED);
+		gpe_event_info->dispatch.device_node = device_node;
 	}
 
+	gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+	status = AE_OK;
+
+ unlock_and_exit:
 	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b0931818cf98..c90c76aa7f8b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -636,17 +636,21 @@ EXPORT_SYMBOL(acpi_os_write_port);
 acpi_status
 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
 {
-	u32 dummy;
 	void __iomem *virt_addr;
-	int size = width / 8, unmap = 0;
+	unsigned int size = width / 8;
+	bool unmap = false;
+	u32 dummy;
 
 	rcu_read_lock();
 	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
-	rcu_read_unlock();
 	if (!virt_addr) {
+		rcu_read_unlock();
 		virt_addr = acpi_os_ioremap(phys_addr, size);
-		unmap = 1;
+		if (!virt_addr)
+			return AE_BAD_ADDRESS;
+		unmap = true;
 	}
+
 	if (!value)
 		value = &dummy;
 
@@ -666,6 +670,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
 
 	if (unmap)
 		iounmap(virt_addr);
+	else
+		rcu_read_unlock();
 
 	return AE_OK;
 }
@@ -674,14 +680,17 @@ acpi_status
 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
 {
 	void __iomem *virt_addr;
-	int size = width / 8, unmap = 0;
+	unsigned int size = width / 8;
+	bool unmap = false;
 
 	rcu_read_lock();
 	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
-	rcu_read_unlock();
 	if (!virt_addr) {
+		rcu_read_unlock();
 		virt_addr = acpi_os_ioremap(phys_addr, size);
-		unmap = 1;
+		if (!virt_addr)
+			return AE_BAD_ADDRESS;
+		unmap = true;
 	}
 
 	switch (width) {
@@ -700,6 +709,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
 
 	if (unmap)
 		iounmap(virt_addr);
+	else
+		rcu_read_unlock();
 
 	return AE_OK;
 }
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 42d3d72dae85..5af3479714f6 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -82,6 +82,11 @@ long acpi_is_video_device(struct acpi_device *device)
 	if (!device)
 		return 0;
 
+	/* Is this device able to support video switching ? */
+	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
+	    ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
+		video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
+
 	/* Is this device able to retrieve a video ROM ? */
 	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
 		video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index ed6501452507..7bfbe40bc43b 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -86,8 +86,12 @@ int __init acpi_wakeup_device_init(void)
 		struct acpi_device *dev = container_of(node,
 						       struct acpi_device,
 						       wakeup_list);
-		if (device_can_wakeup(&dev->dev))
+		if (device_can_wakeup(&dev->dev)) {
+			/* Button GPEs are supposed to be always enabled. */
+			acpi_enable_gpe(dev->wakeup.gpe_device,
+					dev->wakeup.gpe_number);
 			device_set_wakeup_enable(&dev->dev, true);
+		}
 	}
 	mutex_unlock(&acpi_device_lock);
 	return 0;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 73fb1c4f4cd4..25ef1a4556e6 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc)
 	}
 
 	skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
-	if (!skb && net_ratelimit()) {
-		dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+	if (!skb) {
+		if (net_ratelimit())
+			dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
 		return -ENOMEM;
 	}
 	header = (void *)skb_put(skb, sizeof(*header));
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index d7f463d6312d..40528ba56d1b 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -39,4 +39,4 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= xen-blkfront.o
 obj-$(CONFIG_BLK_DEV_DRBD)     += drbd/
 obj-$(CONFIG_BLK_DEV_RBD)     += rbd.o
 
-swim_mod-objs	:= swim.o swim_asm.o
+swim_mod-y	:= swim.o swim_asm.o
diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile
index e76d997183c6..06ea82cdf27d 100644
--- a/drivers/block/aoe/Makefile
+++ b/drivers/block/aoe/Makefile
@@ -3,4 +3,4 @@
 #
 
 obj-$(CONFIG_ATA_OVER_ETH)	+= aoe.o
-aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
+aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 516d5bbec2b6..9279272b3732 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2833,7 +2833,7 @@ static int cciss_revalidate(struct gendisk *disk)
 	sector_t total_size;
 	InquiryData_struct *inq_buff = NULL;
 
-	for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
+	for (logvol = 0; logvol <= h->highest_lun; logvol++) {
 		if (!h->drv[logvol])
 			continue;
 		if (memcmp(h->drv[logvol]->LunID, drv->LunID,
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04fc2b34..77fc76f8aea9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
 			struct block_device *bdev = opened_bdev[cnt];
 			if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
 				continue;
-			__invalidate_device(bdev);
+			__invalidate_device(bdev, true);
 		}
 		mutex_unlock(&open_lock);
 	} else {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 44e18c073c44..49e6a545eb63 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1641,6 +1641,9 @@ out:
 
 static void loop_free(struct loop_device *lo)
 {
+	if (!lo->lo_queue->queue_lock)
+		lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock;
+
 	blk_cleanup_queue(lo->lo_queue);
 	put_disk(lo->lo_disk);
 	list_del(&lo->lo_list);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a32fb41246f8..e6fc716aca45 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -53,7 +53,6 @@
 #define DBG_BLKDEV      0x0100
 #define DBG_RX          0x0200
 #define DBG_TX          0x0400
-static DEFINE_MUTEX(nbd_mutex);
 static unsigned int debugflags;
 #endif /* NDEBUG */
 
@@ -718,11 +717,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
 			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
 
-	mutex_lock(&nbd_mutex);
 	mutex_lock(&lo->tx_lock);
 	error = __nbd_ioctl(bdev, lo, cmd, arg);
 	mutex_unlock(&lo->tx_lock);
-	mutex_unlock(&nbd_mutex);
 
 	return error;
 }
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a126e614601f..6dcd55a74c0a 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -39,6 +39,11 @@ static struct usb_device_id ath3k_table[] = {
 	/* Atheros AR3011 with sflash firmware*/
 	{ USB_DEVICE(0x0CF3, 0x3002) },
 
+	/* Atheros AR9285 Malbec with sflash firmware */
+	{ USB_DEVICE(0x03F0, 0x311D) },
+
+	/* Atheros AR5BBU12 with sflash firmware */
+	{ USB_DEVICE(0x0489, 0xE02C) },
 	{ }	/* Terminating entry */
 };
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 1da773f899a2..700a3840fddc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,12 @@ static struct usb_device_id blacklist_table[] = {
 	/* Atheros 3011 with sflash firmware */
 	{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
 
+	/* Atheros AR9285 Malbec with sflash firmware */
+	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
+	/* Atheros AR5BBU12 with sflash firmware */
+	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
 	/* Broadcom BCM2035 */
 	{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
 	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -826,7 +832,7 @@ static void btusb_work(struct work_struct *work)
 
 	if (hdev->conn_hash.sco_num > 0) {
 		if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
-			err = usb_autopm_get_interface(data->isoc);
+			err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
 			if (err < 0) {
 				clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
 				usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -855,7 +861,7 @@ static void btusb_work(struct work_struct *work)
 
 		__set_isoc_interface(hdev, 0);
 		if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
-			usb_autopm_put_interface(data->isoc);
+			usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
 	}
 }
 
@@ -1038,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf,
 
 	usb_set_intfdata(intf, data);
 
-	usb_enable_autosuspend(interface_to_usbdev(intf));
-
 	return 0;
 }
 
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 14033a36bcd0..e2c48a7eccff 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -409,7 +409,8 @@ int register_cdrom(struct cdrom_device_info *cdi)
 	}
 
 	ENSURE(drive_status, CDC_DRIVE_STATUS );
-	ENSURE(media_changed, CDC_MEDIA_CHANGED);
+	if (cdo->check_events == NULL && cdo->media_changed == NULL)
+		*change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
 	ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
 	ENSURE(lock_door, CDC_LOCK);
 	ENSURE(select_speed, CDC_SELECT_SPEED);
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 5bc765d4c3ca..8238f89f73c9 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_SYNCLINK_GT)	+= synclink_gt.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
+obj-$(CONFIG_VIRTIO_CONSOLE)	+= virtio_console.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
 obj-$(CONFIG_MSPEC)		+= mspec.o
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 9252e85706ef..780498d76581 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void)
 #else
 			printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
 #endif
+			pci_unregister_driver(&agp_amd64_pci_driver);
 			return -ENODEV;
 		}
 
 		/* First check that we have at least one AMD64 NB */
-		if (!pci_dev_present(amd_nb_misc_ids))
+		if (!pci_dev_present(amd_nb_misc_ids)) {
+			pci_unregister_driver(&agp_amd64_pci_driver);
 			return -ENODEV;
+		}
 
 		/* Look for any AGP bridge */
 		agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
 		err = driver_attach(&agp_amd64_pci_driver.driver);
-		if (err == 0 && agp_bridges_found == 0)
+		if (err == 0 && agp_bridges_found == 0) {
+			pci_unregister_driver(&agp_amd64_pci_driver);
 			err = -ENODEV;
+		}
 	}
 	return err;
 }
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c195bfeade11..5feebe2800e9 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
 #define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
 
 #define I915_IFPADDR    0x60
+#define I830_HIC        0x70
 
 /* Intel 965G registers */
 #define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fab3d3265adb..0d09b537bb9a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
+#include <linux/delay.h>
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
 	u32 __iomem *gtt;		/* I915G */
 	bool clear_fake_agp; /* on first access via agp, fill with scratch */
 	int num_dcache_entries;
-	union {
-		void __iomem *i9xx_flush_page;
-		void *i8xx_flush_page;
-	};
+	void __iomem *i9xx_flush_page;
 	char *i81x_gtt_table;
-	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
 	struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
 
 static void i830_cleanup(void)
 {
-	if (intel_private.i8xx_flush_page) {
-		kunmap(intel_private.i8xx_flush_page);
-		intel_private.i8xx_flush_page = NULL;
-	}
-
-	__free_page(intel_private.i8xx_page);
-	intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
-	/* return if we've already set the flush mechanism up */
-	if (intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
-	if (!intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-	if (!intel_private.i8xx_flush_page)
-		i830_cleanup();
 }
 
 /* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
  */
 static void i830_chipset_flush(void)
 {
-	unsigned int *pg = intel_private.i8xx_flush_page;
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+	/* Forcibly evict everything from the CPU write buffers.
+	 * clflush appears to be insufficient.
+	 */
+	wbinvd_on_all_cpus();
+
+	/* Now we've only seen documents for this magic bit on 855GM,
+	 * we hope it exists for the other gen2 chipsets...
+	 *
+	 * Also works as advertised on my 845G.
+	 */
+	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+	       intel_private.registers+I830_HIC);
 
-	memset(pg, 0, 1024);
+	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+		if (time_after(jiffies, timeout))
+			break;
 
-	if (cpu_has_clflush)
-		clflush_cache_range(pg, 1024);
-	else if (wbinvd_on_all_cpus() != 0)
-		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+		udelay(50);
+	}
 }
 
 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
 
 	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
 
-	intel_i830_setup_flush();
-
 	return 0;
 }
 
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index b6ae6e9a9c5f..7855f9f45b8e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -320,6 +320,7 @@ static int unload_when_empty = 1;
 static int add_smi(struct smi_info *smi);
 static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
+static void cleanup_ipmi_si(void);
 
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block *nb)
@@ -3450,16 +3451,7 @@ static int __devinit init_ipmi_si(void)
 	mutex_lock(&smi_infos_lock);
 	if (unload_when_empty && list_empty(&smi_infos)) {
 		mutex_unlock(&smi_infos_lock);
-#ifdef CONFIG_PCI
-		if (pci_registered)
-			pci_unregister_driver(&ipmi_pci_driver);
-#endif
-
-#ifdef CONFIG_PPC_OF
-		if (of_registered)
-			of_unregister_platform_driver(&ipmi_of_platform_driver);
-#endif
-		driver_unregister(&ipmi_driver.driver);
+		cleanup_ipmi_si();
 		printk(KERN_WARNING PFX
 		       "Unable to find any System Interface(s)\n");
 		return -ENODEV;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 777181a2e603..bcbbc71febb7 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p)
 			    test_bit(IS_ANY_T1, &dev->flags))) {
 				DEBUGP(4, dev, "Perform AUTOPPS\n");
 				set_bit(IS_AUTOPPS_ACT, &dev->flags);
-				ptsreq.protocol = ptsreq.protocol =
-				    (0x01 << dev->proto);
+				ptsreq.protocol = (0x01 << dev->proto);
 				ptsreq.flags = 0x01;
 				ptsreq.pts1 = 0x00;
 				ptsreq.pts2 = 0x00;
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 94b8eb4d691d..444155a305ae 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data)
 static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 {
 	struct ipw_dev *ipw = priv_data;
-	struct resource *io_resource;
 	int ret;
 
 	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 	if (ret)
 		return ret;
 
-	io_resource = request_region(p_dev->resource[0]->start,
-				resource_size(p_dev->resource[0]),
-				IPWIRELESS_PCCARD_NAME);
+	if (!request_region(p_dev->resource[0]->start,
+			    resource_size(p_dev->resource[0]),
+			    IPWIRELESS_PCCARD_NAME)) {
+		ret = -EBUSY;
+		goto exit;
+	}
 
 	p_dev->resource[2]->flags |=
 		WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 
 	ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
 	if (ret != 0)
-		goto exit2;
+		goto exit1;
 
 	ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
 
-	ipw->attr_memory = ioremap(p_dev->resource[2]->start,
+	ipw->common_memory = ioremap(p_dev->resource[2]->start,
 				resource_size(p_dev->resource[2]));
-	request_mem_region(p_dev->resource[2]->start,
-			resource_size(p_dev->resource[2]),
-			IPWIRELESS_PCCARD_NAME);
+	if (!request_mem_region(p_dev->resource[2]->start,
+				resource_size(p_dev->resource[2]),
+				IPWIRELESS_PCCARD_NAME)) {
+		ret = -EBUSY;
+		goto exit2;
+	}
 
 	p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
 					WIN_ENABLE;
 	p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
 	ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
 	if (ret != 0)
-		goto exit2;
+		goto exit3;
 
 	ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
 	if (ret != 0)
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
 
 	ipw->attr_memory = ioremap(p_dev->resource[3]->start,
 				resource_size(p_dev->resource[3]));
-	request_mem_region(p_dev->resource[3]->start,
-			resource_size(p_dev->resource[3]),
-			IPWIRELESS_PCCARD_NAME);
+	if (!request_mem_region(p_dev->resource[3]->start,
+				resource_size(p_dev->resource[3]),
+				IPWIRELESS_PCCARD_NAME)) {
+		ret = -EBUSY;
+		goto exit4;
+	}
 
 	return 0;
 
+exit4:
+	iounmap(ipw->attr_memory);
 exit3:
+	release_mem_region(p_dev->resource[2]->start,
+			resource_size(p_dev->resource[2]));
 exit2:
-	if (ipw->common_memory) {
-		release_mem_region(p_dev->resource[2]->start,
-				resource_size(p_dev->resource[2]));
-		iounmap(ipw->common_memory);
-	}
+	iounmap(ipw->common_memory);
 exit1:
-	release_resource(io_resource);
+	release_region(p_dev->resource[0]->start,
+		       resource_size(p_dev->resource[0]));
+exit:
 	pcmcia_disable_device(p_dev);
-	return -1;
+	return ret;
 }
 
 static int config_ipwireless(struct ipw_dev *ipw)
@@ -219,6 +229,8 @@ exit:
 
 static void release_ipwireless(struct ipw_dev *ipw)
 {
+	release_region(ipw->link->resource[0]->start,
+		       resource_size(ipw->link->resource[0]));
 	if (ipw->common_memory) {
 		release_mem_region(ipw->link->resource[2]->start,
 				resource_size(ipw->link->resource[2]));
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 36e0fa161c2b..1f46f1cd9225 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
 		    tpm_protected_ordinal_duration[ordinal &
 						   TPM_PROTECTED_ORDINAL_MASK];
 
-	if (duration_idx != TPM_UNDEFINED) {
+	if (duration_idx != TPM_UNDEFINED)
 		duration = chip->vendor.duration[duration_idx];
-		/* if duration is 0, it's because chip->vendor.duration wasn't */
-		/* filled yet, so we set the lowest timeout just to give enough */
-		/* time for tpm_get_timeouts() to succeed */
-		return (duration <= 0 ? HZ : duration);
-	} else
+	if (duration <= 0)
 		return 2 * 60 * HZ;
+	else
+		return duration;
 }
 EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
 
diff --git a/drivers/tty/hvc/virtio_console.c b/drivers/char/virtio_console.c
index 896a2ced1d27..490393186338 100644
--- a/drivers/tty/hvc/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
- * Copyright (C) 2009, 2010 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -31,7 +32,7 @@
 #include <linux/virtio_console.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
-#include "hvc_console.h"
+#include "../tty/hvc/hvc_console.h"
 
 /*
  * This is a global struct for storing common data for all the devices
@@ -1462,6 +1463,17 @@ static void control_work_handler(struct work_struct *work)
 	spin_unlock(&portdev->cvq_lock);
 }
 
+static void out_intr(struct virtqueue *vq)
+{
+	struct port *port;
+
+	port = find_port_by_vq(vq->vdev->priv, vq);
+	if (!port)
+		return;
+
+	wake_up_interruptible(&port->waitqueue);
+}
+
 static void in_intr(struct virtqueue *vq)
 {
 	struct port *port;
@@ -1566,7 +1578,7 @@ static int init_vqs(struct ports_device *portdev)
 	 */
 	j = 0;
 	io_callbacks[j] = in_intr;
-	io_callbacks[j + 1] = NULL;
+	io_callbacks[j + 1] = out_intr;
 	io_names[j] = "input";
 	io_names[j + 1] = "output";
 	j += 2;
@@ -1580,7 +1592,7 @@ static int init_vqs(struct ports_device *portdev)
 		for (i = 1; i < nr_ports; i++) {
 			j += 2;
 			io_callbacks[j] = in_intr;
-			io_callbacks[j + 1] = NULL;
+			io_callbacks[j + 1] = out_intr;
 			io_names[j] = "input";
 			io_names[j + 1] = "output";
 		}
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 297f48b0cba9..07bca4970e50 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -79,6 +79,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/dmapool.h>
 #include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
@@ -235,16 +236,19 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
 }
 
 /*
- * Overall DMAC remains enabled always.
+ * Pause the channel by setting the HALT bit.
  *
- * Disabling individual channels could lose data.
+ * For M->P transfers, pause the DMAC first and then stop the peripheral -
+ * the FIFO can only drain if the peripheral is still requesting data.
+ * (note: this can still timeout if the DMAC FIFO never drains of data.)
  *
- * Disable the peripheral DMA after disabling the DMAC in order to allow
- * the DMAC FIFO to drain, and hence allow the channel to show inactive
+ * For P->M transfers, disable the peripheral first to stop it filling
+ * the DMAC FIFO, and then pause the DMAC.
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
 	u32 val;
+	int timeout;
 
 	/* Set the HALT bit and wait for the FIFO to drain */
 	val = readl(ch->base + PL080_CH_CONFIG);
@@ -252,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 	writel(val, ch->base + PL080_CH_CONFIG);
 
 	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		cpu_relax();
+	for (timeout = 1000; timeout; timeout--) {
+		if (!pl08x_phy_channel_busy(ch))
+			break;
+		udelay(1);
+	}
+	if (pl08x_phy_channel_busy(ch))
+		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -267,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 }
 
 
-/* Stops the channel */
-static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+/*
+ * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
+ * clears any pending interrupt status.  This should not be used for
+ * an on-going transfer, but as a method of shutting down a channel
+ * (eg, when it's no longer used) or terminating a transfer.
+ */
+static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
+	struct pl08x_phy_chan *ch)
 {
-	u32 val;
+	u32 val = readl(ch->base + PL080_CH_CONFIG);
 
-	pl08x_pause_phy_chan(ch);
+	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
+	         PL080_CONFIG_TC_IRQ_MASK);
 
-	/* Disable channel */
-	val = readl(ch->base + PL080_CH_CONFIG);
-	val &= ~PL080_CONFIG_ENABLE;
-	val &= ~PL080_CONFIG_ERR_IRQ_MASK;
-	val &= ~PL080_CONFIG_TC_IRQ_MASK;
 	writel(val, ch->base + PL080_CH_CONFIG);
+
+	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
+	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
 }
 
 static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -404,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 {
 	unsigned long flags;
 
+	spin_lock_irqsave(&ch->lock, flags);
+
 	/* Stop the channel and clear its interrupts */
-	pl08x_stop_phy_chan(ch);
-	writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
-	writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+	pl08x_terminate_phy_chan(pl08x, ch);
 
 	/* Mark it as free */
-	spin_lock_irqsave(&ch->lock, flags);
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
 }
@@ -1449,7 +1462,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		plchan->state = PL08X_CHAN_IDLE;
 
 		if (plchan->phychan) {
-			pl08x_stop_phy_chan(plchan->phychan);
+			pl08x_terminate_phy_chan(pl08x, plchan->phychan);
 
 			/*
 			 * Mark physical channel as free and free any slave
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e53d438142bb..e18eaabe92b9 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -49,6 +49,7 @@ struct imxdma_channel {
 
 struct imxdma_engine {
 	struct device			*dev;
+	struct device_dma_parameters	dma_parms;
 	struct dma_device		dma_device;
 	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
 };
@@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 	else
 		dmamode = DMA_MODE_WRITE;
 
+	switch (imxdmac->word_size) {
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		if (sgl->length & 3 || sgl->dma_address & 3)
+			return NULL;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		if (sgl->length & 1 || sgl->dma_address & 1)
+			return NULL;
+		break;
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		break;
+	default:
+		return NULL;
+	}
+
 	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
 		 dma_length, imxdmac->per_address, dmamode);
 	if (ret)
@@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
 	INIT_LIST_HEAD(&imxdma->dma_device.channels);
 
+	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
 	/* Initialize channel parameters */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct imxdma_channel *imxdmac = &imxdma->channel[i];
@@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
 		imxdmac->imxdma = imxdma;
 		spin_lock_init(&imxdmac->lock);
 
-		dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
-		dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
-
 		imxdmac->chan.device = &imxdma->dma_device;
-		imxdmac->chan.chan_id = i;
 		imxdmac->channel = i;
 
 		/* Add the channel to the DMAC list */
@@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, imxdma);
 
+	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
+	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
+
 	ret = dma_async_device_register(&imxdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d5a5d4d9c19b..b6d1455fa936 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -230,7 +230,7 @@ struct sdma_engine;
  * struct sdma_channel - housekeeping for a SDMA channel
  *
  * @sdma		pointer to the SDMA engine for this channel
- * @channel		the channel number, matches dmaengine chan_id
+ * @channel		the channel number, matches dmaengine chan_id + 1
  * @direction		transfer type. Needed for setting SDMA script
  * @peripheral_type	Peripheral type. Needed for setting SDMA script
  * @event_id0		aka dma request line
@@ -301,6 +301,7 @@ struct sdma_firmware_header {
 
 struct sdma_engine {
 	struct device			*dev;
+	struct device_dma_parameters	dma_parms;
 	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 	struct sdma_channel_control	*channel_control;
 	void __iomem			*regs;
@@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
 		if (bd->mode.status & BD_RROR)
 			sdmac->status = DMA_ERROR;
 		else
-			sdmac->status = DMA_SUCCESS;
+			sdmac->status = DMA_IN_PROGRESS;
 
 		bd->mode.status |= BD_DONE;
 		sdmac->buf_tail++;
@@ -770,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 }
 
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
 {
-	dma_cookie_t cookie = sdma->chan.cookie;
+	dma_cookie_t cookie = sdmac->chan.cookie;
 
 	if (++cookie < 0)
 		cookie = 1;
 
-	sdma->chan.cookie = cookie;
-	sdma->desc.cookie = cookie;
+	sdmac->chan.cookie = cookie;
+	sdmac->desc.cookie = cookie;
 
 	return cookie;
 }
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	cookie = sdma_assign_cookie(sdmac);
 
-	sdma_enable_channel(sdma, tx->chan->chan_id);
+	sdma_enable_channel(sdma, sdmac->channel);
 
 	spin_unlock_irq(&sdmac->lock);
 
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 	struct imx_dma_data *data = chan->private;
 	int prio, ret;
 
-	/* No need to execute this for internal channel 0 */
-	if (chan->chan_id == 0)
-		return 0;
-
 	if (!data)
 		return -EINVAL;
 
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int ret, i, count;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	struct scatterlist *sg;
 
 	if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 			ret =  -EINVAL;
 			goto err_out;
 		}
-		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+		switch (sdmac->word_size) {
+		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
-		else
-			bd->mode.command = sdmac->word_size;
+			if (count & 3 || sg->dma_address & 3)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_2_BYTES:
+			bd->mode.command = 2;
+			if (count & 1 || sg->dma_address & 1)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_1_BYTE:
+			bd->mode.command = 1;
+			break;
+		default:
+			return NULL;
+		}
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
 
-		if (sdmac->flags & IMX_DMA_SG_LOOP) {
+		if (i + 1 == sg_len) {
 			param |= BD_INTR;
-			if (i + 1 == sg_len)
-				param |= BD_WRAP;
+			param |= BD_LAST;
+			param &= ~BD_CONT;
 		}
 
-		if (i + 1 == sg_len)
-			param |= BD_INTR;
-
 		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
 				i, count, sg->dma_address,
 				param & BD_WRAP ? "wrap" : "",
@@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
 	return &sdmac->desc;
 err_out:
+	sdmac->status = DMA_ERROR;
 	return NULL;
 }
 
@@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int num_periods = buf_len / period_len;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	int ret, i = 0, buf = 0;
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	dma_cookie_t last_used;
-	enum dma_status ret;
 
 	last_used = chan->cookie;
 
-	ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
 	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
 
-	return ret;
+	return sdmac->status;
 }
 
 static void sdma_issue_pending(struct dma_chan *chan)
@@ -1135,7 +1142,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
 	/* download the RAM image for SDMA */
 	sdma_load_script(sdma, ram_code,
 			header->ram_code_size,
-			sdma->script_addrs->ram_code_start_addr);
+			addr->ram_code_start_addr);
 	clk_disable(sdma->clk);
 
 	sdma_add_scripts(sdma, addr);
@@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev)
 	struct resource *iores;
 	struct sdma_platform_data *pdata = pdev->dev.platform_data;
 	int i;
-	dma_cap_mask_t mask;
 	struct sdma_engine *sdma;
 
 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev)
 
 	sdma->version = pdata->sdma_version;
 
+	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 	/* Initialize channel parameters */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev)
 		sdmac->sdma = sdma;
 		spin_lock_init(&sdmac->lock);
 
-		dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
-		dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
-
 		sdmac->chan.device = &sdma->dma_device;
-		sdmac->chan.chan_id = i;
 		sdmac->channel = i;
 
-		/* Add the channel to the DMAC list */
-		list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+		/*
+		 * Add the channel to the DMAC list. Do not add channel 0 though
+		 * because we need it internally in the SDMA driver. This also means
+		 * that channel 0 in dmaengine counting matches sdma channel 1.
+		 */
+		if (i)
+			list_add_tail(&sdmac->chan.device_node,
+					&sdma->dma_device.channels);
 	}
 
 	ret = sdma_init(sdma);
@@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_control = sdma_control;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
+	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
 
 	ret = dma_async_device_register(&sdma->dma_device);
 	if (ret) {
@@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev)
 		goto err_init;
 	}
 
-	/* request channel 0. This is an internal control channel
-	 * to the SDMA engine and not available to clients.
-	 */
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	dma_request_channel(mask, NULL, NULL);
-
 	dev_info(sdma->dev, "initialized\n");
 
 	return 0;
@@ -1348,7 +1354,7 @@ err_clk:
 err_request_region:
 err_irq:
 	kfree(sdma);
-	return 0;
+	return ret;
 }
 
 static int __exit sdma_remove(struct platform_device *pdev)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index cb26ee9773d6..c1a125e7d1df 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
 	reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
 	idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
 
-	/*
-	 * Problem (observed with channel DMAIC_7): after enabling the channel
-	 * and initialising buffers, there comes an interrupt with current still
-	 * pointing at buffer 0, whereas it should use buffer 0 first and only
-	 * generate an interrupt when it is done, then current should already
-	 * point to buffer 1. This spurious interrupt also comes on channel
-	 * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the
-	 * first interrupt, there comes the second with current correctly
-	 * pointing to buffer 1 this time. But sometimes this second interrupt
-	 * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
-	 * the channel seems to prevent the channel from hanging, but it doesn't
-	 * prevent the spurious interrupt. This might also be unsafe. Think
-	 * about the IDMAC controller trying to switch to a buffer, when we
-	 * clear the ready bit, and re-enable it a moment later.
-	 */
-	reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
-	idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
-	idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
-
-	reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
-	idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
-	idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
-
 	spin_unlock_irqrestore(&ipu->lock, flags);
 
 	return 0;
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 	/* Other interrupts do not interfere with this channel */
 	spin_lock(&ichan->lock);
-	if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
-		     ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
-		     !list_is_last(ichan->queue.next, &ichan->queue))) {
-		int i = 100;
-
-		/* This doesn't help. See comment in ipu_disable_channel() */
-		while (--i) {
-			curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
-			if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
-				break;
-			cpu_relax();
-		}
-
-		if (!i) {
-			spin_unlock(&ichan->lock);
-			dev_dbg(dev,
-				"IRQ on active buffer on channel %x, active "
-				"%d, ready %x, %x, current %x!\n", chan_id,
-				ichan->active_buffer, ready0, ready1, curbuf);
-			return IRQ_NONE;
-		} else
-			dev_dbg(dev,
-				"Buffer deactivated on channel %x, active "
-				"%d, ready %x, %x, current %x, rest %d!\n", chan_id,
-				ichan->active_buffer, ready0, ready1, curbuf, i);
-	}
-
 	if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
 		     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
 		     )) {
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4a5ecc58025d..23e03554f0d3 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -826,8 +826,6 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
 /* Display and decode various NB registers for debug purposes. */
 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 {
-	int ganged;
-
 	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
 	debugf1("  NB two channel DRAM capable: %s\n",
@@ -851,28 +849,19 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 	debugf1("  DramHoleValid: %s\n",
 		(pvt->dhar & DHAR_VALID) ? "yes" : "no");
 
+	amd64_debug_display_dimm_sizes(0, pvt);
+
 	/* everything below this point is Fam10h and above */
-	if (boot_cpu_data.x86 == 0xf) {
-		amd64_debug_display_dimm_sizes(0, pvt);
+	if (boot_cpu_data.x86 == 0xf)
 		return;
-	}
+
+	amd64_debug_display_dimm_sizes(1, pvt);
 
 	amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
 
 	/* Only if NOT ganged does dclr1 have valid info */
 	if (!dct_ganging_enabled(pvt))
 		amd64_dump_dramcfg_low(pvt->dclr1, 1);
-
-	/*
-	 * Determine if ganged and then dump memory sizes for first controller,
-	 * and if NOT ganged dump info for 2nd controller.
-	 */
-	ganged = dct_ganging_enabled(pvt);
-
-	amd64_debug_display_dimm_sizes(0, pvt);
-
-	if (!ganged)
-		amd64_debug_display_dimm_sizes(1, pvt);
 }
 
 /* Read in both of DBAM registers */
@@ -1644,11 +1633,10 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 		       WARN_ON(ctrl != 0);
 	}
 
-	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
-		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
+	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
+	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
 
-	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
-	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
 
 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
 
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index e28e41668177..bcb1126e3d00 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -378,10 +378,17 @@ static void __init print_filtered(const char *info)
 
 static void __init dmi_dump_ids(void)
 {
+	const char *board;	/* Board Name is optional */
+
 	printk(KERN_DEBUG "DMI: ");
-	print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
-	printk(KERN_CONT "/");
+	print_filtered(dmi_get_system_info(DMI_SYS_VENDOR));
+	printk(KERN_CONT " ");
 	print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
+	board = dmi_get_system_info(DMI_BOARD_NAME);
+	if (board) {
+		printk(KERN_CONT "/");
+		print_filtered(board);
+	}
 	printk(KERN_CONT ", BIOS ");
 	print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
 	printk(KERN_CONT " ");
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index a261972f603d..b473429eee75 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -60,6 +60,7 @@ struct pca953x_chip {
 	unsigned gpio_start;
 	uint16_t reg_output;
 	uint16_t reg_direction;
+	struct mutex i2c_lock;
 
 #ifdef CONFIG_GPIO_PCA953X_IRQ
 	struct mutex irq_lock;
@@ -119,13 +120,17 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	reg_val = chip->reg_direction | (1u << off);
 	ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
 	if (ret)
-		return ret;
+		goto exit;
 
 	chip->reg_direction = reg_val;
-	return 0;
+	ret = 0;
+exit:
+	mutex_unlock(&chip->i2c_lock);
+	return ret;
 }
 
 static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -137,6 +142,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	/* set output level */
 	if (val)
 		reg_val = chip->reg_output | (1u << off);
@@ -145,7 +151,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
 
 	ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
 	if (ret)
-		return ret;
+		goto exit;
 
 	chip->reg_output = reg_val;
 
@@ -153,10 +159,13 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
 	reg_val = chip->reg_direction & ~(1u << off);
 	ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
 	if (ret)
-		return ret;
+		goto exit;
 
 	chip->reg_direction = reg_val;
-	return 0;
+	ret = 0;
+exit:
+	mutex_unlock(&chip->i2c_lock);
+	return ret;
 }
 
 static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
@@ -167,7 +176,9 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+	mutex_unlock(&chip->i2c_lock);
 	if (ret < 0) {
 		/* NOTE:  diagnostic already emitted; that's all we should
 		 * do unless gpio_*_value_cansleep() calls become different
@@ -187,6 +198,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	if (val)
 		reg_val = chip->reg_output | (1u << off);
 	else
@@ -194,9 +206,11 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
 
 	ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
 	if (ret)
-		return;
+		goto exit;
 
 	chip->reg_output = reg_val;
+exit:
+	mutex_unlock(&chip->i2c_lock);
 }
 
 static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
@@ -517,6 +531,8 @@ static int __devinit pca953x_probe(struct i2c_client *client,
 
 	chip->names = pdata->names;
 
+	mutex_init(&chip->i2c_lock);
+
 	/* initialize cached registers from their original values.
 	 * we can't share this chip with another i2c master.
 	 */
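
The pca953x changes wrap every cached read-modify-write of the output and direction registers in the new i2c_lock, so concurrent GPIO callers cannot interleave between reading the cached value and writing it back over I2C. A minimal sketch of the pattern, assuming a trimmed struct and a hypothetical write_output() in place of pca953x_write_reg():

#include <linux/mutex.h>
#include <linux/types.h>

struct pca_like_chip {
	uint16_t reg_output;		/* cached copy of the output register */
	struct mutex i2c_lock;		/* serializes cache + I2C access */
};

/* Hypothetical bus write; stands in for pca953x_write_reg(chip, PCA953X_OUTPUT, val). */
int write_output(struct pca_like_chip *chip, uint16_t val);

static int set_output_bit(struct pca_like_chip *chip, unsigned int off, int val)
{
	uint16_t reg_val;
	int ret;

	mutex_lock(&chip->i2c_lock);
	reg_val = val ? chip->reg_output | (1u << off)
		      : chip->reg_output & ~(1u << off);

	ret = write_output(chip, reg_val);
	if (ret)
		goto exit;

	/* Update the cache only after the hardware write succeeded. */
	chip->reg_output = reg_val;
exit:
	mutex_unlock(&chip->i2c_lock);
	return ret;
}

As in the probe hunk above, mutex_init(&chip->i2c_lock) has to run before the chip is exposed to gpiolib callers.
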
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 812aaac4438a..ab1162da70f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -272,17 +272,18 @@ int drm_vma_info(struct seq_file *m, void *data)
 #endif
 
 	mutex_lock(&dev->struct_mutex);
-	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+	seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
 		   atomic_read(&dev->vma_count),
-		   high_memory, (u64)virt_to_phys(high_memory));
+		   high_memory, (void *)virt_to_phys(high_memory));
 
 	list_for_each_entry(pt, &dev->vmalist, head) {
 		vma = pt->vma;
 		if (!vma)
 			continue;
 		seq_printf(m,
-			   "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
-			   pt->pid, vma->vm_start, vma->vm_end,
+			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+			   pt->pid,
+			   (void *)vma->vm_start, (void *)vma->vm_end,
 			   vma->vm_flags & VM_READ ? 'r' : '-',
 			   vma->vm_flags & VM_WRITE ? 'w' : '-',
 			   vma->vm_flags & VM_EXEC ? 'x' : '-',
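
The drm_info.c hunk switches the vma dump from raw %p/%08lx to %pK, which lets vsnprintf honor kptr_restrict and hide kernel addresses from unprivileged readers; since %pK expects a pointer argument, the unsigned long values are cast to void *. A minimal sketch under those assumptions:

#include <linux/seq_file.h>

/* Print a kernel address range so it is censored for unprivileged readers
 * when kptr_restrict is set; %pK takes a pointer, hence the casts.
 */
static void dump_range(struct seq_file *m, unsigned long start, unsigned long end)
{
	seq_printf(m, "0x%pK-0x%pK\n", (void *)start, (void *)end);
}
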
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index cb49685bde01..a34ef97d3c81 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -154,8 +154,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * available. In that case we can't account for this and just
 	 * hope for the best.
 	 */
-	if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
+	}
 
 	/* Invalidate all timestamps while vblank irq's are off. */
 	clear_vblank_timestamps(dev, crtc);
@@ -481,6 +483,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
 	/* Dot clock in Hz: */
 	dotclock = (u64) crtc->hwmode.clock * 1000;
 
+	/* Fields of interlaced scanout modes only last half a frame duration.
+	 * Double the dotclock to get half the frame-/line-/pixel duration.
+	 */
+	if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+		dotclock *= 2;
+
 	/* Valid dotclock? */
 	if (dotclock > 0) {
 		/* Convert scanline length in pixels and video dot clock to
@@ -593,14 +601,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 		return -EAGAIN;
 	}
 
-	/* Don't know yet how to handle interlaced or
-	 * double scan modes. Just no-op for now.
-	 */
-	if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
-		DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
-		return -ENOTSUPP;
-	}
-
 	/* Get current scanout position with system timestamp.
 	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
 	 * if single query takes longer than max_error nanoseconds.
@@ -848,10 +848,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	if (rc) {
 		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
-		smp_wmb();
 	}
 
+	smp_mb__before_atomic_inc();
 	atomic_add(diff, &dev->_vblank_count[crtc]);
+	smp_mb__after_atomic_inc();
 }
 
 /**
@@ -1001,7 +1002,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	int crtc, ret = 0;
+	int ret = 0;
+	unsigned int crtc;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
@@ -1283,15 +1285,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	 * e.g., due to spurious vblank interrupts. We need to
 	 * ignore those for accounting.
 	 */
-	if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
 		/* Store new timestamp in ringbuffer. */
 		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
-		smp_wmb();
 
 		/* Increment cooked vblank count. This also atomically commits
 		 * the timestamp computed above.
 		 */
+		smp_mb__before_atomic_inc();
 		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
 	} else {
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);
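
The drm_irq.c changes also replace the bare smp_wmb() after storing a new vblank timestamp with an smp_mb__before_atomic_inc()/smp_mb__after_atomic_inc() pair around the counter update, so a reader that sees the bumped count also sees the timestamp written for that slot. A sketch of the publish side, with a hypothetical store_timestamp() in place of the vblanktimestamp() ring-buffer store:

#include <linux/types.h>
#include <asm/atomic.h>

/* Hypothetical helper: records the timestamp for slot (count + 1). */
void store_timestamp(int crtc, u32 slot, s64 when_ns);

static void publish_vblank(atomic_t *vblank_count, int crtc, s64 when_ns)
{
	u32 cur = atomic_read(vblank_count);

	/* Fill the slot that the incremented count will refer to... */
	store_timestamp(crtc, cur + 1, when_ns);

	/* ...then order the store before the increment, and the increment
	 * before whatever the caller does next, as the barrier pair in the
	 * hunk above does.
	 */
	smp_mb__before_atomic_inc();
	atomic_inc(vblank_count);
	smp_mb__after_atomic_inc();
}
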
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466c5502..09e0327fc6ce 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -326,21 +326,21 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 	struct intel_crtc *crtc;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
-		const char *pipe = crtc->pipe ? "B" : "A";
-		const char *plane = crtc->plane ? "B" : "A";
+		const char pipe = pipe_name(crtc->pipe);
+		const char plane = plane_name(crtc->plane);
 		struct intel_unpin_work *work;
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		work = crtc->unpin_work;
 		if (work == NULL) {
-			seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
 				   pipe, plane);
 		} else {
 			if (!work->pending) {
-				seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
 					   pipe, plane);
 			} else {
-				seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
 					   pipe, plane);
 			}
 			if (work->enable_stall_check)
@@ -458,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret, i;
+	int ret, i, pipe;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -471,10 +471,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 			   I915_READ(IIR));
 		seq_printf(m, "Interrupt mask:      %08x\n",
 			   I915_READ(IMR));
-		seq_printf(m, "Pipe A stat:         %08x\n",
-			   I915_READ(PIPEASTAT));
-		seq_printf(m, "Pipe B stat:         %08x\n",
-			   I915_READ(PIPEBSTAT));
+		for_each_pipe(pipe)
+			seq_printf(m, "Pipe %c stat:         %08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
 	} else {
 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
 			   I915_READ(DEIER));
@@ -544,11 +544,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	volatile u32 *hws;
+	const volatile u32 __iomem *hws;
 	int i;
 
 	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	hws = (volatile u32 *)ring->status_page.page_addr;
+	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -615,7 +615,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	if (!ring->obj) {
 		seq_printf(m, "No ringbuffer setup\n");
 	} else {
-		u8 *virt = ring->virtual_start;
+		const u8 __iomem *virt = ring->virtual_start;
 		uint32_t off;
 
 		for (off = 0; off < ring->size; off += 4) {
@@ -805,15 +805,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		}
 	}
 
-	if (error->ringbuffer) {
-		struct drm_i915_error_object *obj = error->ringbuffer;
-
-		seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
-		offset = 0;
-		for (page = 0; page < obj->page_count; page++) {
-			for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-				seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
-				offset += 4;
+	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
+		if (error->ringbuffer[i]) {
+			struct drm_i915_error_object *obj = error->ringbuffer[i];
+			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
+			offset = 0;
+			for (page = 0; page < obj->page_count; page++) {
+				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+					seq_printf(m, "%08x :  %08x\n",
+						   offset,
+						   obj->pages[page][elt]);
+					offset += 4;
+				}
 			}
 		}
 	}
@@ -862,19 +867,44 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		u32 rpstat;
+		u32 rpupei, rpcurup, rpprevup;
+		u32 rpdownei, rpcurdown, rpprevdown;
 		int max_freq;
 
 		/* RPSTAT1 is in the GT power well */
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
+
+		rpstat = I915_READ(GEN6_RPSTAT1);
+		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
+		rpcurup = I915_READ(GEN6_RP_CUR_UP);
+		rpprevup = I915_READ(GEN6_RP_PREV_UP);
+		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
+		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
+		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
 
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
-		seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
 		seq_printf(m, "Render p-state ratio: %d\n",
 			   (gt_perf_status & 0xff00) >> 8);
 		seq_printf(m, "Render p-state VID: %d\n",
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
+		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
+						GEN6_CAGF_SHIFT) * 100);
+		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
+			   GEN6_CURICONT_MASK);
+		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
+			   GEN6_CURIAVG_MASK);
+		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
+			   GEN6_CURBSYTAVG_MASK);
 
 		max_freq = (rp_state_cap & 0xff0000) >> 16;
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
@@ -888,7 +918,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   max_freq * 100);
 
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else {
 		seq_printf(m, "no P-state info available\n");
 	}
@@ -1259,7 +1289,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
-	{"i915_capabilities", i915_capabilities, 0, 0},
+	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766f2081..72730377a01b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -43,6 +43,17 @@
 #include <linux/slab.h>
 #include <acpi/video.h>
 
+static void i915_write_hws_pga(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 addr;
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+}
+
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -60,16 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev)
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+	ring->status_page.page_addr =
+		(void __force __iomem *)dev_priv->status_page_dmah->vaddr;
 
-	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	if (INTEL_INFO(dev)->gen >= 4)
-		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
-					     0xf0;
+	i915_write_hws_pga(dev);
 
-	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 	return 0;
 }
@@ -216,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev)
 	if (ring->status_page.gfx_addr != 0)
 		intel_ring_setup_status_page(ring);
 	else
-		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+		i915_write_hws_pga(dev);
 
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
@@ -771,6 +779,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_EXEC_CONSTANTS:
 		value = INTEL_INFO(dev)->gen >= 4;
 		break;
+	case I915_PARAM_HAS_RELAXED_DELTA:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -859,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 				" G33 hw status page\n");
 		return -ENOMEM;
 	}
-	ring->status_page.page_addr = dev_priv->hws_map.handle;
-	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	ring->status_page.page_addr =
+		(void __force __iomem *)dev_priv->hws_map.handle;
+	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
 	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -1895,6 +1907,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (IS_GEN2(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
+	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
+	 * using 32bit addressing, overwriting memory if HWS is located
+	 * above 4GB.
+	 *
+	 * The documentation also mentions an issue with undefined
+	 * behaviour if any general state is accessed within a page above 4GB,
+	 * which also needs to be handled carefully.
+	 */
+	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
 	mmio_bar = IS_GEN2(dev) ? 1 : 0;
 	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
 	if (!dev_priv->regs) {
@@ -2002,9 +2025,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	dev_priv->trace_irq_seqno = 0;
 
-	ret = drm_vblank_init(dev, I915_NUM_PIPE);
+	if (IS_MOBILE(dev) || !IS_GEN2(dev))
+		dev_priv->num_pipe = 2;
+	else
+		dev_priv->num_pipe = 1;
+
+	ret = drm_vblank_init(dev, dev_priv->num_pipe);
 	if (ret)
 		goto out_gem_unload;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9ad42d583493..c34a8dd31d02 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -43,16 +43,28 @@ module_param_named(modeset, i915_modeset, int, 0400);
 unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
+int i915_panel_ignore_lid = 0;
+module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
+unsigned int i915_semaphores = 1;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
+unsigned int i915_enable_rc6 = 0;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
 unsigned int i915_panel_use_ssc = 1;
 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
 
-bool i915_try_reset = true;
+int i915_vbt_sdvo_panel_type = -1;
+module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+
+static bool i915_try_reset = true;
 module_param_named(reset, i915_try_reset, bool, 0600);
 
 static struct drm_driver driver;
@@ -251,7 +263,7 @@ void intel_detect_pch (struct drm_device *dev)
 	}
 }
 
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	int count;
 
@@ -267,12 +279,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
 		udelay(10);
 }
 
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
 	POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+	int loop = 500;
+	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	while (fifo < 20 && loop--) {
+		udelay(10);
+		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	}
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -360,7 +382,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 
-		if (dev_priv->renderctx && dev_priv->pwrctx)
+		if (IS_IRONLAKE_M(dev))
 			ironlake_enable_rc6(dev);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a78197d43ce6..449650545bb4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -49,17 +49,22 @@
 enum pipe {
 	PIPE_A = 0,
 	PIPE_B,
+	PIPE_C,
+	I915_MAX_PIPES
 };
+#define pipe_name(p) ((p) + 'A')
 
 enum plane {
 	PLANE_A = 0,
 	PLANE_B,
+	PLANE_C,
 };
-
-#define I915_NUM_PIPE	2
+#define plane_name(p) ((p) + 'A')
 
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
+#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+
 /* Interface history:
  *
  * 1.1: Original.
@@ -75,10 +80,7 @@ enum plane {
 #define DRIVER_PATCHLEVEL	0
 
 #define WATCH_COHERENCY	0
-#define WATCH_EXEC	0
-#define WATCH_RELOC	0
 #define WATCH_LISTS	0
-#define WATCH_PWRITE	0
 
 #define I915_GEM_PHYS_CURSOR_0 1
 #define I915_GEM_PHYS_CURSOR_1 2
@@ -111,6 +113,7 @@ struct intel_opregion {
 	struct opregion_swsci *swsci;
 	struct opregion_asle *asle;
 	void *vbt;
+	u32 __iomem *lid_state;
 };
 #define OPREGION_SIZE            (8*1024)
 
@@ -144,8 +147,7 @@ struct intel_display_error_state;
 struct drm_i915_error_state {
 	u32 eir;
 	u32 pgtbl_er;
-	u32 pipeastat;
-	u32 pipebstat;
+	u32 pipestat[I915_MAX_PIPES];
 	u32 ipeir;
 	u32 ipehr;
 	u32 instdone;
@@ -172,7 +174,7 @@ struct drm_i915_error_state {
 		int page_count;
 		u32 gtt_offset;
 		u32 *pages[0];
-	} *ringbuffer, *batchbuffer[I915_NUM_RINGS];
+	} *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
@@ -200,9 +202,7 @@ struct drm_i915_display_funcs {
 	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
-	void (*update_wm)(struct drm_device *dev, int planea_clock,
-			  int planeb_clock, int sr_hdisplay, int sr_htotal,
-			  int pixel_size);
+	void (*update_wm)(struct drm_device *dev);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -274,7 +274,6 @@ typedef struct drm_i915_private {
 	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
-	dma_addr_t dma_status_page;
 	uint32_t counter;
 	drm_local_map_t hws_map;
 	struct drm_i915_gem_object *pwrctx;
@@ -289,7 +288,6 @@ typedef struct drm_i915_private {
 	int page_flipping;
 
 	atomic_t irq_received;
-	u32 trace_irq_seqno;
 
 	/* protects the irq masks */
 	spinlock_t irq_lock;
@@ -324,8 +322,6 @@ typedef struct drm_i915_private {
 	int cfb_plane;
 	int cfb_y;
 
-	int irq_enabled;
-
 	struct intel_opregion opregion;
 
 	/* overlay */
@@ -387,7 +383,6 @@ typedef struct drm_i915_private {
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
-	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
 	u32 savePIPEASRC;
@@ -615,6 +610,12 @@ typedef struct drm_i915_private {
 		struct delayed_work retire_work;
 
 		/**
+		 * Are we in a non-interruptible section of code like
+		 * modesetting?
+		 */
+		bool interruptible;
+
+		/**
 		 * Flag if the X Server, and thus DRM, is not currently in
 		 * control of the device.
 		 *
@@ -652,6 +653,7 @@ typedef struct drm_i915_private {
 	unsigned int lvds_border_bits;
 	/* Panel fitter placement and size for Ironlake+ */
 	u32 pch_pf_pos, pch_pf_size;
+	int panel_t3, panel_t12;
 
 	struct drm_crtc *plane_to_crtc_mapping[2];
 	struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -698,6 +700,8 @@ typedef struct drm_i915_private {
 
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
+
+	struct drm_property *broadcast_rgb_property;
 } drm_i915_private_t;
 
 struct drm_i915_gem_object {
@@ -955,9 +959,13 @@ enum intel_chip_family {
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
+extern int i915_panel_ignore_lid;
 extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
 extern unsigned int i915_lvds_downclock;
 extern unsigned int i915_panel_use_ssc;
+extern int i915_vbt_sdvo_panel_type;
+extern unsigned int i915_enable_rc6;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -996,8 +1004,6 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-extern void i915_enable_interrupt (struct drm_device *dev);
 
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -1049,7 +1055,6 @@ extern void i915_mem_takedown(struct mem_block **heap);
 extern void i915_mem_release(struct drm_device * dev,
 			     struct drm_file *file_priv, struct mem_block *heap);
 /* i915_gem.c */
-int i915_gem_check_is_wedged(struct drm_device *dev);
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -1092,8 +1097,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct drm_device *dev,
-				     struct intel_ring_buffer *ring,
+int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
 				     uint32_t invalidate_domains,
 				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
@@ -1108,8 +1112,7 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-						bool interruptible);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 				    struct intel_ring_buffer *ring,
 				    u32 seqno);
@@ -1131,16 +1134,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 }
 
 static inline u32
-i915_gem_next_request_seqno(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	return ring->outstanding_lazy_request = dev_priv->next_seqno;
 }
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-					   struct intel_ring_buffer *pipelined,
-					   bool interruptible);
+					   struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
 void i915_gem_retire_requests(struct drm_device *dev);
@@ -1149,8 +1150,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 					    uint32_t read_domains,
 					    uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-					   bool interruptible);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
@@ -1159,14 +1159,11 @@ void i915_gem_do_init(struct drm_device *dev,
 		      unsigned long end);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct drm_device *dev,
-				  struct drm_file *file_priv,
-				  struct drm_i915_gem_request *request,
-				  struct intel_ring_buffer *ring);
-int __must_check i915_do_wait_request(struct drm_device *dev,
-				      uint32_t seqno,
-				      bool interruptible,
-				      struct intel_ring_buffer *ring);
+int __must_check i915_add_request(struct intel_ring_buffer *ring,
+				  struct drm_file *file,
+				  struct drm_i915_gem_request *request);
+int __must_check i915_wait_request(struct intel_ring_buffer *ring,
+				   uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1183,6 +1180,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1315,7 +1315,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
 #define __i915_read(x, y) \
 static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	u##x val = read##y(dev_priv->regs + reg); \
-	trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
 	return val; \
 }
 __i915_read(8, b)
@@ -1326,7 +1326,7 @@ __i915_read(64, q)
 
 #define __i915_write(x, y) \
 static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
-	trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
 	write##y(val, dev_priv->regs + reg); \
 }
 __i915_write(8, b)
@@ -1359,62 +1359,29 @@ __i915_write(64, q)
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val;
 
 	if (dev_priv->info->gen >= 6) {
-		__gen6_force_wake_get(dev_priv);
+		__gen6_gt_force_wake_get(dev_priv);
 		val = I915_READ(reg);
-		__gen6_force_wake_put(dev_priv);
+		__gen6_gt_force_wake_put(dev_priv);
 	} else
 		val = I915_READ(reg);
 
 	return val;
 }
 
-static inline void
-i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+				u32 reg, u32 val)
 {
-       /* Trace down the write operation before the real write */
-       trace_i915_reg_rw('W', reg, val, len);
-       switch (len) {
-       case 8:
-               writeq(val, dev_priv->regs + reg);
-               break;
-       case 4:
-               writel(val, dev_priv->regs + reg);
-               break;
-       case 2:
-               writew(val, dev_priv->regs + reg);
-               break;
-       case 1:
-               writeb(val, dev_priv->regs + reg);
-               break;
-       }
+	if (dev_priv->info->gen >= 6)
+		__gen6_gt_wait_for_fifo(dev_priv);
+	I915_WRITE(reg, val);
 }
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 0x04: ring 0 head pointer
- * 0x05: ring 1 head pointer (915-class)
- * 0x06: ring 2 head pointer (915-class)
- * 0x10-0x1b: Context status DWords (GM45)
- * 0x1f: Last written status offset. (GM45)
- *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg)  (((volatile u32 *)\
-			(LP_RING(dev_priv)->status_page.page_addr))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_GEM_HWS_INDEX		0x20
-#define I915_BREADCRUMB_INDEX		0x21
-
 #endif
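
The header now sizes the pipe/plane enums for a third pipe, derives printable names via pipe_name()/plane_name() (simply 'A' + index), and iterates only over the pipes actually present with for_each_pipe(), which reads the dev_priv->num_pipe value set up in i915_driver_load(). A small usage fragment, assuming a function where dev_priv is already in scope:

	int pipe;

	/* dev_priv->num_pipe is 1 or 2 here (3 once PIPE_C is wired up);
	 * for_each_pipe() loops over exactly that many pipes, and
	 * pipe_name() turns the index into 'A', 'B', 'C' for logging.
	 */
	for_each_pipe(pipe)
		DRM_DEBUG_DRIVER("pipe %c present\n", pipe_name(pipe));
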
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bc7f06b8fbca..c4c2855d002d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 	dev_priv->mm.object_memory -= size;
 }
 
-int
-i915_gem_check_is_wedged(struct drm_device *dev)
+static int
+i915_gem_wait_for_error(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct completion *x = &dev_priv->error_completion;
@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	/* Success, we reset the GPU! */
-	if (!atomic_read(&dev_priv->mm.wedged))
-		return 0;
-
-	/* GPU is hung, bump the completion count to account for
-	 * the token we just consumed so that we never hit zero and
-	 * end up waiting upon a subsequent completion event that
-	 * will never happen.
-	 */
-	spin_lock_irqsave(&x->wait.lock, flags);
-	x->done++;
-	spin_unlock_irqrestore(&x->wait.lock, flags);
-	return -EIO;
+	if (atomic_read(&dev_priv->mm.wedged)) {
+		/* GPU is hung, bump the completion count to account for
+		 * the token we just consumed so that we never hit zero and
+		 * end up waiting upon a subsequent completion event that
+		 * will never happen.
+		 */
+		spin_lock_irqsave(&x->wait.lock, flags);
+		x->done++;
+		spin_unlock_irqrestore(&x->wait.lock, flags);
+	}
+	return 0;
 }
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = i915_gem_check_is_wedged(dev);
+	ret = i915_gem_wait_for_error(dev);
 	if (ret)
 		return ret;
 
@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EAGAIN;
-	}
-
 	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
@@ -543,7 +535,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -555,6 +547,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	trace_i915_gem_object_pread(obj, args->offset, args->size);
+
 	ret = i915_gem_object_set_cpu_read_domain_range(obj,
 							args->offset,
 							args->size);
@@ -984,7 +978,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -996,6 +990,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
 	 * different detiling behavior between reading and writing.
@@ -1078,7 +1074,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -1121,7 +1117,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -1150,7 +1146,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_mmap *args = data;
 	struct drm_gem_object *obj;
-	loff_t offset;
 	unsigned long addr;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1165,8 +1160,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		return -E2BIG;
 	}
 
-	offset = args->offset;
-
 	down_write(&current->mm->mmap_sem);
 	addr = do_mmap(obj->filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1211,9 +1204,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
 		PAGE_SHIFT;
 
-	/* Now bind it into the GTT if needed */
-	mutex_lock(&dev->struct_mutex);
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto out;
 
+	trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+	/* Now bind it into the GTT if needed */
 	if (!obj->map_and_fenceable) {
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
@@ -1232,7 +1229,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (obj->tiling_mode == I915_TILING_NONE)
 		ret = i915_gem_object_put_fence(obj);
 	else
-		ret = i915_gem_object_get_fence(obj, NULL, true);
+		ret = i915_gem_object_get_fence(obj, NULL);
 	if (ret)
 		goto unlock;
 
@@ -1248,12 +1245,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
-
+out:
 	switch (ret) {
+	case -EIO:
 	case -EAGAIN:
+		/* Give the error handler a chance to run and move the
+		 * objects off the GPU active list. Next time we service the
+		 * fault, we should be able to transition the page into the
+		 * GTT without touching the GPU (and so avoid further
+	 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
+		 * with coherency, just lost writes.
+		 */
 		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
+	case -EINTR:
 		return VM_FAULT_NOPAGE;
 	case -ENOMEM:
 		return VM_FAULT_OOM;
@@ -1427,7 +1433,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
  * Return the required GTT alignment for an object, only taking into account
  * unfenced tiled surface requirements.
  */
-static uint32_t
+uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -1472,7 +1478,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -1712,9 +1718,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_process_flushing_list(struct drm_device *dev,
-			       uint32_t flush_domains,
-			       struct intel_ring_buffer *ring)
+i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+			       uint32_t flush_domains)
 {
 	struct drm_i915_gem_object *obj, *next;
 
@@ -1727,7 +1732,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 			obj->base.write_domain = 0;
 			list_del_init(&obj->gpu_write_list);
 			i915_gem_object_move_to_active(obj, ring,
-						       i915_gem_next_request_seqno(dev, ring));
+						       i915_gem_next_request_seqno(ring));
 
 			trace_i915_gem_object_change_domain(obj,
 							    obj->base.read_domains,
@@ -1737,27 +1742,22 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 }
 
 int
-i915_add_request(struct drm_device *dev,
+i915_add_request(struct intel_ring_buffer *ring,
 		 struct drm_file *file,
-		 struct drm_i915_gem_request *request,
-		 struct intel_ring_buffer *ring)
+		 struct drm_i915_gem_request *request)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_file_private *file_priv = NULL;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	uint32_t seqno;
 	int was_empty;
 	int ret;
 
 	BUG_ON(request == NULL);
 
-	if (file != NULL)
-		file_priv = file->driver_priv;
-
 	ret = ring->add_request(ring, &seqno);
 	if (ret)
 	    return ret;
 
-	ring->outstanding_lazy_request = false;
+	trace_i915_gem_request_add(ring, seqno);
 
 	request->seqno = seqno;
 	request->ring = ring;
@@ -1765,7 +1765,9 @@ i915_add_request(struct drm_device *dev,
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
 
-	if (file_priv) {
+	if (file) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+
 		spin_lock(&file_priv->mm.lock);
 		request->file_priv = file_priv;
 		list_add_tail(&request->client_list,
@@ -1773,6 +1775,8 @@ i915_add_request(struct drm_device *dev,
 		spin_unlock(&file_priv->mm.lock);
 	}
 
+	ring->outstanding_lazy_request = false;
+
 	if (!dev_priv->mm.suspended) {
 		mod_timer(&dev_priv->hangcheck_timer,
 			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1889,18 +1893,15 @@ void i915_gem_reset(struct drm_device *dev)
  * This function clears the request list as sequence numbers are passed.
  */
 static void
-i915_gem_retire_requests_ring(struct drm_device *dev,
-			      struct intel_ring_buffer *ring)
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 	int i;
 
-	if (!ring->status_page.page_addr ||
-	    list_empty(&ring->request_list))
+	if (list_empty(&ring->request_list))
 		return;
 
-	WARN_ON(i915_verify_lists(dev));
+	WARN_ON(i915_verify_lists(ring->dev));
 
 	seqno = ring->get_seqno(ring);
 
@@ -1918,7 +1919,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 		if (!i915_seqno_passed(seqno, request->seqno))
 			break;
 
-		trace_i915_gem_request_retire(dev, request->seqno);
+		trace_i915_gem_request_retire(ring, request->seqno);
 
 		list_del(&request->list);
 		i915_gem_request_remove_from_client(request);
@@ -1944,13 +1945,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 			i915_gem_object_move_to_inactive(obj);
 	}
 
-	if (unlikely (dev_priv->trace_irq_seqno &&
-		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+	if (unlikely(ring->trace_irq_seqno &&
+		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
 		ring->irq_put(ring);
-		dev_priv->trace_irq_seqno = 0;
+		ring->trace_irq_seqno = 0;
 	}
 
-	WARN_ON(i915_verify_lists(dev));
+	WARN_ON(i915_verify_lists(ring->dev));
 }
 
 void
@@ -1974,7 +1975,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
+		i915_gem_retire_requests_ring(&dev_priv->ring[i]);
 }
 
 static void
@@ -2008,11 +2009,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
 			struct drm_i915_gem_request *request;
 			int ret;
 
-			ret = i915_gem_flush_ring(dev, ring, 0,
-						  I915_GEM_GPU_DOMAINS);
+			ret = i915_gem_flush_ring(ring,
+						  0, I915_GEM_GPU_DOMAINS);
 			request = kzalloc(sizeof(*request), GFP_KERNEL);
 			if (ret || request == NULL ||
-			    i915_add_request(dev, NULL, request, ring))
+			    i915_add_request(ring, NULL, request))
 			    kfree(request);
 		}
 
@@ -2025,18 +2026,32 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-		     bool interruptible, struct intel_ring_buffer *ring)
+i915_wait_request(struct intel_ring_buffer *ring,
+		  uint32_t seqno)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	u32 ier;
 	int ret = 0;
 
 	BUG_ON(seqno == 0);
 
-	if (atomic_read(&dev_priv->mm.wedged))
-		return -EAGAIN;
+	if (atomic_read(&dev_priv->mm.wedged)) {
+		struct completion *x = &dev_priv->error_completion;
+		bool recovery_complete;
+		unsigned long flags;
+
+		/* Give the error handler a chance to run. */
+		spin_lock_irqsave(&x->wait.lock, flags);
+		recovery_complete = x->done > 0;
+		spin_unlock_irqrestore(&x->wait.lock, flags);
+
+		return recovery_complete ? -EIO : -EAGAIN;
+	}
 
 	if (seqno == ring->outstanding_lazy_request) {
 		struct drm_i915_gem_request *request;
@@ -2045,7 +2060,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		if (request == NULL)
 			return -ENOMEM;
 
-		ret = i915_add_request(dev, NULL, request, ring);
+		ret = i915_add_request(ring, NULL, request);
 		if (ret) {
 			kfree(request);
 			return ret;
@@ -2055,22 +2070,22 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	}
 
 	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
-		if (HAS_PCH_SPLIT(dev))
+		if (HAS_PCH_SPLIT(ring->dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
 			ier = I915_READ(IER);
 		if (!ier) {
 			DRM_ERROR("something (likely vbetool) disabled "
 				  "interrupts, re-enabling\n");
-			i915_driver_irq_preinstall(dev);
-			i915_driver_irq_postinstall(dev);
+			i915_driver_irq_preinstall(ring->dev);
+			i915_driver_irq_postinstall(ring->dev);
 		}
 
-		trace_i915_gem_request_wait_begin(dev, seqno);
+		trace_i915_gem_request_wait_begin(ring, seqno);
 
 		ring->waiting_seqno = seqno;
 		if (ring->irq_get(ring)) {
-			if (interruptible)
+			if (dev_priv->mm.interruptible)
 				ret = wait_event_interruptible(ring->irq_queue,
 							       i915_seqno_passed(ring->get_seqno(ring), seqno)
 							       || atomic_read(&dev_priv->mm.wedged));
@@ -2086,7 +2101,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 			ret = -EBUSY;
 		ring->waiting_seqno = 0;
 
-		trace_i915_gem_request_wait_end(dev, seqno);
+		trace_i915_gem_request_wait_end(ring, seqno);
 	}
 	if (atomic_read(&dev_priv->mm.wedged))
 		ret = -EAGAIN;
@@ -2102,31 +2117,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	 * a separate wait queue to handle that.
 	 */
 	if (ret == 0)
-		i915_gem_retire_requests_ring(dev, ring);
+		i915_gem_retire_requests_ring(ring);
 
 	return ret;
 }
 
 /**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno,
-		  struct intel_ring_buffer *ring)
-{
-	return i915_do_wait_request(dev, seqno, 1, ring);
-}
-
-/**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
 int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-			       bool interruptible)
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
 	int ret;
 
 	/* This function only exists to support waiting for existing rendering,
@@ -2138,10 +2140,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_do_wait_request(dev,
-					   obj->last_rendering_seqno,
-					   interruptible,
-					   obj->ring);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
 		if (ret)
 			return ret;
 	}
@@ -2191,6 +2190,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	if (ret == -ERESTARTSYS)
 		return ret;
 
+	trace_i915_gem_object_unbind(obj);
+
 	i915_gem_gtt_unbind_object(obj);
 	i915_gem_object_put_pages_gtt(obj);
 
@@ -2206,29 +2207,27 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
 
-	trace_i915_gem_object_unbind(obj);
-
 	return ret;
 }
 
 int
-i915_gem_flush_ring(struct drm_device *dev,
-		    struct intel_ring_buffer *ring,
+i915_gem_flush_ring(struct intel_ring_buffer *ring,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
 	int ret;
 
+	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
+
 	ret = ring->flush(ring, invalidate_domains, flush_domains);
 	if (ret)
 		return ret;
 
-	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	i915_gem_process_flushing_list(ring, flush_domains);
 	return 0;
 }
 
-static int i915_ring_idle(struct drm_device *dev,
-			  struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
 	int ret;
 
@@ -2236,15 +2235,13 @@ static int i915_ring_idle(struct drm_device *dev,
 		return 0;
 
 	if (!list_empty(&ring->gpu_write_list)) {
-		ret = i915_gem_flush_ring(dev, ring,
+		ret = i915_gem_flush_ring(ring,
 				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
 
-	return i915_wait_request(dev,
-				 i915_gem_next_request_seqno(dev, ring),
-				 ring);
+	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 }
 
 int
@@ -2261,7 +2258,7 @@ i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+		ret = i915_ring_idle(&dev_priv->ring[i]);
 		if (ret)
 			return ret;
 	}
@@ -2445,15 +2442,13 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
 
 static int
 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
-			    struct intel_ring_buffer *pipelined,
-			    bool interruptible)
+			    struct intel_ring_buffer *pipelined)
 {
 	int ret;
 
 	if (obj->fenced_gpu_access) {
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			ret = i915_gem_flush_ring(obj->base.dev,
-						  obj->last_fenced_ring,
+			ret = i915_gem_flush_ring(obj->last_fenced_ring,
 						  0, obj->base.write_domain);
 			if (ret)
 				return ret;
@@ -2465,10 +2460,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
 		if (!ring_passed_seqno(obj->last_fenced_ring,
 				       obj->last_fenced_seqno)) {
-			ret = i915_do_wait_request(obj->base.dev,
-						   obj->last_fenced_seqno,
-						   interruptible,
-						   obj->last_fenced_ring);
+			ret = i915_wait_request(obj->last_fenced_ring,
+						obj->last_fenced_seqno);
 			if (ret)
 				return ret;
 		}
@@ -2494,7 +2487,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 	if (obj->tiling_mode)
 		i915_gem_release_mmap(obj);
 
-	ret = i915_gem_object_flush_fence(obj, NULL, true);
+	ret = i915_gem_object_flush_fence(obj, NULL);
 	if (ret)
 		return ret;
 
@@ -2571,8 +2564,7 @@ i915_find_fence_reg(struct drm_device *dev,
  */
 int
 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-			  struct intel_ring_buffer *pipelined,
-			  bool interruptible)
+			  struct intel_ring_buffer *pipelined)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2594,10 +2586,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 			if (reg->setup_seqno) {
 				if (!ring_passed_seqno(obj->last_fenced_ring,
 						       reg->setup_seqno)) {
-					ret = i915_do_wait_request(obj->base.dev,
-								   reg->setup_seqno,
-								   interruptible,
-								   obj->last_fenced_ring);
+					ret = i915_wait_request(obj->last_fenced_ring,
+								reg->setup_seqno);
 					if (ret)
 						return ret;
 				}
@@ -2606,15 +2596,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 			}
 		} else if (obj->last_fenced_ring &&
 			   obj->last_fenced_ring != pipelined) {
-			ret = i915_gem_object_flush_fence(obj,
-							  pipelined,
-							  interruptible);
+			ret = i915_gem_object_flush_fence(obj, pipelined);
 			if (ret)
 				return ret;
 		} else if (obj->tiling_changed) {
 			if (obj->fenced_gpu_access) {
 				if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-					ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+					ret = i915_gem_flush_ring(obj->ring,
 								  0, obj->base.write_domain);
 					if (ret)
 						return ret;
@@ -2631,7 +2619,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 		if (obj->tiling_changed) {
 			if (pipelined) {
 				reg->setup_seqno =
-					i915_gem_next_request_seqno(dev, pipelined);
+					i915_gem_next_request_seqno(pipelined);
 				obj->last_fenced_seqno = reg->setup_seqno;
 				obj->last_fenced_ring = pipelined;
 			}
@@ -2645,7 +2633,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 	if (reg == NULL)
 		return -ENOSPC;
 
-	ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+	ret = i915_gem_object_flush_fence(obj, pipelined);
 	if (ret)
 		return ret;
 
@@ -2657,9 +2645,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 		if (old->tiling_mode)
 			i915_gem_release_mmap(old);
 
-		ret = i915_gem_object_flush_fence(old,
-						  pipelined,
-						  interruptible);
+		ret = i915_gem_object_flush_fence(old, pipelined);
 		if (ret) {
 			drm_gem_object_unreference(&old->base);
 			return ret;
@@ -2671,7 +2657,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 		old->fence_reg = I915_FENCE_REG_NONE;
 		old->last_fenced_ring = pipelined;
 		old->last_fenced_seqno =
-			pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+			pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
 
 		drm_gem_object_unreference(&old->base);
 	} else if (obj->last_fenced_seqno == 0)
@@ -2683,7 +2669,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 	obj->last_fenced_ring = pipelined;
 
 	reg->setup_seqno =
-		pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+		pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
 	obj->last_fenced_seqno = reg->setup_seqno;
 
 update:
@@ -2880,7 +2866,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	obj->map_and_fenceable = mappable && fenceable;
 
-	trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
+	trace_i915_gem_object_bind(obj, map_and_fenceable);
 	return 0;
 }
 
@@ -2903,13 +2889,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-
 	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+	return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2976,12 +2960,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
+	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+		return 0;
+
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
 		return ret;
 
 	if (obj->pending_gpu_write || write) {
-		ret = i915_gem_object_wait_rendering(obj, true);
+		ret = i915_gem_object_wait_rendering(obj);
 		if (ret)
 			return ret;
 	}
@@ -3031,7 +3018,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 
 	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
-		ret = i915_gem_object_wait_rendering(obj, false);
+		ret = i915_gem_object_wait_rendering(obj);
 		if (ret)
 			return ret;
 	}
@@ -3049,8 +3036,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 }
 
 int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-			  bool interruptible)
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
@@ -3058,13 +3044,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 		return 0;
 
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
-					  0, obj->base.write_domain);
+		ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
 		if (ret)
 			return ret;
 	}
 
-	return i915_gem_object_wait_rendering(obj, interruptible);
+	return i915_gem_object_wait_rendering(obj);
 }
 
 /**
@@ -3079,11 +3064,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return 0;
+
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_wait_rendering(obj, true);
+	ret = i915_gem_object_wait_rendering(obj);
 	if (ret)
 		return ret;
 
@@ -3181,7 +3169,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_wait_rendering(obj, true);
+	ret = i915_gem_object_wait_rendering(obj);
 	if (ret)
 		return ret;
 
@@ -3252,6 +3240,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	u32 seqno = 0;
 	int ret;
 
+	if (atomic_read(&dev_priv->mm.wedged))
+		return -EIO;
+
 	spin_lock(&file_priv->mm.lock);
 	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
@@ -3367,7 +3358,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -3418,7 +3409,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -3455,7 +3446,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -3473,7 +3464,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		 * flush earlier is beneficial.
 		 */
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			ret = i915_gem_flush_ring(dev, obj->ring,
+			ret = i915_gem_flush_ring(obj->ring,
 						  0, obj->base.write_domain);
 		} else if (obj->ring->outstanding_lazy_request ==
 			   obj->last_rendering_seqno) {
@@ -3484,9 +3475,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			 */
 			request = kzalloc(sizeof(*request), GFP_KERNEL);
 			if (request)
-				ret = i915_add_request(dev,
-						       NULL, request,
-						       obj->ring);
+				ret = i915_add_request(obj->ring, NULL, request);
 			else
 				ret = -ENOMEM;
 		}
@@ -3496,7 +3485,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		 * are actually unmasked, and our working set ends up being
 		 * larger than required.
 		 */
-		i915_gem_retire_requests_ring(dev, obj->ring);
+		i915_gem_retire_requests_ring(obj->ring);
 
 		args->busy = obj->active;
 	}
@@ -3535,7 +3524,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		return ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
-	if (obj == NULL) {
+	if (&obj->base == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -3626,6 +3615,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
 	kfree(obj->page_cpu_valid);
 	kfree(obj->bit_17);
 	kfree(obj);
+
+	trace_i915_gem_object_destroy(obj);
 }
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -3633,8 +3624,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_device *dev = obj->base.dev;
 
-	trace_i915_gem_object_destroy(obj);
-
 	while (obj->pin_count > 0)
 		i915_gem_object_unpin(obj);
 
@@ -3880,6 +3869,8 @@ i915_gem_load(struct drm_device *dev)
 	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
 
+	dev_priv->mm.interruptible = true;
+
 	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
 	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
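
Note on the repeated "&obj->base == NULL" checks introduced above (pin, unpin, busy and madvise ioctls, and again in the execbuffer and tiling paths below): to_intel_bo() is a container_of() cast, so taking the address of ->base simply re-adds the member offset and recovers the raw pointer returned by drm_gem_object_lookup(). Checking &obj->base therefore tests the lookup result directly, without ever dereferencing obj. A minimal user-space sketch of that identity, with simplified struct layouts and an illustrative container_of():

#include <stddef.h>
#include <errno.h>

struct drm_gem_object { int handle_count; };

struct drm_i915_gem_object {
	struct drm_gem_object base;
	/* ... driver-private state elided ... */
};

/* simplified container_of(); the kernel's version adds type checking */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	return container_of(gem, struct drm_i915_gem_object, base);
}

/* 'looked_up' stands in for the return value of drm_gem_object_lookup() */
static int check_lookup(struct drm_gem_object *looked_up)
{
	struct drm_i915_gem_object *obj = to_intel_bo(looked_up);

	/*
	 * &obj->base is (char *)looked_up - offsetof(base) + offsetof(base),
	 * i.e. the original pointer, so it is NULL exactly when the lookup
	 * failed.  (Relies on the kernel's flat pointer model; a strict-ISO
	 * compiler could object to the arithmetic on NULL.)
	 */
	if (&obj->base == NULL)
		return -ENOENT;

	return 0;
}
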
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 29d014c48ca2..8da1899bd24f 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -134,51 +134,6 @@ i915_verify_lists(struct drm_device *dev)
 }
 #endif /* WATCH_INACTIVE */
 
-
-#if WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
-		   uint32_t bias, uint32_t mark)
-{
-	uint32_t *mem = kmap_atomic(page, KM_USER0);
-	int i;
-	for (i = start; i < end; i += 4)
-		DRM_INFO("%08x: %08x%s\n",
-			  (int) (bias + i), mem[i / 4],
-			  (bias + i == mark) ? " ********" : "");
-	kunmap_atomic(mem, KM_USER0);
-	/* give syslog time to catch up */
-	msleep(1);
-}
-
-void
-i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
-		     const char *where, uint32_t mark)
-{
-	int page;
-
-	DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
-	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
-		int page_len, chunk, chunk_len;
-
-		page_len = len - page * PAGE_SIZE;
-		if (page_len > PAGE_SIZE)
-			page_len = PAGE_SIZE;
-
-		for (chunk = 0; chunk < page_len; chunk += 128) {
-			chunk_len = page_len - chunk;
-			if (chunk_len > 128)
-				chunk_len = 128;
-			i915_gem_dump_page(obj->pages[page],
-					   chunk, chunk + chunk_len,
-					   obj->gtt_offset +
-					   page * PAGE_SIZE,
-					   mark);
-		}
-	}
-}
-#endif
-
 #if WATCH_COHERENCY
 void
 i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3d39005540aa..da05a2692a75 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -30,6 +30,7 @@
 #include "drm.h"
 #include "i915_drv.h"
 #include "i915_drm.h"
+#include "i915_trace.h"
 
 static bool
 mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
@@ -63,6 +64,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 			return 0;
 	}
 
+	trace_i915_gem_evict(dev, min_size, alignment, mappable);
+
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
 	 * The oldest idle objects reside on the inactive list, which is in
@@ -189,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	if (lists_empty)
 		return -ENOSPC;
 
+	trace_i915_gem_evict_everything(dev, purgeable_only);
+
 	/* Flush everything (on to the inactive lists) and evict */
 	ret = i915_gpu_idle(dev);
 	if (ret)
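
The two hunks above only add trace_i915_gem_evict() and trace_i915_gem_evict_everything() calls; their definitions live in the newly included i915_trace.h and are not part of this diff. For orientation, a hypothetical sketch of what such a TRACE_EVENT definition typically looks like — the field names and format string below are illustrative, not the driver's actual tracepoint:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915

#include <linux/tracepoint.h>
#include <drm/drmP.h>

TRACE_EVENT(i915_gem_evict,
	TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
	TP_ARGS(dev, size, align, mappable),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, size)
		__field(u32, align)
		__field(bool, mappable)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
		__entry->size = size;
		__entry->align = align;
		__entry->mappable = mappable;
	),

	TP_printk("dev=%d, size=%d, align=%d%s",
		  __entry->dev, __entry->size, __entry->align,
		  __entry->mappable ? ", mappable" : "")
);
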
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2f445e825f2..7ff7f933ddf1 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,6 +37,7 @@ struct change_domains {
 	uint32_t invalidate_domains;
 	uint32_t flush_domains;
 	uint32_t flush_rings;
+	uint32_t flips;
 };
 
 /*
@@ -190,6 +191,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
 		i915_gem_release_mmap(obj);
 
+	if (obj->base.pending_write_domain)
+		cd->flips |= atomic_read(&obj->pending_flip);
+
 	/* The actual obj->write_domain will be updated with
 	 * pending_write_domain after we emit the accumulated flush for all
 	 * of our domain changes in execbuffers (which clears objects'
@@ -282,21 +286,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 
 	target_offset = to_intel_bo(target_obj)->gtt_offset;
 
-#if WATCH_RELOC
-	DRM_INFO("%s: obj %p offset %08x target %d "
-		 "read %08x write %08x gtt %08x "
-		 "presumed %08x delta %08x\n",
-		 __func__,
-		 obj,
-		 (int) reloc->offset,
-		 (int) reloc->target_handle,
-		 (int) reloc->read_domains,
-		 (int) reloc->write_domain,
-		 (int) target_offset,
-		 (int) reloc->presumed_offset,
-		 reloc->delta);
-#endif
-
 	/* The target buffer should have appeared before us in the
 	 * exec_object list, so it should have a GTT space bound by now.
 	 */
@@ -365,16 +354,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return ret;
 	}
 
-	/* and points to somewhere within the target object. */
-	if (unlikely(reloc->delta >= target_obj->size)) {
-		DRM_ERROR("Relocation beyond target object bounds: "
-			  "obj %p target %d delta %d size %d.\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->delta,
-			  (int) target_obj->size);
-		return ret;
-	}
-
 	reloc->delta += target_offset;
 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
 		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
@@ -575,7 +554,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
 			if (has_fenced_gpu_access) {
 				if (need_fence) {
-					ret = i915_gem_object_get_fence(obj, ring, 1);
+					ret = i915_gem_object_get_fence(obj, ring);
 					if (ret)
 						break;
 				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -690,11 +669,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	/* reacquire the objects */
 	eb_reset(eb);
 	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_object *obj;
-
 		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
 							exec[i].handle));
-		if (obj == NULL) {
+		if (&obj->base == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
 			ret = -ENOENT;
@@ -749,8 +726,7 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 		for (i = 0; i < I915_NUM_RINGS; i++)
 			if (flush_rings & (1 << i)) {
-				ret = i915_gem_flush_ring(dev,
-							  &dev_priv->ring[i],
+				ret = i915_gem_flush_ring(&dev_priv->ring[i],
 							  invalidate_domains,
 							  flush_domains);
 				if (ret)
@@ -772,9 +748,9 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 	if (from == NULL || to == from)
 		return 0;
 
-	/* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
-	if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj, true);
+	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
+	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+		return i915_gem_object_wait_rendering(obj);
 
 	idx = intel_ring_sync_index(from, to);
 
@@ -789,7 +765,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 		if (request == NULL)
 			return -ENOMEM;
 
-		ret = i915_add_request(obj->base.dev, NULL, request, from);
+		ret = i915_add_request(from, NULL, request);
 		if (ret) {
 			kfree(request);
 			return ret;
@@ -803,6 +779,39 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 }
 
 static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
+{
+	u32 plane, flip_mask;
+	int ret;
+
+	/* Check for any pending flips. As we only maintain a flip queue depth
+	 * of 1, we can simply insert a WAIT for the next display flip prior
+	 * to executing the batch and avoid stalling the CPU.
+	 */
+
+	for (plane = 0; flips >> plane; plane++) {
+		if (((flips >> plane) & 1) == 0)
+			continue;
+
+		if (plane)
+			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+		else
+			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+	}
+
+	return 0;
+}
+
+
+static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
@@ -810,19 +819,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	struct change_domains cd;
 	int ret;
 
-	cd.invalidate_domains = 0;
-	cd.flush_domains = 0;
-	cd.flush_rings = 0;
+	memset(&cd, 0, sizeof(cd));
 	list_for_each_entry(obj, objects, exec_list)
 		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
 
 	if (cd.invalidate_domains | cd.flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			  __func__,
-			 cd.invalidate_domains,
-			 cd.flush_domains);
-#endif
 		ret = i915_gem_execbuffer_flush(ring->dev,
 						cd.invalidate_domains,
 						cd.flush_domains,
@@ -831,6 +832,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 			return ret;
 	}
 
+	if (cd.flips) {
+		ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
+		if (ret)
+			return ret;
+	}
+
 	list_for_each_entry(obj, objects, exec_list) {
 		ret = i915_gem_execbuffer_sync_rings(obj, ring);
 		if (ret)
@@ -877,47 +884,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 	return 0;
 }
 
-static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
-				   struct list_head *objects)
-{
-	struct drm_i915_gem_object *obj;
-	int flips;
-
-	/* Check for any pending flips. As we only maintain a flip queue depth
-	 * of 1, we can simply insert a WAIT for the next display flip prior
-	 * to executing the batch and avoid stalling the CPU.
-	 */
-	flips = 0;
-	list_for_each_entry(obj, objects, exec_list) {
-		if (obj->base.write_domain)
-			flips |= atomic_read(&obj->pending_flip);
-	}
-	if (flips) {
-		int plane, flip_mask, ret;
-
-		for (plane = 0; flips >> plane; plane++) {
-			if (((flips >> plane) & 1) == 0)
-				continue;
-
-			if (plane)
-				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-			else
-				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-			ret = intel_ring_begin(ring, 2);
-			if (ret)
-				return ret;
-
-			intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-			intel_ring_emit(ring, MI_NOOP);
-			intel_ring_advance(ring);
-		}
-	}
-
-	return 0;
-}
-
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *objects,
 				   struct intel_ring_buffer *ring,
@@ -926,6 +892,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 	struct drm_i915_gem_object *obj;
 
 	list_for_each_entry(obj, objects, exec_list) {
+		u32 old_read = obj->base.read_domains;
+		u32 old_write = obj->base.write_domain;
+
+
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->base.write_domain = obj->base.pending_write_domain;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
@@ -939,9 +909,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 			intel_mark_busy(ring->dev, obj);
 		}
 
-		trace_i915_gem_object_change_domain(obj,
-						    obj->base.read_domains,
-						    obj->base.write_domain);
+		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
 
@@ -963,14 +931,14 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 	if (INTEL_INFO(dev)->gen >= 4)
 		invalidate |= I915_GEM_DOMAIN_SAMPLER;
 	if (ring->flush(ring, invalidate, 0)) {
-		i915_gem_next_request_seqno(dev, ring);
+		i915_gem_next_request_seqno(ring);
 		return;
 	}
 
 	/* Add a breadcrumb for the completion of the batch buffer */
 	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL || i915_add_request(dev, file, request, ring)) {
-		i915_gem_next_request_seqno(dev, ring);
+	if (request == NULL || i915_add_request(ring, file, request)) {
+		i915_gem_next_request_seqno(ring);
 		kfree(request);
 	}
 }
@@ -1000,10 +968,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
 	switch (args->flags & I915_EXEC_RING_MASK) {
 	case I915_EXEC_DEFAULT:
 	case I915_EXEC_RENDER:
@@ -1113,7 +1077,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
 							exec[i].handle));
-		if (obj == NULL) {
+		if (&obj->base == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
 			/* prevent error path from reading uninitialized data */
@@ -1170,11 +1134,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
-	if (ret)
-		goto err;
-
-	seqno = i915_gem_next_request_seqno(dev, ring);
+	seqno = i915_gem_next_request_seqno(ring);
 	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
 		if (seqno < ring->sync_seqno[i]) {
 			/* The GPU can not handle its semaphore value wrapping,
@@ -1189,6 +1149,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		}
 	}
 
+	trace_i915_gem_ring_dispatch(ring, seqno);
+
 	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
@@ -1245,11 +1207,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
 	int ret, i;
 
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
 	if (args->buffer_count < 1) {
 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
@@ -1330,17 +1287,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
 	int ret;
 
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
 	if (args->buffer_count < 1) {
 		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
 
-	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
+			     GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+	if (exec2_list == NULL)
+		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
+					   args->buffer_count);
 	if (exec2_list == NULL) {
 		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
 			  args->buffer_count);
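
The exec2_list allocation change near the end of the hunk above deserves a note: the list can be large (one entry per buffer), so the ioctl now tries a physically contiguous kmalloc() first, flagged __GFP_NOWARN | __GFP_NORETRY so an oversized request fails fast and silently instead of pressuring the OOM killer, and only then falls back to drm_malloc_ab(), which switches to vmalloc for allocations above a page. A standalone sketch of the pattern, assuming a drm_free_large()-style free that copes with either backing store (illustrative helpers, not the driver's code):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *alloc_exec_list(size_t nmemb, size_t size)
{
	void *ptr;

	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;			/* multiplication would overflow */

	/* cheap, contiguous attempt: fail fast, no warning, no OOM killer */
	ptr = kmalloc(nmemb * size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (ptr == NULL)			/* fall back to vmalloc space */
		ptr = __vmalloc(nmemb * size,
				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
	return ptr;
}

static void free_exec_list(void *ptr)
{
	/* one free path for both allocators, as drm_free_large() does */
	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}
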
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..281ad3d6115d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -284,14 +284,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	struct drm_i915_gem_set_tiling *args = data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	int ret;
-
-	ret = i915_gem_check_is_wedged(dev);
-	if (ret)
-		return ret;
+	int ret = 0;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL)
+	if (&obj->base == NULL)
 		return -ENOENT;
 
 	if (!i915_tiling_ok(dev,
@@ -349,14 +345,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
 			 i915_gem_object_fence_ok(obj, args->tiling_mode));
 
-		obj->tiling_changed = true;
-		obj->tiling_mode = args->tiling_mode;
-		obj->stride = args->stride;
+		/* Rebind if we need a change of alignment */
+		if (!obj->map_and_fenceable) {
+			u32 unfenced_alignment =
+				i915_gem_get_unfenced_gtt_alignment(obj);
+			if (obj->gtt_offset & (unfenced_alignment - 1))
+				ret = i915_gem_object_unbind(obj);
+		}
+
+		if (ret == 0) {
+			obj->tiling_changed = true;
+			obj->tiling_mode = args->tiling_mode;
+			obj->stride = args->stride;
+		}
 	}
+	/* we have to maintain this existing ABI... */
+	args->stride = obj->stride;
+	args->tiling_mode = obj->tiling_mode;
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -371,7 +380,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-	if (obj == NULL)
+	if (&obj->base == NULL)
 		return -ENOENT;
 
 	mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..188b497e5076 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,21 +85,11 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 	}
 }
 
-static inline u32
-i915_pipestat(int pipe)
-{
-	if (pipe == 0)
-		return PIPEASTAT;
-	if (pipe == 1)
-		return PIPEBSTAT;
-	BUG();
-}
-
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 {
 	if ((dev_priv->pipestat[pipe] & mask) != mask) {
-		u32 reg = i915_pipestat(pipe);
+		u32 reg = PIPESTAT(pipe);
 
 		dev_priv->pipestat[pipe] |= mask;
 		/* Enable the interrupt, clear any pending status */
@@ -112,7 +102,7 @@ void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 {
 	if ((dev_priv->pipestat[pipe] & mask) != 0) {
-		u32 reg = i915_pipestat(pipe);
+		u32 reg = PIPESTAT(pipe);
 
 		dev_priv->pipestat[pipe] &= ~mask;
 		I915_WRITE(reg, dev_priv->pipestat[pipe]);
@@ -171,12 +161,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
-				"pipe %d\n", pipe);
+				"pipe %c\n", pipe_name(pipe));
 		return 0;
 	}
 
-	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
-	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+	high_frame = PIPEFRAME(pipe);
+	low_frame = PIPEFRAMEPIXEL(pipe);
 
 	/*
 	 * High & low register fields aren't synchronized, so make sure
@@ -197,11 +187,11 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
+	int reg = PIPE_FRMCOUNT_GM45(pipe);
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
-					"pipe %d\n", pipe);
+				 "pipe %c\n", pipe_name(pipe));
 		return 0;
 	}
 
@@ -219,7 +209,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
-					"pipe %d\n", pipe);
+				 "pipe %c\n", pipe_name(pipe));
 		return 0;
 	}
 
@@ -316,6 +306,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
 		if (encoder->hot_plug)
 			encoder->hot_plug(encoder);
@@ -365,7 +357,7 @@ static void notify_ring(struct drm_device *dev,
 		return;
 
 	seqno = ring->get_seqno(ring);
-	trace_i915_gem_request_complete(dev, seqno);
+	trace_i915_gem_request_complete(ring, seqno);
 
 	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
@@ -417,6 +409,7 @@ static void pch_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 pch_iir;
+	int pipe;
 
 	pch_iir = I915_READ(SDEIIR);
 
@@ -437,13 +430,11 @@ static void pch_irq_handler(struct drm_device *dev)
 	if (pch_iir & SDE_POISON)
 		DRM_ERROR("PCH poison interrupt\n");
 
-	if (pch_iir & SDE_FDI_MASK) {
-		u32 fdia, fdib;
-
-		fdia = I915_READ(FDI_RXA_IIR);
-		fdib = I915_READ(FDI_RXB_IIR);
-		DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
-	}
+	if (pch_iir & SDE_FDI_MASK)
+		for_each_pipe(pipe)
+			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
+					 pipe_name(pipe),
+					 I915_READ(FDI_RX_IIR(pipe)));
 
 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
@@ -648,9 +639,14 @@ static void
 i915_error_state_free(struct drm_device *dev,
 		      struct drm_i915_error_state *error)
 {
-	i915_error_object_free(error->batchbuffer[0]);
-	i915_error_object_free(error->batchbuffer[1]);
-	i915_error_object_free(error->ringbuffer);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
+		i915_error_object_free(error->batchbuffer[i]);
+
+	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
+		i915_error_object_free(error->ringbuffer[i]);
+
 	kfree(error->active_bo);
 	kfree(error->overlay);
 	kfree(error);
@@ -765,7 +761,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	struct drm_i915_gem_object *obj;
 	struct drm_i915_error_state *error;
 	unsigned long flags;
-	int i;
+	int i, pipe;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	error = dev_priv->first_error;
@@ -773,19 +769,21 @@ static void i915_capture_error_state(struct drm_device *dev)
 	if (error)
 		return;
 
+	/* Account for pipe specific data like PIPE*STAT */
 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
 	if (!error) {
 		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
 		return;
 	}
 
-	DRM_DEBUG_DRIVER("generating error event\n");
+	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
+		 dev->primary->index);
 
 	error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
-	error->pipeastat = I915_READ(PIPEASTAT);
-	error->pipebstat = I915_READ(PIPEBSTAT);
+	for_each_pipe(pipe)
+		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 	error->instpm = I915_READ(INSTPM);
 	error->error = 0;
 	if (INTEL_INFO(dev)->gen >= 6) {
@@ -824,15 +822,16 @@ static void i915_capture_error_state(struct drm_device *dev)
 	}
 	i915_gem_record_fences(dev, error);
 
-	/* Record the active batchbuffers */
-	for (i = 0; i < I915_NUM_RINGS; i++)
+	/* Record the active batch and ring buffers */
+	for (i = 0; i < I915_NUM_RINGS; i++) {
 		error->batchbuffer[i] =
 			i915_error_first_batchbuffer(dev_priv,
 						     &dev_priv->ring[i]);
 
-	/* Record the ringbuffer */
-	error->ringbuffer = i915_error_object_create(dev_priv,
-						     dev_priv->ring[RCS].obj);
+		error->ringbuffer[i] =
+			i915_error_object_create(dev_priv,
+						 dev_priv->ring[i].obj);
+	}
 
 	/* Record buffers on the active and pinned lists. */
 	error->active_bo = NULL;
@@ -905,6 +904,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 eir = I915_READ(EIR);
+	int pipe;
 
 	if (!eir)
 		return;
@@ -953,14 +953,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 	}
 
 	if (eir & I915_ERROR_MEMORY_REFRESH) {
-		u32 pipea_stats = I915_READ(PIPEASTAT);
-		u32 pipeb_stats = I915_READ(PIPEBSTAT);
-
-		printk(KERN_ERR "memory refresh error\n");
-		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
-		       pipea_stats);
-		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
-		       pipeb_stats);
+		printk(KERN_ERR "memory refresh error:\n");
+		for_each_pipe(pipe)
+			printk(KERN_ERR "pipe %c stat: 0x%08x\n",
+			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
 		/* pipestat has already been acked */
 	}
 	if (eir & I915_ERROR_INSTRUCTION) {
@@ -1074,10 +1070,10 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
 	obj = work->pending_flip_obj;
 	if (INTEL_INFO(dev)->gen >= 4) {
-		int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+		int dspsurf = DSPSURF(intel_crtc->plane);
 		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
 	} else {
-		int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
+		int dspaddr = DSPADDR(intel_crtc->plane);
 		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
 							crtc->y * crtc->fb->pitch +
 							crtc->x * crtc->fb->bits_per_pixel/8);
@@ -1097,12 +1093,13 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	u32 iir, new_iir;
-	u32 pipea_stats, pipeb_stats;
+	u32 pipe_stats[I915_MAX_PIPES];
 	u32 vblank_status;
 	int vblank = 0;
 	unsigned long irqflags;
 	int irq_received;
-	int ret = IRQ_NONE;
+	int ret = IRQ_NONE, pipe;
+	bool blc_event = false;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -1125,27 +1122,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		 * interrupts (for non-MSI).
 		 */
 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		pipea_stats = I915_READ(PIPEASTAT);
-		pipeb_stats = I915_READ(PIPEBSTAT);
-
 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 			i915_handle_error(dev, false);
 
-		/*
-		 * Clear the PIPE(A|B)STAT regs before the IIR
-		 */
-		if (pipea_stats & 0x8000ffff) {
-			if (pipea_stats &  PIPE_FIFO_UNDERRUN_STATUS)
-				DRM_DEBUG_DRIVER("pipe a underrun\n");
-			I915_WRITE(PIPEASTAT, pipea_stats);
-			irq_received = 1;
-		}
-
-		if (pipeb_stats & 0x8000ffff) {
-			if (pipeb_stats &  PIPE_FIFO_UNDERRUN_STATUS)
-				DRM_DEBUG_DRIVER("pipe b underrun\n");
-			I915_WRITE(PIPEBSTAT, pipeb_stats);
-			irq_received = 1;
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/*
+			 * Clear the PIPE*STAT regs before the IIR
+			 */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+							 pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = 1;
+			}
 		}
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
@@ -1196,27 +1189,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 				intel_finish_page_flip_plane(dev, 1);
 		}
 
-		if (pipea_stats & vblank_status &&
-		    drm_handle_vblank(dev, 0)) {
-			vblank++;
-			if (!dev_priv->flip_pending_is_done) {
-				i915_pageflip_stall_check(dev, 0);
-				intel_finish_page_flip(dev, 0);
+		for_each_pipe(pipe) {
+			if (pipe_stats[pipe] & vblank_status &&
+			    drm_handle_vblank(dev, pipe)) {
+				vblank++;
+				if (!dev_priv->flip_pending_is_done) {
+					i915_pageflip_stall_check(dev, pipe);
+					intel_finish_page_flip(dev, pipe);
+				}
 			}
-		}
 
-		if (pipeb_stats & vblank_status &&
-		    drm_handle_vblank(dev, 1)) {
-			vblank++;
-			if (!dev_priv->flip_pending_is_done) {
-				i915_pageflip_stall_check(dev, 1);
-				intel_finish_page_flip(dev, 1);
-			}
+			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+				blc_event = true;
 		}
 
-		if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
-		    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
-		    (iir & I915_ASLE_INTERRUPT))
+
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
 			intel_opregion_asle_intr(dev);
 
 		/* With MSI, interrupts are only generated when iir
@@ -1266,16 +1254,6 @@ static int i915_emit_irq(struct drm_device * dev)
 	return dev_priv->counter;
 }
 
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
-	if (dev_priv->trace_irq_seqno == 0 &&
-	    ring->irq_get(ring))
-		dev_priv->trace_irq_seqno = seqno;
-}
-
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1375,7 +1353,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	else
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	/* maintain vblank delivery even in deep C-states */
+	if (dev_priv->info->gen == 3)
+		I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
 	return 0;
 }
 
@@ -1388,6 +1371,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	if (dev_priv->info->gen == 3)
+		I915_WRITE(INSTPM,
+			   INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
 					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
@@ -1398,16 +1385,6 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-void i915_enable_interrupt (struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!HAS_PCH_SPLIT(dev))
-		intel_opregion_enable_asle(dev);
-	dev_priv->irq_enabled = 1;
-}
-
-
 /* Set the vblank monitor pipe
  */
 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -1644,14 +1621,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	POSTING_READ(GTIER);
 
 	if (HAS_PCH_CPT(dev)) {
-		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT  |
-			       SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
+		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+				SDE_PORTB_HOTPLUG_CPT |
+				SDE_PORTC_HOTPLUG_CPT |
+				SDE_PORTD_HOTPLUG_CPT);
 	} else {
-		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
-			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-		I915_WRITE(FDI_RXA_IMR, 0);
-		I915_WRITE(FDI_RXB_IMR, 0);
+		hotplug_mask = (SDE_CRT_HOTPLUG |
+				SDE_PORTB_HOTPLUG |
+				SDE_PORTC_HOTPLUG |
+				SDE_PORTD_HOTPLUG |
+				SDE_AUX_MASK);
 	}
 
 	dev_priv->pch_irq_mask = ~hotplug_mask;
@@ -1674,6 +1653,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 void i915_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
 
 	atomic_set(&dev_priv->irq_received, 0);
 
@@ -1691,8 +1671,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
 	}
 
 	I915_WRITE(HWSTAM, 0xeffe);
-	I915_WRITE(PIPEASTAT, 0);
-	I915_WRITE(PIPEBSTAT, 0);
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
 	I915_WRITE(IMR, 0xffffffff);
 	I915_WRITE(IER, 0x0);
 	POSTING_READ(IER);
@@ -1804,6 +1784,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
 void i915_driver_irq_uninstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
 
 	if (!dev_priv)
 		return;
@@ -1821,12 +1802,13 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
 	}
 
 	I915_WRITE(HWSTAM, 0xffffffff);
-	I915_WRITE(PIPEASTAT, 0);
-	I915_WRITE(PIPEBSTAT, 0);
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
 	I915_WRITE(IMR, 0xffffffff);
 	I915_WRITE(IER, 0x0);
 
-	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
-	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe),
+			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
 	I915_WRITE(IIR, I915_READ(IIR));
 }
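
The i915_irq.c hunks above, and the i915_reg.h hunks that follow, convert hard-coded PIPEA/PIPEB register pairs into pipe-indexed macros (PIPESTAT(pipe), DSPSURF(plane), ...) and use pipe_name()/for_each_pipe() to keep loops and log messages pipe-agnostic. The convention rests on a small _PIPE() helper that offsets from the pipe-A register by a constant stride. A sketch of the helpers as they are assumed to work here — the real definitions live in i915_reg.h/i915_drv.h, and the pipe-count handling in particular is simplified:

/* linear interpolation between the pipe A and pipe B MMIO offsets */
#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))

/* offsets taken from the hunks below */
#define _PIPEASTAT		0x70024
#define _PIPEBSTAT		0x71024
#define PIPESTAT(pipe)		_PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)

/* 'A', 'B', ... for DRM_DEBUG/DRM_ERROR output */
#define pipe_name(p)		((p) + 'A')

/* assumes a fixed two-pipe bound; the driver may instead iterate a
 * per-device pipe count */
#define I915_MAX_PIPES		2
#define for_each_pipe(p)	for ((p) = 0; (p) < I915_MAX_PIPES; (p)++)
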
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5cfc68940f17..363f66ca5d33 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -174,7 +174,9 @@
  *   address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
-#define MI_FLUSH_DW		MI_INSTR(0x26, 2) /* for GEN6 */
+#define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
+#define   MI_INVALIDATE_TLB	(1<<18)
+#define   MI_INVALIDATE_BSD	(1<<7)
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE	(1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -403,9 +405,12 @@
 #define   I915_ERROR_INSTRUCTION			(1<<0)
 #define INSTPM	        0x020c0
 #define   INSTPM_SELF_EN (1<<12) /* 915GM only */
+#define   INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
+					will not assert AGPBUSY# and will only
+					be delivered when out of C3. */
 #define ACTHD	        0x020c8
 #define FW_BLC		0x020d8
-#define FW_BLC2	 	0x020dc
+#define FW_BLC2		0x020dc
 #define FW_BLC_SELF	0x020e0 /* 915+ only */
 #define   FW_BLC_SELF_EN_MASK      (1<<31)
 #define   FW_BLC_SELF_FIFO_MASK    (1<<16) /* 945 only */
@@ -704,9 +709,9 @@
 #define   VGA1_PD_P1_DIV_2	(1 << 13)
 #define   VGA1_PD_P1_SHIFT	8
 #define   VGA1_PD_P1_MASK	(0x1f << 8)
-#define DPLL_A	0x06014
-#define DPLL_B	0x06018
-#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
+#define _DPLL_A	0x06014
+#define _DPLL_B	0x06018
+#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
 #define   DPLL_VCO_ENABLE		(1 << 31)
 #define   DPLL_DVO_HIGH_SPEED		(1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
@@ -777,7 +782,7 @@
 #define   SDVO_MULTIPLIER_MASK			0x000000ff
 #define   SDVO_MULTIPLIER_SHIFT_HIRES		4
 #define   SDVO_MULTIPLIER_SHIFT_VGA		0
-#define DPLL_A_MD 0x0601c /* 965+ only */
+#define _DPLL_A_MD 0x0601c /* 965+ only */
 /*
  * UDI pixel divider, controlling how many pixels are stuffed into a packet.
  *
@@ -814,14 +819,14 @@
  */
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
-#define DPLL_B_MD 0x06020 /* 965+ only */
-#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
-#define FPA0	0x06040
-#define FPA1	0x06044
-#define FPB0	0x06048
-#define FPB1	0x0604c
-#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
-#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
+#define _DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+#define _FPA0	0x06040
+#define _FPA1	0x06044
+#define _FPB0	0x06048
+#define _FPB1	0x0604c
+#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
 #define   FP_N_DIV_MASK		0x003f0000
 #define   FP_N_PINEVIEW_DIV_MASK	0x00ff0000
 #define   FP_N_DIV_SHIFT		16
@@ -960,8 +965,9 @@
  * Palette regs
  */
 
-#define PALETTE_A		0x0a000
-#define PALETTE_B		0x0a800
+#define _PALETTE_A		0x0a000
+#define _PALETTE_B		0x0a800
+#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
 
 /* MCH MMIO space */
 
@@ -1265,32 +1271,32 @@
  */
 
 /* Pipe A timing regs */
-#define HTOTAL_A	0x60000
-#define HBLANK_A	0x60004
-#define HSYNC_A		0x60008
-#define VTOTAL_A	0x6000c
-#define VBLANK_A	0x60010
-#define VSYNC_A		0x60014
-#define PIPEASRC	0x6001c
-#define BCLRPAT_A	0x60020
+#define _HTOTAL_A	0x60000
+#define _HBLANK_A	0x60004
+#define _HSYNC_A		0x60008
+#define _VTOTAL_A	0x6000c
+#define _VBLANK_A	0x60010
+#define _VSYNC_A		0x60014
+#define _PIPEASRC	0x6001c
+#define _BCLRPAT_A	0x60020
 
 /* Pipe B timing regs */
-#define HTOTAL_B	0x61000
-#define HBLANK_B	0x61004
-#define HSYNC_B		0x61008
-#define VTOTAL_B	0x6100c
-#define VBLANK_B	0x61010
-#define VSYNC_B		0x61014
-#define PIPEBSRC	0x6101c
-#define BCLRPAT_B	0x61020
-
-#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
+#define _HTOTAL_B	0x61000
+#define _HBLANK_B	0x61004
+#define _HSYNC_B		0x61008
+#define _VTOTAL_B	0x6100c
+#define _VBLANK_B	0x61010
+#define _VSYNC_B		0x61014
+#define _PIPEBSRC	0x6101c
+#define _BCLRPAT_B	0x61020
+
+#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
 
 /* VGA port control */
 #define ADPA			0x61100
@@ -1384,6 +1390,7 @@
 #define   SDVO_ENCODING_HDMI		(0x2 << 10)
 /** Required for HDMI operation */
 #define   SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define   SDVO_COLOR_RANGE_16_235	(1 << 8)
 #define   SDVO_BORDER_ENABLE		(1 << 7)
 #define   SDVO_AUDIO_ENABLE		(1 << 6)
 /** New with 965, default is to be set */
@@ -1439,8 +1446,13 @@
 #define   LVDS_PORT_EN			(1 << 31)
 /* Selects pipe B for LVDS data.  Must be set on pre-965. */
 #define   LVDS_PIPEB_SELECT		(1 << 30)
+#define   LVDS_PIPE_MASK		(1 << 30)
 /* LVDS dithering flag on 965/g4x platform */
 #define   LVDS_ENABLE_DITHER		(1 << 25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define   LVDS_VSYNC_POLARITY		(1 << 21)
+#define   LVDS_HSYNC_POLARITY		(1 << 20)
+
 /* Enable border for unscaled (or aspect-scaled) display */
 #define   LVDS_BORDER_ENABLE		(1 << 15)
 /*
@@ -1474,6 +1486,9 @@
 #define   LVDS_B0B3_POWER_DOWN		(0 << 2)
 #define   LVDS_B0B3_POWER_UP		(3 << 2)
 
+#define LVDS_PIPE_ENABLED(V, P) \
+	(((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
+
 /* Video Data Island Packet control */
 #define VIDEO_DIP_DATA		0x61178
 #define VIDEO_DIP_CTL		0x61170
@@ -1551,17 +1566,7 @@
 
 /* Backlight control */
 #define BLC_PWM_CTL		0x61254
-#define   BACKLIGHT_MODULATION_FREQ_SHIFT		(17)
 #define BLC_PWM_CTL2		0x61250 /* 965+ only */
-#define   BLM_COMBINATION_MODE (1 << 30)
-/*
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
- *
- * The actual value is this field multiplied by two.
- */
-#define   BACKLIGHT_MODULATION_FREQ_MASK		(0x7fff << 17)
-#define   BLM_LEGACY_MODE				(1 << 16)
 /*
  * This is the number of cycles out of the backlight modulation cycle for which
  * the backlight is on.
@@ -2062,6 +2067,10 @@
 
 #define   DP_PORT_EN			(1 << 31)
 #define   DP_PIPEB_SELECT		(1 << 30)
+#define   DP_PIPE_MASK			(1 << 30)
+
+#define DP_PIPE_ENABLED(V, P) \
+	(((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
 
 /* Link training mode - select a suitable mode for each stage */
 #define   DP_LINK_TRAIN_PAT_1		(0 << 28)
@@ -2204,8 +2213,8 @@
  * which is after the LUTs, so we want the bytes for our color format.
  * For our current usage, this is always 3, one byte for R, G and B.
  */
-#define PIPEA_GMCH_DATA_M			0x70050
-#define PIPEB_GMCH_DATA_M			0x71050
+#define _PIPEA_GMCH_DATA_M			0x70050
+#define _PIPEB_GMCH_DATA_M			0x71050
 
 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
 #define   PIPE_GMCH_DATA_M_TU_SIZE_MASK		(0x3f << 25)
@@ -2213,8 +2222,8 @@
 
 #define   PIPE_GMCH_DATA_M_MASK			(0xffffff)
 
-#define PIPEA_GMCH_DATA_N			0x70054
-#define PIPEB_GMCH_DATA_N			0x71054
+#define _PIPEA_GMCH_DATA_N			0x70054
+#define _PIPEB_GMCH_DATA_N			0x71054
 #define   PIPE_GMCH_DATA_N_MASK			(0xffffff)
 
 /*
@@ -2228,20 +2237,25 @@
  * Attributes and VB-ID.
  */
 
-#define PIPEA_DP_LINK_M				0x70060
-#define PIPEB_DP_LINK_M				0x71060
+#define _PIPEA_DP_LINK_M				0x70060
+#define _PIPEB_DP_LINK_M				0x71060
 #define   PIPEA_DP_LINK_M_MASK			(0xffffff)
 
-#define PIPEA_DP_LINK_N				0x70064
-#define PIPEB_DP_LINK_N				0x71064
+#define _PIPEA_DP_LINK_N				0x70064
+#define _PIPEB_DP_LINK_N				0x71064
 #define   PIPEA_DP_LINK_N_MASK			(0xffffff)
 
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
 /* Display & cursor control */
 
 /* Pipe A */
-#define PIPEADSL		0x70000
+#define _PIPEADSL		0x70000
 #define   DSL_LINEMASK		0x00000fff
-#define PIPEACONF		0x70008
+#define _PIPEACONF		0x70008
 #define   PIPECONF_ENABLE	(1<<31)
 #define   PIPECONF_DISABLE	0
 #define   PIPECONF_DOUBLE_WIDE	(1<<30)
@@ -2267,7 +2281,7 @@
 #define   PIPECONF_DITHER_TYPE_ST1 (1<<2)
 #define   PIPECONF_DITHER_TYPE_ST2 (2<<2)
 #define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
-#define PIPEASTAT		0x70024
+#define _PIPEASTAT		0x70024
 #define   PIPE_FIFO_UNDERRUN_STATUS		(1UL<<31)
 #define   PIPE_CRC_ERROR_ENABLE			(1UL<<29)
 #define   PIPE_CRC_DONE_ENABLE			(1UL<<28)
@@ -2303,10 +2317,12 @@
 #define   PIPE_6BPC				(2 << 5)
 #define   PIPE_12BPC				(3 << 5)
 
-#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
-#define PIPEDSL(pipe)  _PIPE(pipe, PIPEADSL, PIPEBDSL)
-#define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
+#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
+#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPEDSL(pipe)  _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
+#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
 
 #define DSPARB			0x70030
 #define   DSPARB_CSTART_MASK	(0x7f << 7)
@@ -2468,20 +2484,21 @@
  *  } while (high1 != high2);
  *  frame = (high1 << 8) | low1;
  */
-#define PIPEAFRAMEHIGH          0x70040
+#define _PIPEAFRAMEHIGH          0x70040
 #define   PIPE_FRAME_HIGH_MASK    0x0000ffff
 #define   PIPE_FRAME_HIGH_SHIFT   0
-#define PIPEAFRAMEPIXEL         0x70044
+#define _PIPEAFRAMEPIXEL         0x70044
 #define   PIPE_FRAME_LOW_MASK     0xff000000
 #define   PIPE_FRAME_LOW_SHIFT    24
 #define   PIPE_PIXEL_MASK         0x00ffffff
 #define   PIPE_PIXEL_SHIFT        0
 /* GM45+ just has to be different */
-#define PIPEA_FRMCOUNT_GM45	0x70040
-#define PIPEA_FLIPCOUNT_GM45	0x70044
+#define _PIPEA_FRMCOUNT_GM45	0x70040
+#define _PIPEA_FLIPCOUNT_GM45	0x70044
+#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
 
 /* Cursor A & B regs */
-#define CURACNTR		0x70080
+#define _CURACNTR		0x70080
 /* Old style CUR*CNTR flags (desktop 8xx) */
 #define   CURSOR_ENABLE		0x80000000
 #define   CURSOR_GAMMA_ENABLE	0x40000000
@@ -2502,23 +2519,23 @@
 #define   MCURSOR_PIPE_A	0x00
 #define   MCURSOR_PIPE_B	(1 << 28)
 #define   MCURSOR_GAMMA_ENABLE  (1 << 26)
-#define CURABASE		0x70084
-#define CURAPOS			0x70088
+#define _CURABASE		0x70084
+#define _CURAPOS			0x70088
 #define   CURSOR_POS_MASK       0x007FF
 #define   CURSOR_POS_SIGN       0x8000
 #define   CURSOR_X_SHIFT        0
 #define   CURSOR_Y_SHIFT        16
 #define CURSIZE			0x700a0
-#define CURBCNTR		0x700c0
-#define CURBBASE		0x700c4
-#define CURBPOS			0x700c8
+#define _CURBCNTR		0x700c0
+#define _CURBBASE		0x700c4
+#define _CURBPOS			0x700c8
 
-#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
-#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
-#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
+#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
 
 /* Display A control */
-#define DSPACNTR                0x70180
+#define _DSPACNTR                0x70180
 #define   DISPLAY_PLANE_ENABLE			(1<<31)
 #define   DISPLAY_PLANE_DISABLE			0
 #define   DISPPLANE_GAMMA_ENABLE		(1<<30)
@@ -2532,9 +2549,10 @@
 #define   DISPPLANE_32BPP_30BIT_NO_ALPHA	(0xa<<26)
 #define   DISPPLANE_STEREO_ENABLE		(1<<25)
 #define   DISPPLANE_STEREO_DISABLE		0
-#define   DISPPLANE_SEL_PIPE_MASK		(1<<24)
+#define   DISPPLANE_SEL_PIPE_SHIFT		24
+#define   DISPPLANE_SEL_PIPE_MASK		(3<<DISPPLANE_SEL_PIPE_SHIFT)
 #define   DISPPLANE_SEL_PIPE_A			0
-#define   DISPPLANE_SEL_PIPE_B			(1<<24)
+#define   DISPPLANE_SEL_PIPE_B			(1<<DISPPLANE_SEL_PIPE_SHIFT)
 #define   DISPPLANE_SRC_KEY_ENABLE		(1<<22)
 #define   DISPPLANE_SRC_KEY_DISABLE		0
 #define   DISPPLANE_LINE_DOUBLE			(1<<20)
@@ -2543,20 +2561,20 @@
 #define   DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
 #define   DISPPLANE_TRICKLE_FEED_DISABLE	(1<<14) /* Ironlake */
 #define   DISPPLANE_TILED			(1<<10)
-#define DSPAADDR		0x70184
-#define DSPASTRIDE		0x70188
-#define DSPAPOS			0x7018C /* reserved */
-#define DSPASIZE		0x70190
-#define DSPASURF		0x7019C /* 965+ only */
-#define DSPATILEOFF		0x701A4 /* 965+ only */
-
-#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
-#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
-#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
-#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
-#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
-#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
-#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
+#define _DSPAADDR		0x70184
+#define _DSPASTRIDE		0x70188
+#define _DSPAPOS			0x7018C /* reserved */
+#define _DSPASIZE		0x70190
+#define _DSPASURF		0x7019C /* 965+ only */
+#define _DSPATILEOFF		0x701A4 /* 965+ only */
+
+#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
 
 /* VBIOS flags */
 #define SWF00			0x71410
@@ -2574,27 +2592,27 @@
 #define SWF32			0x7241c
 
 /* Pipe B */
-#define PIPEBDSL		0x71000
-#define PIPEBCONF		0x71008
-#define PIPEBSTAT		0x71024
-#define PIPEBFRAMEHIGH		0x71040
-#define PIPEBFRAMEPIXEL		0x71044
-#define PIPEB_FRMCOUNT_GM45	0x71040
-#define PIPEB_FLIPCOUNT_GM45	0x71044
+#define _PIPEBDSL		0x71000
+#define _PIPEBCONF		0x71008
+#define _PIPEBSTAT		0x71024
+#define _PIPEBFRAMEHIGH		0x71040
+#define _PIPEBFRAMEPIXEL		0x71044
+#define _PIPEB_FRMCOUNT_GM45	0x71040
+#define _PIPEB_FLIPCOUNT_GM45	0x71044
 
 
 /* Display B control */
-#define DSPBCNTR		0x71180
+#define _DSPBCNTR		0x71180
 #define   DISPPLANE_ALPHA_TRANS_ENABLE		(1<<15)
 #define   DISPPLANE_ALPHA_TRANS_DISABLE		0
 #define   DISPPLANE_SPRITE_ABOVE_DISPLAY	0
 #define   DISPPLANE_SPRITE_ABOVE_OVERLAY	(1)
-#define DSPBADDR		0x71184
-#define DSPBSTRIDE		0x71188
-#define DSPBPOS			0x7118C
-#define DSPBSIZE		0x71190
-#define DSPBSURF		0x7119C
-#define DSPBTILEOFF		0x711A4
+#define _DSPBADDR		0x71184
+#define _DSPBSTRIDE		0x71188
+#define _DSPBPOS			0x7118C
+#define _DSPBSIZE		0x71190
+#define _DSPBSURF		0x7119C
+#define _DSPBTILEOFF		0x711A4
 
 /* VBIOS regs */
 #define VGACNTRL		0x71400
@@ -2648,68 +2666,80 @@
 #define  FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK  0xff
 
 
-#define PIPEA_DATA_M1           0x60030
+#define _PIPEA_DATA_M1           0x60030
 #define  TU_SIZE(x)             (((x)-1) << 25) /* default size 64 */
 #define  TU_SIZE_MASK           0x7e000000
 #define  PIPE_DATA_M1_OFFSET    0
-#define PIPEA_DATA_N1           0x60034
+#define _PIPEA_DATA_N1           0x60034
 #define  PIPE_DATA_N1_OFFSET    0
 
-#define PIPEA_DATA_M2           0x60038
+#define _PIPEA_DATA_M2           0x60038
 #define  PIPE_DATA_M2_OFFSET    0
-#define PIPEA_DATA_N2           0x6003c
+#define _PIPEA_DATA_N2           0x6003c
 #define  PIPE_DATA_N2_OFFSET    0
 
-#define PIPEA_LINK_M1           0x60040
+#define _PIPEA_LINK_M1           0x60040
 #define  PIPE_LINK_M1_OFFSET    0
-#define PIPEA_LINK_N1           0x60044
+#define _PIPEA_LINK_N1           0x60044
 #define  PIPE_LINK_N1_OFFSET    0
 
-#define PIPEA_LINK_M2           0x60048
+#define _PIPEA_LINK_M2           0x60048
 #define  PIPE_LINK_M2_OFFSET    0
-#define PIPEA_LINK_N2           0x6004c
+#define _PIPEA_LINK_N2           0x6004c
 #define  PIPE_LINK_N2_OFFSET    0
 
 /* PIPEB timing regs are the same, starting from 0x61000 */
 
-#define PIPEB_DATA_M1           0x61030
-#define PIPEB_DATA_N1           0x61034
+#define _PIPEB_DATA_M1           0x61030
+#define _PIPEB_DATA_N1           0x61034
 
-#define PIPEB_DATA_M2           0x61038
-#define PIPEB_DATA_N2           0x6103c
+#define _PIPEB_DATA_M2           0x61038
+#define _PIPEB_DATA_N2           0x6103c
 
-#define PIPEB_LINK_M1           0x61040
-#define PIPEB_LINK_N1           0x61044
+#define _PIPEB_LINK_M1           0x61040
+#define _PIPEB_LINK_N1           0x61044
 
-#define PIPEB_LINK_M2           0x61048
-#define PIPEB_LINK_N2           0x6104c
+#define _PIPEB_LINK_M2           0x61048
+#define _PIPEB_LINK_N2           0x6104c
 
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
 
 /* CPU panel fitter */
-#define PFA_CTL_1               0x68080
-#define PFB_CTL_1               0x68880
+/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1               0x68080
+#define _PFB_CTL_1               0x68880
 #define  PF_ENABLE              (1<<31)
 #define  PF_FILTER_MASK		(3<<23)
 #define  PF_FILTER_PROGRAMMED	(0<<23)
 #define  PF_FILTER_MED_3x3	(1<<23)
 #define  PF_FILTER_EDGE_ENHANCE	(2<<23)
 #define  PF_FILTER_EDGE_SOFTEN	(3<<23)
-#define PFA_WIN_SZ		0x68074
-#define PFB_WIN_SZ		0x68874
-#define PFA_WIN_POS		0x68070
-#define PFB_WIN_POS		0x68870
+#define _PFA_WIN_SZ		0x68074
+#define _PFB_WIN_SZ		0x68874
+#define _PFA_WIN_POS		0x68070
+#define _PFB_WIN_POS		0x68870
+#define _PFA_VSCALE		0x68084
+#define _PFB_VSCALE		0x68884
+#define _PFA_HSCALE		0x68090
+#define _PFB_HSCALE		0x68890
+
+#define PF_CTL(pipe)		_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_WIN_SZ(pipe)		_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_POS(pipe)	_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_VSCALE(pipe)		_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+#define PF_HSCALE(pipe)		_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
 
 /* legacy palette */
-#define LGC_PALETTE_A           0x4a000
-#define LGC_PALETTE_B           0x4a800
+#define _LGC_PALETTE_A           0x4a000
+#define _LGC_PALETTE_B           0x4a800
+#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
 
 /* interrupts */
 #define DE_MASTER_IRQ_CONTROL   (1 << 31)
@@ -2875,17 +2905,17 @@
 #define PCH_GMBUS4		0xc5110
 #define PCH_GMBUS5		0xc5120
 
-#define PCH_DPLL_A              0xc6014
-#define PCH_DPLL_B              0xc6018
-#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
+#define _PCH_DPLL_A              0xc6014
+#define _PCH_DPLL_B              0xc6018
+#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
 
-#define PCH_FPA0                0xc6040
+#define _PCH_FPA0                0xc6040
 #define  FP_CB_TUNE		(0x3<<22)
-#define PCH_FPA1                0xc6044
-#define PCH_FPB0                0xc6048
-#define PCH_FPB1                0xc604c
-#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
-#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
+#define _PCH_FPA1                0xc6044
+#define _PCH_FPB0                0xc6048
+#define _PCH_FPB1                0xc604c
+#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
+#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
 
 #define PCH_DPLL_TEST           0xc606c
 
@@ -2904,6 +2934,7 @@
 #define  DREF_NONSPREAD_SOURCE_MASK		(3<<9)
 #define  DREF_SUPERSPREAD_SOURCE_DISABLE        (0<<7)
 #define  DREF_SUPERSPREAD_SOURCE_ENABLE         (2<<7)
+#define  DREF_SUPERSPREAD_SOURCE_MASK		(3<<7)
 #define  DREF_SSC4_DOWNSPREAD                   (0<<6)
 #define  DREF_SSC4_CENTERSPREAD                 (1<<6)
 #define  DREF_SSC1_DISABLE                      (0<<1)
@@ -2936,60 +2967,69 @@
 
 /* transcoder */
 
-#define TRANS_HTOTAL_A          0xe0000
+#define _TRANS_HTOTAL_A          0xe0000
 #define  TRANS_HTOTAL_SHIFT     16
 #define  TRANS_HACTIVE_SHIFT    0
-#define TRANS_HBLANK_A          0xe0004
+#define _TRANS_HBLANK_A          0xe0004
 #define  TRANS_HBLANK_END_SHIFT 16
 #define  TRANS_HBLANK_START_SHIFT 0
-#define TRANS_HSYNC_A           0xe0008
+#define _TRANS_HSYNC_A           0xe0008
 #define  TRANS_HSYNC_END_SHIFT  16
 #define  TRANS_HSYNC_START_SHIFT 0
-#define TRANS_VTOTAL_A          0xe000c
+#define _TRANS_VTOTAL_A          0xe000c
 #define  TRANS_VTOTAL_SHIFT     16
 #define  TRANS_VACTIVE_SHIFT    0
-#define TRANS_VBLANK_A          0xe0010
+#define _TRANS_VBLANK_A          0xe0010
 #define  TRANS_VBLANK_END_SHIFT 16
 #define  TRANS_VBLANK_START_SHIFT 0
-#define TRANS_VSYNC_A           0xe0014
+#define _TRANS_VSYNC_A           0xe0014
 #define  TRANS_VSYNC_END_SHIFT  16
 #define  TRANS_VSYNC_START_SHIFT 0
 
-#define TRANSA_DATA_M1          0xe0030
-#define TRANSA_DATA_N1          0xe0034
-#define TRANSA_DATA_M2          0xe0038
-#define TRANSA_DATA_N2          0xe003c
-#define TRANSA_DP_LINK_M1       0xe0040
-#define TRANSA_DP_LINK_N1       0xe0044
-#define TRANSA_DP_LINK_M2       0xe0048
-#define TRANSA_DP_LINK_N2       0xe004c
-
-#define TRANS_HTOTAL_B          0xe1000
-#define TRANS_HBLANK_B          0xe1004
-#define TRANS_HSYNC_B           0xe1008
-#define TRANS_VTOTAL_B          0xe100c
-#define TRANS_VBLANK_B          0xe1010
-#define TRANS_VSYNC_B           0xe1014
-
-#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
-#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
-#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
-#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
-#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
-#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
-
-#define TRANSB_DATA_M1          0xe1030
-#define TRANSB_DATA_N1          0xe1034
-#define TRANSB_DATA_M2          0xe1038
-#define TRANSB_DATA_N2          0xe103c
-#define TRANSB_DP_LINK_M1       0xe1040
-#define TRANSB_DP_LINK_N1       0xe1044
-#define TRANSB_DP_LINK_M2       0xe1048
-#define TRANSB_DP_LINK_N2       0xe104c
-
-#define TRANSACONF              0xf0008
-#define TRANSBCONF              0xf1008
-#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
+#define _TRANSA_DATA_M1          0xe0030
+#define _TRANSA_DATA_N1          0xe0034
+#define _TRANSA_DATA_M2          0xe0038
+#define _TRANSA_DATA_N2          0xe003c
+#define _TRANSA_DP_LINK_M1       0xe0040
+#define _TRANSA_DP_LINK_N1       0xe0044
+#define _TRANSA_DP_LINK_M2       0xe0048
+#define _TRANSA_DP_LINK_N2       0xe004c
+
+#define _TRANS_HTOTAL_B          0xe1000
+#define _TRANS_HBLANK_B          0xe1004
+#define _TRANS_HSYNC_B           0xe1008
+#define _TRANS_VTOTAL_B          0xe100c
+#define _TRANS_VBLANK_B          0xe1010
+#define _TRANS_VSYNC_B           0xe1014
+
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
+
+#define _TRANSB_DATA_M1          0xe1030
+#define _TRANSB_DATA_N1          0xe1034
+#define _TRANSB_DATA_M2          0xe1038
+#define _TRANSB_DATA_N2          0xe103c
+#define _TRANSB_DP_LINK_M1       0xe1040
+#define _TRANSB_DP_LINK_N1       0xe1044
+#define _TRANSB_DP_LINK_M2       0xe1048
+#define _TRANSB_DP_LINK_N2       0xe104c
+
+#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
+#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
+#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
+#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
+#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
+#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
+#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
+#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
+
+#define _TRANSACONF              0xf0008
+#define _TRANSBCONF              0xf1008
+#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF)
 #define  TRANS_DISABLE          (0<<31)
 #define  TRANS_ENABLE           (1<<31)
 #define  TRANS_STATE_MASK       (1<<30)
@@ -3007,18 +3047,19 @@
 #define  TRANS_6BPC             (2<<5)
 #define  TRANS_12BPC            (3<<5)
 
-#define FDI_RXA_CHICKEN         0xc200c
-#define FDI_RXB_CHICKEN         0xc2010
-#define  FDI_RX_PHASE_SYNC_POINTER_ENABLE       (1)
-#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
+#define _FDI_RXA_CHICKEN         0xc200c
+#define _FDI_RXB_CHICKEN         0xc2010
+#define  FDI_RX_PHASE_SYNC_POINTER_OVR	(1<<1)
+#define  FDI_RX_PHASE_SYNC_POINTER_EN	(1<<0)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
 
 #define SOUTH_DSPCLK_GATE_D	0xc2020
 #define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
 
 /* CPU: FDI_TX */
-#define FDI_TXA_CTL             0x60100
-#define FDI_TXB_CTL             0x61100
-#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
+#define _FDI_TXA_CTL             0x60100
+#define _FDI_TXB_CTL             0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
 #define  FDI_TX_DISABLE         (0<<31)
 #define  FDI_TX_ENABLE          (1<<31)
 #define  FDI_LINK_TRAIN_PATTERN_1       (0<<28)
@@ -3058,9 +3099,9 @@
 #define  FDI_SCRAMBLING_DISABLE         (1<<7)
 
 /* FDI_RX, FDI_X is hard-wired to Transcoder_X */
-#define FDI_RXA_CTL             0xf000c
-#define FDI_RXB_CTL             0xf100c
-#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
+#define _FDI_RXA_CTL             0xf000c
+#define _FDI_RXB_CTL             0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
 #define  FDI_RX_ENABLE          (1<<31)
 /* train, dp width same as FDI_TX */
 #define  FDI_DP_PORT_WIDTH_X8           (7<<19)
@@ -3085,15 +3126,15 @@
 #define  FDI_LINK_TRAIN_NORMAL_CPT		(3<<8)
 #define  FDI_LINK_TRAIN_PATTERN_MASK_CPT	(3<<8)
 
-#define FDI_RXA_MISC            0xf0010
-#define FDI_RXB_MISC            0xf1010
-#define FDI_RXA_TUSIZE1         0xf0030
-#define FDI_RXA_TUSIZE2         0xf0038
-#define FDI_RXB_TUSIZE1         0xf1030
-#define FDI_RXB_TUSIZE2         0xf1038
-#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
-#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
-#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
+#define _FDI_RXA_MISC            0xf0010
+#define _FDI_RXB_MISC            0xf1010
+#define _FDI_RXA_TUSIZE1         0xf0030
+#define _FDI_RXA_TUSIZE2         0xf0038
+#define _FDI_RXB_TUSIZE1         0xf1030
+#define _FDI_RXB_TUSIZE2         0xf1038
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
 
 /* FDI_RX interrupt register format */
 #define FDI_RX_INTER_LANE_ALIGN         (1<<10)
@@ -3108,12 +3149,12 @@
 #define FDI_RX_CROSS_CLOCK_OVERFLOW     (1<<1)
 #define FDI_RX_SYMBOL_QUEUE_OVERFLOW    (1<<0)
 
-#define FDI_RXA_IIR             0xf0014
-#define FDI_RXA_IMR             0xf0018
-#define FDI_RXB_IIR             0xf1014
-#define FDI_RXB_IMR             0xf1018
-#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
-#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
+#define _FDI_RXA_IIR             0xf0014
+#define _FDI_RXA_IMR             0xf0018
+#define _FDI_RXB_IIR             0xf1014
+#define _FDI_RXB_IMR             0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
 
 #define FDI_PLL_CTL_1           0xfe000
 #define FDI_PLL_CTL_2           0xfe004
@@ -3143,11 +3184,15 @@
 #define  ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
 #define  ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 
+#define ADPA_PIPE_ENABLED(V, P) \
+	(((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
+
 /* or SDVOB */
 #define HDMIB   0xe1140
 #define  PORT_ENABLE    (1 << 31)
 #define  TRANSCODER_A   (0)
 #define  TRANSCODER_B   (1 << 30)
+#define  TRANSCODER_MASK   (1 << 30)
 #define  COLOR_FORMAT_8bpc      (0)
 #define  COLOR_FORMAT_12bpc     (3 << 26)
 #define  SDVOB_HOTPLUG_ENABLE   (1 << 23)
@@ -3163,6 +3208,9 @@
 #define  HSYNC_ACTIVE_HIGH      (1 << 3)
 #define  PORT_DETECTED          (1 << 2)
 
+#define HDMI_PIPE_ENABLED(V, P) \
+	(((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
+
 /* PCH SDVOB multiplex with HDMIB */
 #define PCH_SDVOB	HDMIB
 
@@ -3238,6 +3286,7 @@
 #define  TRANS_DP_PORT_SEL_B	(0<<29)
 #define  TRANS_DP_PORT_SEL_C	(1<<29)
 #define  TRANS_DP_PORT_SEL_D	(2<<29)
+#define  TRANS_DP_PORT_SEL_NONE	(3<<29)
 #define  TRANS_DP_PORT_SEL_MASK	(3<<29)
 #define  TRANS_DP_AUDIO_ONLY	(1<<26)
 #define  TRANS_DP_ENH_FRAMING	(1<<18)
@@ -3269,6 +3318,8 @@
 #define  FORCEWAKE				0xA18C
 #define  FORCEWAKE_ACK				0x130090
 
+#define  GT_FIFO_FREE_ENTRIES			0x120008
+
 #define GEN6_RPNSWREQ				0xA008
 #define   GEN6_TURBO_DISABLE			(1<<31)
 #define   GEN6_FREQUENCY(x)			((x)<<25)
@@ -3286,15 +3337,28 @@
 #define GEN6_RP_DOWN_TIMEOUT			0xA010
 #define GEN6_RP_INTERRUPT_LIMITS		0xA014
 #define GEN6_RPSTAT1				0xA01C
+#define   GEN6_CAGF_SHIFT			8
+#define   GEN6_CAGF_MASK			(0x7f << GEN6_CAGF_SHIFT)
 #define GEN6_RP_CONTROL				0xA024
 #define   GEN6_RP_MEDIA_TURBO			(1<<11)
 #define   GEN6_RP_USE_NORMAL_FREQ		(1<<9)
 #define   GEN6_RP_MEDIA_IS_GFX			(1<<8)
 #define   GEN6_RP_ENABLE			(1<<7)
-#define   GEN6_RP_UP_BUSY_MAX			(0x2<<3)
-#define   GEN6_RP_DOWN_BUSY_MIN			(0x2<<0)
+#define   GEN6_RP_UP_IDLE_MIN			(0x1<<3)
+#define   GEN6_RP_UP_BUSY_AVG			(0x2<<3)
+#define   GEN6_RP_UP_BUSY_CONT			(0x4<<3)
+#define   GEN6_RP_DOWN_IDLE_CONT		(0x1<<0)
 #define GEN6_RP_UP_THRESHOLD			0xA02C
 #define GEN6_RP_DOWN_THRESHOLD			0xA030
+#define GEN6_RP_CUR_UP_EI			0xA050
+#define   GEN6_CURICONT_MASK			0xffffff
+#define GEN6_RP_CUR_UP				0xA054
+#define   GEN6_CURBSYTAVG_MASK			0xffffff
+#define GEN6_RP_PREV_UP				0xA058
+#define GEN6_RP_CUR_DOWN_EI			0xA05C
+#define   GEN6_CURIAVG_MASK			0xffffff
+#define GEN6_RP_CUR_DOWN			0xA060
+#define GEN6_RP_PREV_DOWN			0xA064
 #define GEN6_RP_UP_EI				0xA068
 #define GEN6_RP_DOWN_EI				0xA06C
 #define GEN6_RP_IDLE_HYSTERSIS			0xA070
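The underscore renames above all feed the _PIPE() helper near the top of i915_reg.h, which interpolates between the pipe A and pipe B offsets so a single FOO(pipe) macro replaces the old FOO_A/FOO_B pairs. The following is a rough, self-contained sketch (not part of this patch; the _PIPE() definition shown is assumed from that header) of how a per-pipe lookup such as TRANS_HTOTAL(pipe) resolves:

/* Sketch only -- assumes _PIPE() linearly interpolates between the
 * pipe A and pipe B register offsets, as the macros above rely on.
 */
#include <stdio.h>

#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

#define _TRANS_HTOTAL_A 0xe0000
#define _TRANS_HTOTAL_B 0xe1000
#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)

int main(void)
{
	printf("TRANS_HTOTAL(0) = 0x%x\n", TRANS_HTOTAL(0)); /* pipe A: 0xe0000 */
	printf("TRANS_HTOTAL(1) = 0x%x\n", TRANS_HTOTAL(1)); /* pipe B: 0xe1000 */
	return 0;
}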
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0521ecf26017..7e992a8e9098 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,11 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32	dpll_reg;
 
-	if (HAS_PCH_SPLIT(dev)) {
-		dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
-	} else {
-		dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
-	}
+	if (HAS_PCH_SPLIT(dev))
+		dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
+	else
+		dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
 
 	return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
 }
@@ -46,7 +45,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
 static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
 	u32 *array;
 	int i;
 
@@ -54,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
 		return;
 
 	if (HAS_PCH_SPLIT(dev))
-		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
 		array = dev_priv->save_palette_a;
@@ -68,7 +67,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
 static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
 	u32 *array;
 	int i;
 
@@ -76,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
 		return;
 
 	if (HAS_PCH_SPLIT(dev))
-		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
 		array = dev_priv->save_palette_a;
@@ -241,12 +240,12 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 		return;
 
 	/* Cursor state */
-	dev_priv->saveCURACNTR = I915_READ(CURACNTR);
-	dev_priv->saveCURAPOS = I915_READ(CURAPOS);
-	dev_priv->saveCURABASE = I915_READ(CURABASE);
-	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
-	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
-	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+	dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
+	dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
+	dev_priv->saveCURABASE = I915_READ(_CURABASE);
+	dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
+	dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
+	dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
 	if (IS_GEN2(dev))
 		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
 
@@ -256,118 +255,118 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	}
 
 	/* Pipe & plane A info */
-	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
-	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+	dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
+	dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
-		dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
-		dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
+		dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
+		dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
+		dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
 	} else {
-		dev_priv->saveFPA0 = I915_READ(FPA0);
-		dev_priv->saveFPA1 = I915_READ(FPA1);
-		dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+		dev_priv->saveFPA0 = I915_READ(_FPA0);
+		dev_priv->saveFPA1 = I915_READ(_FPA1);
+		dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
 	}
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
-	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
-	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
-	dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
-	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
-	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
-	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+		dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+	dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
+	dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
+	dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
+	dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
+	dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
+	dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
 	if (!HAS_PCH_SPLIT(dev))
-		dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+		dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
-		dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
-		dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
-		dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
-
-		dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
-		dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
-
-		dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
-		dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
-		dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
-
-		dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
-		dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
-		dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
-		dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
-		dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
-		dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
-		dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
-	}
-
-	dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
-	dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
-	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
-	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
-	dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+		dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+		dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+		dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+		dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+		dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+		dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+		dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+		dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+		dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+		dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
+		dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+		dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+		dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+		dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+		dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+		dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+	}
+
+	dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
+	dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+	dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
+	dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
+	dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
-		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+		dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
+		dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
 	}
 	i915_save_palette(dev, PIPE_A);
-	dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
+	dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
 
 	/* Pipe & plane B info */
-	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
-	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+	dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
+	dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
-		dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
-		dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
+		dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
+		dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
+		dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
 	} else {
-		dev_priv->saveFPB0 = I915_READ(FPB0);
-		dev_priv->saveFPB1 = I915_READ(FPB1);
-		dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+		dev_priv->saveFPB0 = I915_READ(_FPB0);
+		dev_priv->saveFPB1 = I915_READ(_FPB1);
+		dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
 	}
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
-	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
-	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
-	dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
-	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
-	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
-	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+		dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+	dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
+	dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
+	dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
+	dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
+	dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
+	dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
 	if (!HAS_PCH_SPLIT(dev))
-		dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+		dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
-		dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
-		dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
-		dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
-
-		dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
-		dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
-
-		dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
-		dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
-		dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
-
-		dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
-		dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
-		dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
-		dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
-		dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
-		dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
-		dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
-	}
-
-	dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
-	dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
-	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
-	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
-	dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+		dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+		dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+		dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+		dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+		dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+		dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+		dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+		dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+		dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+		dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
+		dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+		dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+		dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+		dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+		dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+		dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+	}
+
+	dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
+	dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+	dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
+	dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
+	dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
-		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+		dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
+		dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
 	}
 	i915_save_palette(dev, PIPE_B);
-	dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+	dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
@@ -426,19 +425,19 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 
 
 	if (HAS_PCH_SPLIT(dev)) {
-		dpll_a_reg = PCH_DPLL_A;
-		dpll_b_reg = PCH_DPLL_B;
-		fpa0_reg = PCH_FPA0;
-		fpb0_reg = PCH_FPB0;
-		fpa1_reg = PCH_FPA1;
-		fpb1_reg = PCH_FPB1;
+		dpll_a_reg = _PCH_DPLL_A;
+		dpll_b_reg = _PCH_DPLL_B;
+		fpa0_reg = _PCH_FPA0;
+		fpb0_reg = _PCH_FPB0;
+		fpa1_reg = _PCH_FPA1;
+		fpb1_reg = _PCH_FPB1;
 	} else {
-		dpll_a_reg = DPLL_A;
-		dpll_b_reg = DPLL_B;
-		fpa0_reg = FPA0;
-		fpb0_reg = FPB0;
-		fpa1_reg = FPA1;
-		fpb1_reg = FPB1;
+		dpll_a_reg = _DPLL_A;
+		dpll_b_reg = _DPLL_B;
+		fpa0_reg = _FPA0;
+		fpb0_reg = _FPB0;
+		fpa1_reg = _FPA1;
+		fpb1_reg = _FPB1;
 	}
 
 	if (HAS_PCH_SPLIT(dev)) {
@@ -461,60 +460,60 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	POSTING_READ(dpll_a_reg);
 	udelay(150);
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
-		POSTING_READ(DPLL_A_MD);
+		I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+		POSTING_READ(_DPLL_A_MD);
 	}
 	udelay(150);
 
 	/* Restore mode */
-	I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
-	I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
-	I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
-	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
-	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
-	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+	I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
+	I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
+	I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
+	I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
+	I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
+	I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
 	if (!HAS_PCH_SPLIT(dev))
-		I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+		I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
-		I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
-		I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
-		I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+		I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
+		I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
+		I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
+		I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
 
-		I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
-		I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+		I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
+		I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
 
-		I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
-		I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
-		I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+		I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
+		I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
+		I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
 
-		I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
-		I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
-		I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
-		I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
-		I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
-		I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
-		I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+		I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
+		I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
+		I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
+		I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
+		I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
+		I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
+		I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
 	}
 
 	/* Restore plane info */
-	I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
-	I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
-	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
-	I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
-	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+	I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
+	I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
+	I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
+	I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
+	I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
-		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+		I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
+		I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
 	}
 
-	I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+	I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
 
 	i915_restore_palette(dev, PIPE_A);
 	/* Enable the plane */
-	I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
-	I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
+	I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+	I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
 
 	/* Pipe & plane B info */
 	if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
@@ -530,68 +529,68 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	POSTING_READ(dpll_b_reg);
 	udelay(150);
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
-		POSTING_READ(DPLL_B_MD);
+		I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+		POSTING_READ(_DPLL_B_MD);
 	}
 	udelay(150);
 
 	/* Restore mode */
-	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
-	I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
-	I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
-	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
-	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
-	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+	I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
+	I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
+	I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
+	I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
+	I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
+	I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
 	if (!HAS_PCH_SPLIT(dev))
-		I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+		I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
-		I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
-		I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
-		I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+		I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
+		I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
+		I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
+		I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
 
-		I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
-		I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+		I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
+		I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
 
-		I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
-		I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
-		I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+		I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
+		I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
+		I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
 
-		I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
-		I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
-		I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
-		I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
-		I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
-		I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
-		I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+		I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
+		I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
+		I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
+		I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
+		I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
+		I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
+		I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
 	}
 
 	/* Restore plane info */
-	I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
-	I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
-	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
-	I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
-	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+	I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
+	I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
+	I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
+	I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
+	I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
-		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+		I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
+		I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
 	}
 
-	I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+	I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
 
 	i915_restore_palette(dev, PIPE_B);
 	/* Enable the plane */
-	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
-	I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+	I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+	I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
 
 	/* Cursor state */
-	I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
-	I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
-	I915_WRITE(CURABASE, dev_priv->saveCURABASE);
-	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
-	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
-	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+	I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
+	I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
+	I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
+	I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
+	I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
+	I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
 	if (IS_GEN2(dev))
 		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
 
@@ -653,14 +652,14 @@ void i915_save_display(struct drm_device *dev)
 		dev_priv->saveDP_B = I915_READ(DP_B);
 		dev_priv->saveDP_C = I915_READ(DP_C);
 		dev_priv->saveDP_D = I915_READ(DP_D);
-		dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
-		dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
-		dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
-		dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
-		dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
-		dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
-		dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
-		dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
+		dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+		dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+		dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+		dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+		dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+		dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+		dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+		dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
 	}
 	/* FIXME: save TV & SDVO state */
 
@@ -699,14 +698,14 @@ void i915_restore_display(struct drm_device *dev)
 
 	/* Display port ratios (must be done before clock is set) */
 	if (SUPPORTS_INTEGRATED_DP(dev)) {
-		I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
-		I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
-		I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
-		I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
-		I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
-		I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
-		I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
-		I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+		I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+		I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+		I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+		I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+		I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+		I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+		I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+		I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
 	}
 
 	/* This is only meaningful in non-KMS mode */
@@ -797,9 +796,6 @@ int i915_save_state(struct drm_device *dev)
 
 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
-	/* Hardware status page */
-	dev_priv->saveHWS = I915_READ(HWS_PGA);
-
 	i915_save_display(dev);
 
 	/* Interrupt state */
@@ -808,8 +804,8 @@ int i915_save_state(struct drm_device *dev)
 		dev_priv->saveDEIMR = I915_READ(DEIMR);
 		dev_priv->saveGTIER = I915_READ(GTIER);
 		dev_priv->saveGTIMR = I915_READ(GTIMR);
-		dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
-		dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+		dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+		dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
 		dev_priv->saveMCHBAR_RENDER_STANDBY =
 			I915_READ(RSTDBYCTL);
 	} else {
@@ -846,9 +842,6 @@ int i915_restore_state(struct drm_device *dev)
 
 	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
-	/* Hardware status page */
-	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
 	i915_restore_display(dev);
 
 	/* Interrupt state */
@@ -857,11 +850,11 @@ int i915_restore_state(struct drm_device *dev)
 		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
 		I915_WRITE(GTIER, dev_priv->saveGTIER);
 		I915_WRITE(GTIMR, dev_priv->saveGTIMR);
-		I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
-		I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+		I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
+		I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
 	} else {
-		I915_WRITE (IER, dev_priv->saveIER);
-		I915_WRITE (IMR,  dev_priv->saveIMR);
+		I915_WRITE(IER, dev_priv->saveIER);
+		I915_WRITE(IMR, dev_priv->saveIMR);
 	}
 
 	/* Clock gating state */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 7f0fc3ed61aa..d623fefbfaca 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -7,6 +7,7 @@
 
 #include <drm/drmP.h>
 #include "i915_drv.h"
+#include "intel_ringbuffer.h"
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM i915
@@ -16,9 +17,7 @@
 /* object tracking */
 
 TRACE_EVENT(i915_gem_object_create,
-
 	    TP_PROTO(struct drm_i915_gem_object *obj),
-
 	    TP_ARGS(obj),
 
 	    TP_STRUCT__entry(
@@ -35,33 +34,51 @@ TRACE_EVENT(i915_gem_object_create,
 );
 
 TRACE_EVENT(i915_gem_object_bind,
-
-	    TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
-
-	    TP_ARGS(obj, gtt_offset, mappable),
+	    TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
+	    TP_ARGS(obj, mappable),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
-			     __field(u32, gtt_offset)
+			     __field(u32, offset)
+			     __field(u32, size)
 			     __field(bool, mappable)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->obj = obj;
-			   __entry->gtt_offset = gtt_offset;
+			   __entry->offset = obj->gtt_space->start;
+			   __entry->size = obj->gtt_space->size;
 			   __entry->mappable = mappable;
 			   ),
 
-	    TP_printk("obj=%p, gtt_offset=%08x%s",
-		      __entry->obj, __entry->gtt_offset,
+	    TP_printk("obj=%p, offset=%08x size=%x%s",
+		      __entry->obj, __entry->offset, __entry->size,
 		      __entry->mappable ? ", mappable" : "")
 );
 
-TRACE_EVENT(i915_gem_object_change_domain,
+TRACE_EVENT(i915_gem_object_unbind,
+	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, size)
+			     ),
 
-	    TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->offset = obj->gtt_space->start;
+			   __entry->size = obj->gtt_space->size;
+			   ),
 
-	    TP_ARGS(obj, old_read_domains, old_write_domain),
+	    TP_printk("obj=%p, offset=%08x size=%x",
+		      __entry->obj, __entry->offset, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
+	    TP_ARGS(obj, old_read, old_write),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
@@ -71,177 +88,264 @@ TRACE_EVENT(i915_gem_object_change_domain,
 
 	    TP_fast_assign(
 			   __entry->obj = obj;
-			   __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
-			   __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
+			   __entry->read_domains = obj->base.read_domains | (old_read << 16);
+			   __entry->write_domain = obj->base.write_domain | (old_write << 16);
 			   ),
 
-	    TP_printk("obj=%p, read=%04x, write=%04x",
+	    TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
 		      __entry->obj,
-		      __entry->read_domains, __entry->write_domain)
+		      __entry->read_domains >> 16,
+		      __entry->read_domains & 0xffff,
+		      __entry->write_domain >> 16,
+		      __entry->write_domain & 0xffff)
 );
 
-DECLARE_EVENT_CLASS(i915_gem_object,
+TRACE_EVENT(i915_gem_object_pwrite,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+	    TP_ARGS(obj, offset, len),
 
-	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, len)
+			     ),
 
-	    TP_ARGS(obj),
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->offset = offset;
+			   __entry->len = len;
+			   ),
+
+	    TP_printk("obj=%p, offset=%u, len=%u",
+		      __entry->obj, __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(i915_gem_object_pread,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+	    TP_ARGS(obj, offset, len),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, len)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->obj = obj;
+			   __entry->offset = offset;
+			   __entry->len = len;
 			   ),
 
-	    TP_printk("obj=%p", __entry->obj)
+	    TP_printk("obj=%p, offset=%u, len=%u",
+		      __entry->obj, __entry->offset, __entry->len)
 );
 
-DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+TRACE_EVENT(i915_gem_object_fault,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+	    TP_ARGS(obj, index, gtt, write),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, index)
+			     __field(bool, gtt)
+			     __field(bool, write)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->index = index;
+			   __entry->gtt = gtt;
+			   __entry->write = write;
+			   ),
 
+	    TP_printk("obj=%p, %s index=%u %s",
+		      __entry->obj,
+		      __entry->gtt ? "GTT" : "CPU",
+		      __entry->index,
+		      __entry->write ? ", writable" : "")
+);
+
+DECLARE_EVENT_CLASS(i915_gem_object,
 	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj),
 
-	    TP_ARGS(obj)
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   ),
+
+	    TP_printk("obj=%p", __entry->obj)
 );
 
-DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+	     TP_PROTO(struct drm_i915_gem_object *obj),
+	     TP_ARGS(obj)
+);
 
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 	    TP_PROTO(struct drm_i915_gem_object *obj),
-
 	    TP_ARGS(obj)
 );
 
-DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+TRACE_EVENT(i915_gem_evict,
+	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
+	    TP_ARGS(dev, size, align, mappable),
 
-	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, size)
+			     __field(u32, align)
+			     __field(bool, mappable)
+			    ),
 
-	    TP_ARGS(obj)
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->size = size;
+			   __entry->align = align;
+			   __entry->mappable = mappable;
+			  ),
+
+	    TP_printk("dev=%d, size=%d, align=%d %s",
+		      __entry->dev, __entry->size, __entry->align,
+		      __entry->mappable ? ", mappable" : "")
 );
 
-/* batch tracing */
+TRACE_EVENT(i915_gem_evict_everything,
+	    TP_PROTO(struct drm_device *dev, bool purgeable),
+	    TP_ARGS(dev, purgeable),
 
-TRACE_EVENT(i915_gem_request_submit,
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(bool, purgeable)
+			    ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->purgeable = purgeable;
+			  ),
 
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
+	    TP_printk("dev=%d%s",
+		      __entry->dev,
+		      __entry->purgeable ? ", purgeable only" : "")
+);
 
-	    TP_ARGS(dev, seqno),
+TRACE_EVENT(i915_gem_ring_dispatch,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
+			     __field(u32, ring)
 			     __field(u32, seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
 			   __entry->seqno = seqno;
-			   i915_trace_irq_get(dev, seqno);
+			   i915_trace_irq_get(ring, seqno);
 			   ),
 
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
 );
 
-TRACE_EVENT(i915_gem_request_flush,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno,
-		     u32 flush_domains, u32 invalidate_domains),
-
-	    TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+TRACE_EVENT(i915_gem_ring_flush,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+	    TP_ARGS(ring, invalidate, flush),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(u32, seqno)
-			     __field(u32, flush_domains)
-			     __field(u32, invalidate_domains)
+			     __field(u32, ring)
+			     __field(u32, invalidate)
+			     __field(u32, flush)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
-			   __entry->seqno = seqno;
-			   __entry->flush_domains = flush_domains;
-			   __entry->invalidate_domains = invalidate_domains;
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->invalidate = invalidate;
+			   __entry->flush = flush;
 			   ),
 
-	    TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
-		      __entry->dev, __entry->seqno,
-		      __entry->flush_domains, __entry->invalidate_domains)
+	    TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
+		      __entry->dev, __entry->ring,
+		      __entry->invalidate, __entry->flush)
 );
 
 DECLARE_EVENT_CLASS(i915_gem_request,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	    TP_ARGS(dev, seqno),
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
+			     __field(u32, ring)
 			     __field(u32, seqno)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
 			   __entry->seqno = seqno;
 			   ),
 
-	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
+);
 
-	    TP_ARGS(dev, seqno)
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	    TP_ARGS(dev, seqno)
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	    TP_ARGS(dev, seqno)
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-
-	    TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	    TP_ARGS(dev, seqno)
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
 );
 
 DECLARE_EVENT_CLASS(i915_ring,
-
-	    TP_PROTO(struct drm_device *dev),
-
-	    TP_ARGS(dev),
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
+			     __field(u32, ring)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
 			   ),
 
-	    TP_printk("dev=%u", __entry->dev)
+	    TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
 );
 
 DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-
-	    TP_PROTO(struct drm_device *dev),
-
-	    TP_ARGS(dev)
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring)
 );
 
 DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-
-	    TP_PROTO(struct drm_device *dev),
-
-	    TP_ARGS(dev)
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring)
 );
 
 TRACE_EVENT(i915_flip_request,
@@ -281,26 +385,29 @@ TRACE_EVENT(i915_flip_complete,
 );
 
 TRACE_EVENT(i915_reg_rw,
-           TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+           TP_PROTO(bool write, u32 reg, u64 val, int len),
 
-           TP_ARGS(cmd, reg, val, len),
+           TP_ARGS(write, reg, val, len),
 
            TP_STRUCT__entry(
-                   __field(int, cmd)
-                   __field(uint32_t, reg)
-                   __field(uint64_t, val)
-                   __field(int, len)
+                   __field(u64, val)
+                   __field(u32, reg)
+                   __field(u16, write)
+                   __field(u16, len)
                    ),
 
            TP_fast_assign(
-                   __entry->cmd = cmd;
+                   __entry->val = (u64)val;
                    __entry->reg = reg;
-                   __entry->val = (uint64_t)val;
+                   __entry->write = write;
                    __entry->len = len;
                    ),
 
-           TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
-                     __entry->cmd, __entry->reg, __entry->val, __entry->len)
+           TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+                     __entry->write ? "write" : "read",
+		     __entry->reg, __entry->len,
+		     (u32)(__entry->val & 0xffffffff),
+		     (u32)(__entry->val >> 32))
 );
 
 #endif /* _I915_TRACE_H_ */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0b44956c336b..fb5b4d426ae0 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -226,29 +226,49 @@ static void
 parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
 		      struct bdb_header *bdb)
 {
-	struct bdb_sdvo_lvds_options *sdvo_lvds_options;
 	struct lvds_dvo_timing *dvo_timing;
 	struct drm_display_mode *panel_fixed_mode;
+	int index;
 
-	sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
-	if (!sdvo_lvds_options)
-		return;
+	index = i915_vbt_sdvo_panel_type;
+	if (index == -1) {
+		struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+		sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+		if (!sdvo_lvds_options)
+			return;
+
+		index = sdvo_lvds_options->panel_type;
+	}
 
 	dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
 	if (!dvo_timing)
 		return;
 
 	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
-
 	if (!panel_fixed_mode)
 		return;
 
-	fill_detail_timing_data(panel_fixed_mode,
-			dvo_timing + sdvo_lvds_options->panel_type);
+	fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
 
 	dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
 
-	return;
+	DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+	drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+static int intel_bios_ssc_frequency(struct drm_device *dev,
+				    bool alternate)
+{
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+		return alternate ? 66 : 48;
+	case 3:
+	case 4:
+		return alternate ? 100 : 96;
+	default:
+		return alternate ? 100 : 120;
+	}
 }
 
 static void
@@ -263,13 +283,8 @@ parse_general_features(struct drm_i915_private *dev_priv,
 		dev_priv->int_tv_support = general->int_tv_support;
 		dev_priv->int_crt_support = general->int_crt_support;
 		dev_priv->lvds_use_ssc = general->enable_ssc;
-
-		if (IS_I85X(dev))
-			dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
-		else if (IS_GEN5(dev) || IS_GEN6(dev))
-			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
-		else
-			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+		dev_priv->lvds_ssc_freq =
+			intel_bios_ssc_frequency(dev, general->ssc_freq);
 	}
 }
 
@@ -553,6 +568,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 static void
 init_vbt_defaults(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = dev_priv->dev;
+
 	dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
 
 	/* LFP panel data */
@@ -565,7 +582,11 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 	/* general features */
 	dev_priv->int_tv_support = 1;
 	dev_priv->int_crt_support = 1;
-	dev_priv->lvds_use_ssc = 0;
+
+	/* Default to using SSC */
+	dev_priv->lvds_use_ssc = 1;
+	dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+	DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
 
 	/* eDP data */
 	dev_priv->edp.bpp = 18;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8a77ff4a7237..8342259f3160 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -129,10 +129,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	u32 adpa, dpll_md;
 	u32 adpa_reg;
 
-	if (intel_crtc->pipe == 0)
-		dpll_md_reg = DPLL_A_MD;
-	else
-		dpll_md_reg = DPLL_B_MD;
+	dpll_md_reg = DPLL_MD(intel_crtc->pipe);
 
 	if (HAS_PCH_SPLIT(dev))
 		adpa_reg = PCH_ADPA;
@@ -160,17 +157,16 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 			adpa |= PORT_TRANS_A_SEL_CPT;
 		else
 			adpa |= ADPA_PIPE_A_SELECT;
-		if (!HAS_PCH_SPLIT(dev))
-			I915_WRITE(BCLRPAT_A, 0);
 	} else {
 		if (HAS_PCH_CPT(dev))
 			adpa |= PORT_TRANS_B_SEL_CPT;
 		else
 			adpa |= ADPA_PIPE_B_SELECT;
-		if (!HAS_PCH_SPLIT(dev))
-			I915_WRITE(BCLRPAT_B, 0);
 	}
 
+	if (!HAS_PCH_SPLIT(dev))
+		I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+
 	I915_WRITE(adpa_reg, adpa);
 }
 
@@ -353,21 +349,12 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
 
 	DRM_DEBUG_KMS("starting load-detect on CRT\n");
 
-	if (pipe == 0) {
-		bclrpat_reg = BCLRPAT_A;
-		vtotal_reg = VTOTAL_A;
-		vblank_reg = VBLANK_A;
-		vsync_reg = VSYNC_A;
-		pipeconf_reg = PIPEACONF;
-		pipe_dsl_reg = PIPEADSL;
-	} else {
-		bclrpat_reg = BCLRPAT_B;
-		vtotal_reg = VTOTAL_B;
-		vblank_reg = VBLANK_B;
-		vsync_reg = VSYNC_B;
-		pipeconf_reg = PIPEBCONF;
-		pipe_dsl_reg = PIPEBDSL;
-	}
+	bclrpat_reg = BCLRPAT(pipe);
+	vtotal_reg = VTOTAL(pipe);
+	vblank_reg = VBLANK(pipe);
+	vsync_reg = VSYNC(pipe);
+	pipeconf_reg = PIPECONF(pipe);
+	pipe_dsl_reg = PIPEDSL(pipe);
 
 	save_bclrpat = I915_READ(bclrpat_reg);
 	save_vtotal = I915_READ(vtotal_reg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7e42aa586504..3106c0dc8389 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -989,7 +989,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
+	int pipestat_reg = PIPESTAT(pipe);
 
 	/* Clear existing vblank status. Note this will clear any other
 	 * sticky status fields as well.
@@ -1058,6 +1058,612 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 	}
 }
 
+static const char *state_string(bool enabled)
+{
+	return enabled ? "on" : "off";
+}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+		       enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = DPLL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & DPLL_VCO_ENABLE);
+	WARN(cur_state != state,
+	     "PLL state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+
+/* For ILK+ */
+static void assert_pch_pll(struct drm_i915_private *dev_priv,
+			   enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = PCH_DPLL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & DPLL_VCO_ENABLE);
+	WARN(cur_state != state,
+	     "PCH PLL state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
+#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = FDI_TX_CTL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & FDI_TX_ENABLE);
+	WARN(cur_state != state,
+	     "FDI TX state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = FDI_RX_CTL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & FDI_RX_ENABLE);
+	WARN(cur_state != state,
+	     "FDI RX state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* ILK FDI PLL is always enabled */
+	if (dev_priv->info->gen == 5)
+		return;
+
+	reg = FDI_TX_CTL(pipe);
+	val = I915_READ(reg);
+	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	reg = FDI_RX_CTL(pipe);
+	val = I915_READ(reg);
+	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+				  enum pipe pipe)
+{
+	int pp_reg, lvds_reg;
+	u32 val;
+	enum pipe panel_pipe = PIPE_A;
+	bool locked = true;
+
+	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+		pp_reg = PCH_PP_CONTROL;
+		lvds_reg = PCH_LVDS;
+	} else {
+		pp_reg = PP_CONTROL;
+		lvds_reg = LVDS;
+	}
+
+	val = I915_READ(pp_reg);
+	if (!(val & PANEL_POWER_ON) ||
+	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+		locked = false;
+
+	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
+		panel_pipe = PIPE_B;
+
+	WARN(panel_pipe == pipe && locked,
+	     "panel assertion failure, pipe %c regs locked\n",
+	     pipe_name(pipe));
+}
+
+static void assert_pipe(struct drm_i915_private *dev_priv,
+			enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = PIPECONF(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & PIPECONF_ENABLE);
+	WARN(cur_state != state,
+	     "pipe %c assertion failure (expected %s, current %s)\n",
+	     pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+static void assert_plane_enabled(struct drm_i915_private *dev_priv,
+				 enum plane plane)
+{
+	int reg;
+	u32 val;
+
+	reg = DSPCNTR(plane);
+	val = I915_READ(reg);
+	WARN(!(val & DISPLAY_PLANE_ENABLE),
+	     "plane %c assertion failure, should be active but is disabled\n",
+	     plane_name(plane));
+}
+
+static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+				   enum pipe pipe)
+{
+	int reg, i;
+	u32 val;
+	int cur_pipe;
+
+	/* Planes are fixed to pipes on ILK+ */
+	if (HAS_PCH_SPLIT(dev_priv->dev))
+		return;
+
+	/* Need to check both planes against the pipe */
+	for (i = 0; i < 2; i++) {
+		reg = DSPCNTR(i);
+		val = I915_READ(reg);
+		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+			DISPPLANE_SEL_PIPE_SHIFT;
+		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
+		     plane_name(i), pipe_name(pipe));
+	}
+}
+
+static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+	bool enabled;
+
+	val = I915_READ(PCH_DREF_CONTROL);
+	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+			    DREF_SUPERSPREAD_SOURCE_MASK));
+	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
+				       enum pipe pipe)
+{
+	int reg;
+	u32 val;
+	bool enabled;
+
+	reg = TRANSCONF(pipe);
+	val = I915_READ(reg);
+	enabled = !!(val & TRANS_ENABLE);
+	WARN(enabled,
+	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
+	     pipe_name(pipe));
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+				   enum pipe pipe, int reg)
+{
+	u32 val = I915_READ(reg);
+	WARN(DP_PIPE_ENABLED(val, pipe),
+	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+	     reg, pipe_name(pipe));
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+				     enum pipe pipe, int reg)
+{
+	u32 val = I915_READ(reg);
+	WARN(HDMI_PIPE_ENABLED(val, pipe),
+	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+	     reg, pipe_name(pipe));
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
+	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
+	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
+
+	reg = PCH_ADPA;
+	val = I915_READ(reg);
+	WARN(ADPA_PIPE_ENABLED(val, pipe),
+	     "PCH VGA enabled on transcoder %c, should be disabled\n",
+	     pipe_name(pipe));
+
+	reg = PCH_LVDS;
+	val = I915_READ(reg);
+	WARN(LVDS_PIPE_ENABLED(val, pipe),
+	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
+	     pipe_name(pipe));
+
+	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
+	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
+	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
+}
+
+/**
+ * intel_enable_pll - enable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
+ * make sure the PLL reg is writable first though, since the panel write
+ * protect mechanism may be enabled.
+ *
+ * Note!  This is for pre-ILK only.
+ */
+static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* No really, not for ILK+ */
+	BUG_ON(dev_priv->info->gen >= 5);
+
+	/* PLL is protected by panel, make sure we can write it */
+	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
+		assert_panel_unlocked(dev_priv, pipe);
+
+	reg = DPLL(pipe);
+	val = I915_READ(reg);
+	val |= DPLL_VCO_ENABLE;
+
+	/* We do this three times for luck */
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+}
+
+/**
+ * intel_disable_pll - disable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe, making sure the pipe is off first.
+ *
+ * Note!  This is for pre-ILK only.
+ */
+static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* Don't disable pipe A or pipe A PLLs if needed */
+	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+		return;
+
+	/* Make sure the pipe isn't still relying on us */
+	assert_pipe_disabled(dev_priv, pipe);
+
+	reg = DPLL(pipe);
+	val = I915_READ(reg);
+	val &= ~DPLL_VCO_ENABLE;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+/**
+ * intel_enable_pch_pll - enable PCH PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
+				 enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* PCH only available on ILK+ */
+	BUG_ON(dev_priv->info->gen < 5);
+
+	/* PCH refclock must be enabled first */
+	assert_pch_refclk_enabled(dev_priv);
+
+	reg = PCH_DPLL(pipe);
+	val = I915_READ(reg);
+	val |= DPLL_VCO_ENABLE;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(200);
+}
+
+static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
+				  enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* PCH only available on ILK+ */
+	BUG_ON(dev_priv->info->gen < 5);
+
+	/* Make sure transcoder isn't still depending on us */
+	assert_transcoder_disabled(dev_priv, pipe);
+
+	reg = PCH_DPLL(pipe);
+	val = I915_READ(reg);
+	val &= ~DPLL_VCO_ENABLE;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(200);
+}
+
+static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
+				    enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* PCH only available on ILK+ */
+	BUG_ON(dev_priv->info->gen < 5);
+
+	/* Make sure PCH DPLL is enabled */
+	assert_pch_pll_enabled(dev_priv, pipe);
+
+	/* FDI must be feeding us bits for PCH ports */
+	assert_fdi_tx_enabled(dev_priv, pipe);
+	assert_fdi_rx_enabled(dev_priv, pipe);
+
+	reg = TRANSCONF(pipe);
+	val = I915_READ(reg);
+	/*
+	 * make the BPC in transcoder be consistent with
+	 * that in pipeconf reg.
+	 */
+	val &= ~PIPE_BPC_MASK;
+	val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+	I915_WRITE(reg, val | TRANS_ENABLE);
+	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+		DRM_ERROR("failed to enable transcoder %d\n", pipe);
+}
+
+static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
+				     enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* FDI relies on the transcoder */
+	assert_fdi_tx_disabled(dev_priv, pipe);
+	assert_fdi_rx_disabled(dev_priv, pipe);
+
+	/* Ports must be off as well */
+	assert_pch_ports_disabled(dev_priv, pipe);
+
+	reg = TRANSCONF(pipe);
+	val = I915_READ(reg);
+	val &= ~TRANS_ENABLE;
+	I915_WRITE(reg, val);
+	/* wait for PCH transcoder off, transcoder state */
+	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+		DRM_ERROR("failed to disable transcoder\n");
+}
+
+/**
+ * intel_enable_pipe - enable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to enable
+ * @pch_port: on ILK+, is this pipe driving a PCH port or not
+ *
+ * Enable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe is actually running (i.e. first vblank) before
+ * returning.
+ */
+static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+			      bool pch_port)
+{
+	int reg;
+	u32 val;
+
+	/*
+	 * A pipe without a PLL won't actually be able to drive bits from
+	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
+	 * need the check.
+	 */
+	if (!HAS_PCH_SPLIT(dev_priv->dev))
+		assert_pll_enabled(dev_priv, pipe);
+	else {
+		if (pch_port) {
+			/* if driving the PCH, we need FDI enabled */
+			assert_fdi_rx_pll_enabled(dev_priv, pipe);
+			assert_fdi_tx_pll_enabled(dev_priv, pipe);
+		}
+		/* FIXME: assert CPU port conditions for SNB+ */
+	}
+
+	reg = PIPECONF(pipe);
+	val = I915_READ(reg);
+	val |= PIPECONF_ENABLE;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_disable_pipe - disable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to disable
+ *
+ * Disable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe has shut down before returning.
+ */
+static void intel_disable_pipe(struct drm_i915_private *dev_priv,
+			       enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/*
+	 * Make sure planes won't keep trying to pump pixels to us,
+	 * or we might hang the display.
+	 */
+	assert_planes_disabled(dev_priv, pipe);
+
+	/* Don't disable pipe A or pipe A PLLs if needed */
+	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+		return;
+
+	reg = PIPECONF(pipe);
+	val = I915_READ(reg);
+	val &= ~PIPECONF_ENABLE;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	intel_wait_for_pipe_off(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_enable_plane - enable a display plane on a given pipe
+ * @dev_priv: i915 private structure
+ * @plane: plane to enable
+ * @pipe: pipe being fed
+ *
+ * Enable @plane on @pipe, making sure that @pipe is running first.
+ */
+static void intel_enable_plane(struct drm_i915_private *dev_priv,
+			       enum plane plane, enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* If the pipe isn't enabled, we can't pump pixels and may hang */
+	assert_pipe_enabled(dev_priv, pipe);
+
+	reg = DSPCNTR(plane);
+	val = I915_READ(reg);
+	val |= DISPLAY_PLANE_ENABLE;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch.  The display address reg provides this.
+ */
+static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+				      enum plane plane)
+{
+	u32 reg = DSPADDR(plane);
+	I915_WRITE(reg, I915_READ(reg));
+}
+
+/**
+ * intel_disable_plane - disable a display plane
+ * @dev_priv: i915 private structure
+ * @plane: plane to disable
+ * @pipe: pipe consuming the data
+ *
+ * Disable @plane; should be an independent operation.
+ */
+static void intel_disable_plane(struct drm_i915_private *dev_priv,
+				enum plane plane, enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	reg = DSPCNTR(plane);
+	val = I915_READ(reg);
+	val &= ~DISPLAY_PLANE_ENABLE;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	intel_flush_display_plane(dev_priv, plane);
+	intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+static void disable_pch_dp(struct drm_i915_private *dev_priv,
+			   enum pipe pipe, int reg)
+{
+	u32 val = I915_READ(reg);
+	if (DP_PIPE_ENABLED(val, pipe))
+		I915_WRITE(reg, val & ~DP_PORT_EN);
+}
+
+static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
+			     enum pipe pipe, int reg)
+{
+	u32 val = I915_READ(reg);
+	if (HDMI_PIPE_ENABLED(val, pipe))
+		I915_WRITE(reg, val & ~PORT_ENABLE);
+}
+
+/* Disable any ports connected to this transcoder */
+static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
+				    enum pipe pipe)
+{
+	u32 reg, val;
+
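+	/* Set the panel unlock key in PP_CONTROL so the protected port regs can be written */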
+	val = I915_READ(PCH_PP_CONTROL);
+	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
+
+	disable_pch_dp(dev_priv, pipe, PCH_DP_B);
+	disable_pch_dp(dev_priv, pipe, PCH_DP_C);
+	disable_pch_dp(dev_priv, pipe, PCH_DP_D);
+
+	reg = PCH_ADPA;
+	val = I915_READ(reg);
+	if (ADPA_PIPE_ENABLED(val, pipe))
+		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
+
+	reg = PCH_LVDS;
+	val = I915_READ(reg);
+	if (LVDS_PIPE_ENABLED(val, pipe)) {
+		I915_WRITE(reg, val & ~LVDS_PORT_EN);
+		POSTING_READ(reg);
+		udelay(100);
+	}
+
+	disable_pch_hdmi(dev_priv, pipe, HDMIB);
+	disable_pch_hdmi(dev_priv, pipe, HDMIC);
+	disable_pch_hdmi(dev_priv, pipe, HDMID);
+}
+
 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1219,7 +1825,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 	u32 blt_ecoskpd;
 
 	/* Make sure blitter notifies FBC of writes */
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
 		GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1836,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 			 GEN6_BLITTER_LOCK_SHIFT);
 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 	POSTING_READ(GEN6_BLITTER_ECOSKPD);
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -1390,7 +1996,7 @@ static void intel_update_fbc(struct drm_device *dev)
 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
 	 */
 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
-		if (tmp_crtc->enabled) {
+		if (tmp_crtc->enabled && tmp_crtc->fb) {
 			if (crtc) {
 				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
 				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -1461,6 +2067,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 			   struct drm_i915_gem_object *obj,
 			   struct intel_ring_buffer *pipelined)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 alignment;
 	int ret;
 
@@ -1485,9 +2092,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 		BUG();
 	}
 
+	dev_priv->mm.interruptible = false;
 	ret = i915_gem_object_pin(obj, alignment, true);
 	if (ret)
-		return ret;
+		goto err_interruptible;
 
 	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
 	if (ret)
@@ -1499,15 +2107,18 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	 * a fence as the cost is not that onerous.
 	 */
 	if (obj->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence(obj, pipelined, false);
+		ret = i915_gem_object_get_fence(obj, pipelined);
 		if (ret)
 			goto err_unpin;
 	}
 
+	dev_priv->mm.interruptible = true;
 	return 0;
 
 err_unpin:
 	i915_gem_object_unpin(obj);
+err_interruptible:
+	dev_priv->mm.interruptible = true;
 	return ret;
 }
 
@@ -1630,19 +2241,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
 		wait_event(dev_priv->pending_flip_queue,
+			   atomic_read(&dev_priv->mm.wedged) ||
 			   atomic_read(&obj->pending_flip) == 0);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 		 * current scanout is retired before unpinning the old
 		 * framebuffer.
+		 *
+		 * This should only fail upon a hung GPU, in which case we
+		 * can safely continue.
 		 */
-		ret = i915_gem_object_flush_gpu(obj, false);
-		if (ret) {
-			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
+		ret = i915_gem_object_flush_gpu(obj);
+		(void) ret;
 	}
 
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -1753,8 +2364,13 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
 	u32 reg, temp, tries;
 
+	/* FDI needs bits from pipe & plane first */
+	assert_pipe_enabled(dev_priv, pipe);
+	assert_plane_enabled(dev_priv, plane);
+
 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
 	   for train result */
 	reg = FDI_RX_IMR(pipe);
@@ -1784,7 +2400,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 	udelay(150);
 
 	/* Ironlake workaround, enable clock pointer after FDI enable*/
-	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+	if (HAS_PCH_IBX(dev)) {
+		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+			   FDI_RX_PHASE_SYNC_POINTER_EN);
+	}
 
 	reg = FDI_RX_IIR(pipe);
 	for (tries = 0; tries < 5; tries++) {
@@ -1834,7 +2454,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
 }
 
-static const int const snb_b_fdi_train_param [] = {
+static const int snb_b_fdi_train_param [] = {
 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -2003,12 +2623,60 @@ static void ironlake_fdi_enable(struct drm_crtc *crtc)
 	}
 }
 
-static void intel_flush_display_plane(struct drm_device *dev,
-				      int plane)
+static void ironlake_fdi_disable(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg = DSPADDR(plane);
-	I915_WRITE(reg, I915_READ(reg));
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* disable CPU FDI tx and PCH FDI rx */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+	POSTING_READ(reg);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
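+	/* BPC in FDI rx is consistent with that in PIPECONF */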
+	temp &= ~(0x7 << 16);
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(100);
+
+	/* Ironlake workaround, disable clock pointer after downing FDI */
+	if (HAS_PCH_IBX(dev)) {
+		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+		I915_WRITE(FDI_RX_CHICKEN(pipe),
+			   I915_READ(FDI_RX_CHICKEN(pipe)) &
+			   ~FDI_RX_PHASE_SYNC_POINTER_EN);
+	}
+
+	/* still set train pattern 1 */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+	}
+	/* BPC in FDI rx is consistent with that in PIPECONF */
+	temp &= ~(0x07 << 16);
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(100);
 }
 
 /*
@@ -2045,60 +2713,46 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 		   atomic_read(&obj->pending_flip) == 0);
 }
 
-static void ironlake_crtc_enable(struct drm_crtc *crtc)
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-	u32 reg, temp;
-
-	if (intel_crtc->active)
-		return;
-
-	intel_crtc->active = true;
-	intel_update_watermarks(dev);
-
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		temp = I915_READ(PCH_LVDS);
-		if ((temp & LVDS_PORT_EN) == 0)
-			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
-	}
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
 
-	ironlake_fdi_enable(crtc);
+	/*
+	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+	 * must be driven by its own crtc; no sharing is possible.
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		if (encoder->base.crtc != crtc)
+			continue;
 
-	/* Enable panel fitting for LVDS */
-	if (dev_priv->pch_pf_size &&
-	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
-		/* Force use of hard-coded filter coefficients
-		 * as some pre-programmed values are broken,
-		 * e.g. x201.
-		 */
-		I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
-			   PF_ENABLE | PF_FILTER_MED_3x3);
-		I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
-			   dev_priv->pch_pf_pos);
-		I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
-			   dev_priv->pch_pf_size);
+		switch (encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				return false;
+			continue;
+		}
 	}
 
-	/* Enable CPU pipe */
-	reg = PIPECONF(pipe);
-	temp = I915_READ(reg);
-	if ((temp & PIPECONF_ENABLE) == 0) {
-		I915_WRITE(reg, temp | PIPECONF_ENABLE);
-		POSTING_READ(reg);
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
-	}
+	return true;
+}
 
-	/* configure and enable CPU plane */
-	reg = DSPCNTR(plane);
-	temp = I915_READ(reg);
-	if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-		I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
-		intel_flush_display_plane(dev, plane);
-	}
+/*
+ * Enable PCH resources required for PCH ports:
+ *   - PCH PLLs
+ *   - FDI training & RX/TX
+ *   - update transcoder timings
+ *   - DP transcoding bits
+ *   - transcoder
+ */
+static void ironlake_pch_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
 
 	/* For PCH output, training FDI link */
 	if (IS_GEN6(dev))
@@ -2106,14 +2760,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	else
 		ironlake_fdi_link_train(crtc);
 
-	/* enable PCH DPLL */
-	reg = PCH_DPLL(pipe);
-	temp = I915_READ(reg);
-	if ((temp & DPLL_VCO_ENABLE) == 0) {
-		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-		POSTING_READ(reg);
-		udelay(200);
-	}
+	intel_enable_pch_pll(dev_priv, pipe);
 
 	if (HAS_PCH_CPT(dev)) {
 		/* Be sure PCH DPLL SEL is set */
@@ -2125,7 +2772,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		I915_WRITE(PCH_DPLL_SEL, temp);
 	}
 
-	/* set transcoder timing */
+	/* set transcoder timing, panel must allow it */
+	assert_panel_unlocked(dev_priv, pipe);
 	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
 	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
 	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
@@ -2172,18 +2820,55 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		I915_WRITE(reg, temp);
 	}
 
-	/* enable PCH transcoder */
-	reg = TRANSCONF(pipe);
-	temp = I915_READ(reg);
-	/*
-	 * make the BPC in transcoder be consistent with
-	 * that in pipeconf reg.
-	 */
-	temp &= ~PIPE_BPC_MASK;
-	temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
-	I915_WRITE(reg, temp | TRANS_ENABLE);
-	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
-		DRM_ERROR("failed to enable transcoder %d\n", pipe);
+	intel_enable_transcoder(dev_priv, pipe);
+}
+
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	u32 temp;
+	bool is_pch_port;
+
+	if (intel_crtc->active)
+		return;
+
+	intel_crtc->active = true;
+	intel_update_watermarks(dev);
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		temp = I915_READ(PCH_LVDS);
+		if ((temp & LVDS_PORT_EN) == 0)
+			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+	}
+
+	is_pch_port = intel_crtc_driving_pch(crtc);
+
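+	/* Only bring up the FDI link when this pipe actually feeds a PCH port */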
+	if (is_pch_port)
+		ironlake_fdi_enable(crtc);
+	else
+		ironlake_fdi_disable(crtc);
+
+	/* Enable panel fitting for LVDS */
+	if (dev_priv->pch_pf_size &&
+	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+		/* Force use of hard-coded filter coefficients
+		 * as some pre-programmed values are broken,
+		 * e.g. x201.
+		 */
+		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+	}
+
+	intel_enable_pipe(dev_priv, pipe, is_pch_port);
+	intel_enable_plane(dev_priv, plane, pipe);
+
+	if (is_pch_port)
+		ironlake_pch_enable(crtc);
 
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
@@ -2206,116 +2891,58 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	drm_vblank_off(dev, pipe);
 	intel_crtc_update_cursor(crtc, false);
 
-	/* Disable display plane */
-	reg = DSPCNTR(plane);
-	temp = I915_READ(reg);
-	if (temp & DISPLAY_PLANE_ENABLE) {
-		I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
-		intel_flush_display_plane(dev, plane);
-	}
+	intel_disable_plane(dev_priv, plane, pipe);
 
 	if (dev_priv->cfb_plane == plane &&
 	    dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);
 
-	/* disable cpu pipe, disable after all planes disabled */
-	reg = PIPECONF(pipe);
-	temp = I915_READ(reg);
-	if (temp & PIPECONF_ENABLE) {
-		I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
-		POSTING_READ(reg);
-		/* wait for cpu pipe off, pipe state */
-		intel_wait_for_pipe_off(dev, intel_crtc->pipe);
-	}
+	intel_disable_pipe(dev_priv, pipe);
 
 	/* Disable PF */
-	I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
-	I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
-
-	/* disable CPU FDI tx and PCH FDI rx */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
-	POSTING_READ(reg);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~(0x7 << 16);
-	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
-	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
-
-	POSTING_READ(reg);
-	udelay(100);
-
-	/* Ironlake workaround, disable clock pointer after downing FDI */
-	if (HAS_PCH_IBX(dev))
-		I915_WRITE(FDI_RX_CHICKEN(pipe),
-			   I915_READ(FDI_RX_CHICKEN(pipe) &
-				     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
-
-	/* still set train pattern 1 */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_PATTERN_1;
-	I915_WRITE(reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_PATTERN_1;
-	}
-	/* BPC in FDI rx is consistent with that in PIPECONF */
-	temp &= ~(0x07 << 16);
-	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
-	I915_WRITE(reg, temp);
+	I915_WRITE(PF_CTL(pipe), 0);
+	I915_WRITE(PF_WIN_SZ(pipe), 0);
 
-	POSTING_READ(reg);
-	udelay(100);
+	ironlake_fdi_disable(crtc);
 
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		temp = I915_READ(PCH_LVDS);
-		if (temp & LVDS_PORT_EN) {
-			I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
-			POSTING_READ(PCH_LVDS);
-			udelay(100);
-		}
-	}
+	/* This is a horrible layering violation; we should be doing this in
+	 * the connector/encoder ->prepare instead, but we don't always have
+	 * enough information there about the config to know whether it will
+	 * actually be necessary or just cause undesired flicker.
+	 */
+	intel_disable_pch_ports(dev_priv, pipe);
 
-	/* disable PCH transcoder */
-	reg = TRANSCONF(plane);
-	temp = I915_READ(reg);
-	if (temp & TRANS_ENABLE) {
-		I915_WRITE(reg, temp & ~TRANS_ENABLE);
-		/* wait for PCH transcoder off, transcoder state */
-		if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
-			DRM_ERROR("failed to disable transcoder\n");
-	}
+	intel_disable_transcoder(dev_priv, pipe);
 
 	if (HAS_PCH_CPT(dev)) {
 		/* disable TRANS_DP_CTL */
 		reg = TRANS_DP_CTL(pipe);
 		temp = I915_READ(reg);
 		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+		temp |= TRANS_DP_PORT_SEL_NONE;
 		I915_WRITE(reg, temp);
 
 		/* disable DPLL_SEL */
 		temp = I915_READ(PCH_DPLL_SEL);
-		if (pipe == 0)
-			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
-		else
+		switch (pipe) {
+		case 0:
+			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+			break;
+		case 1:
 			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+			break;
+		case 2:
+			/* FIXME: manage transcoder PLLs? */
+			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
+			break;
+		default:
+			BUG(); /* wtf */
+		}
 		I915_WRITE(PCH_DPLL_SEL, temp);
 	}
 
 	/* disable PCH DPLL */
-	reg = PCH_DPLL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
+	intel_disable_pch_pll(dev_priv, pipe);
 
 	/* Switch from PCDclk to Rawclk */
 	reg = FDI_RX_CTL(pipe);
@@ -2372,9 +2999,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 {
 	if (!enable && intel_crtc->overlay) {
 		struct drm_device *dev = intel_crtc->base.dev;
+		struct drm_i915_private *dev_priv = dev->dev_private;
 
 		mutex_lock(&dev->struct_mutex);
-		(void) intel_overlay_switch_off(intel_crtc->overlay, false);
+		dev_priv->mm.interruptible = false;
+		(void) intel_overlay_switch_off(intel_crtc->overlay);
+		dev_priv->mm.interruptible = true;
 		mutex_unlock(&dev->struct_mutex);
 	}
 
@@ -2390,7 +3020,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	u32 reg, temp;
 
 	if (intel_crtc->active)
 		return;
@@ -2398,42 +3027,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	intel_crtc->active = true;
 	intel_update_watermarks(dev);
 
-	/* Enable the DPLL */
-	reg = DPLL(pipe);
-	temp = I915_READ(reg);
-	if ((temp & DPLL_VCO_ENABLE) == 0) {
-		I915_WRITE(reg, temp);
-
-		/* Wait for the clocks to stabilize. */
-		POSTING_READ(reg);
-		udelay(150);
-
-		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-
-		/* Wait for the clocks to stabilize. */
-		POSTING_READ(reg);
-		udelay(150);
-
-		I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-
-		/* Wait for the clocks to stabilize. */
-		POSTING_READ(reg);
-		udelay(150);
-	}
-
-	/* Enable the pipe */
-	reg = PIPECONF(pipe);
-	temp = I915_READ(reg);
-	if ((temp & PIPECONF_ENABLE) == 0)
-		I915_WRITE(reg, temp | PIPECONF_ENABLE);
-
-	/* Enable the plane */
-	reg = DSPCNTR(plane);
-	temp = I915_READ(reg);
-	if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-		I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
-		intel_flush_display_plane(dev, plane);
-	}
+	intel_enable_pll(dev_priv, pipe);
+	intel_enable_pipe(dev_priv, pipe, false);
+	intel_enable_plane(dev_priv, plane, pipe);
 
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
@@ -2450,7 +3046,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	u32 reg, temp;
 
 	if (!intel_crtc->active)
 		return;
@@ -2465,45 +3060,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	    dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);
 
-	/* Disable display plane */
-	reg = DSPCNTR(plane);
-	temp = I915_READ(reg);
-	if (temp & DISPLAY_PLANE_ENABLE) {
-		I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
-		/* Flush the plane changes */
-		intel_flush_display_plane(dev, plane);
-
-		/* Wait for vblank for the disable to take effect */
-		if (IS_GEN2(dev))
-			intel_wait_for_vblank(dev, pipe);
-	}
-
-	/* Don't disable pipe A or pipe A PLLs if needed */
-	if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
-		goto done;
-
-	/* Next, disable display pipes */
-	reg = PIPECONF(pipe);
-	temp = I915_READ(reg);
-	if (temp & PIPECONF_ENABLE) {
-		I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
-
-		/* Wait for the pipe to turn off */
-		POSTING_READ(reg);
-		intel_wait_for_pipe_off(dev, pipe);
-	}
-
-	reg = DPLL(pipe);
-	temp = I915_READ(reg);
-	if (temp & DPLL_VCO_ENABLE) {
-		I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
-
-		/* Wait for the clocks to turn off. */
-		POSTING_READ(reg);
-		udelay(150);
-	}
+	intel_disable_plane(dev_priv, plane, pipe);
+	intel_disable_pipe(dev_priv, pipe);
+	intel_disable_pll(dev_priv, pipe);
 
-done:
 	intel_crtc->active = false;
 	intel_update_fbc(dev);
 	intel_update_watermarks(dev);
@@ -2565,7 +3125,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
 		break;
 	default:
-		DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
 		break;
 	}
 }
@@ -2762,77 +3322,77 @@ struct intel_watermark_params {
 };
 
 /* Pineview has different values for various configs */
-static struct intel_watermark_params pineview_display_wm = {
+static const struct intel_watermark_params pineview_display_wm = {
 	PINEVIEW_DISPLAY_FIFO,
 	PINEVIEW_MAX_WM,
 	PINEVIEW_DFT_WM,
 	PINEVIEW_GUARD_WM,
 	PINEVIEW_FIFO_LINE_SIZE
 };
-static struct intel_watermark_params pineview_display_hplloff_wm = {
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
 	PINEVIEW_DISPLAY_FIFO,
 	PINEVIEW_MAX_WM,
 	PINEVIEW_DFT_HPLLOFF_WM,
 	PINEVIEW_GUARD_WM,
 	PINEVIEW_FIFO_LINE_SIZE
 };
-static struct intel_watermark_params pineview_cursor_wm = {
+static const struct intel_watermark_params pineview_cursor_wm = {
 	PINEVIEW_CURSOR_FIFO,
 	PINEVIEW_CURSOR_MAX_WM,
 	PINEVIEW_CURSOR_DFT_WM,
 	PINEVIEW_CURSOR_GUARD_WM,
 	PINEVIEW_FIFO_LINE_SIZE,
 };
-static struct intel_watermark_params pineview_cursor_hplloff_wm = {
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
 	PINEVIEW_CURSOR_FIFO,
 	PINEVIEW_CURSOR_MAX_WM,
 	PINEVIEW_CURSOR_DFT_WM,
 	PINEVIEW_CURSOR_GUARD_WM,
 	PINEVIEW_FIFO_LINE_SIZE
 };
-static struct intel_watermark_params g4x_wm_info = {
+static const struct intel_watermark_params g4x_wm_info = {
 	G4X_FIFO_SIZE,
 	G4X_MAX_WM,
 	G4X_MAX_WM,
 	2,
 	G4X_FIFO_LINE_SIZE,
 };
-static struct intel_watermark_params g4x_cursor_wm_info = {
+static const struct intel_watermark_params g4x_cursor_wm_info = {
 	I965_CURSOR_FIFO,
 	I965_CURSOR_MAX_WM,
 	I965_CURSOR_DFT_WM,
 	2,
 	G4X_FIFO_LINE_SIZE,
 };
-static struct intel_watermark_params i965_cursor_wm_info = {
+static const struct intel_watermark_params i965_cursor_wm_info = {
 	I965_CURSOR_FIFO,
 	I965_CURSOR_MAX_WM,
 	I965_CURSOR_DFT_WM,
 	2,
 	I915_FIFO_LINE_SIZE,
 };
-static struct intel_watermark_params i945_wm_info = {
+static const struct intel_watermark_params i945_wm_info = {
 	I945_FIFO_SIZE,
 	I915_MAX_WM,
 	1,
 	2,
 	I915_FIFO_LINE_SIZE
 };
-static struct intel_watermark_params i915_wm_info = {
+static const struct intel_watermark_params i915_wm_info = {
 	I915_FIFO_SIZE,
 	I915_MAX_WM,
 	1,
 	2,
 	I915_FIFO_LINE_SIZE
 };
-static struct intel_watermark_params i855_wm_info = {
+static const struct intel_watermark_params i855_wm_info = {
 	I855GM_FIFO_SIZE,
 	I915_MAX_WM,
 	1,
 	2,
 	I830_FIFO_LINE_SIZE
 };
-static struct intel_watermark_params i830_wm_info = {
+static const struct intel_watermark_params i830_wm_info = {
 	I830_FIFO_SIZE,
 	I915_MAX_WM,
 	1,
@@ -2840,31 +3400,28 @@ static struct intel_watermark_params i830_wm_info = {
 	I830_FIFO_LINE_SIZE
 };
 
-static struct intel_watermark_params ironlake_display_wm_info = {
+static const struct intel_watermark_params ironlake_display_wm_info = {
 	ILK_DISPLAY_FIFO,
 	ILK_DISPLAY_MAXWM,
 	ILK_DISPLAY_DFTWM,
 	2,
 	ILK_FIFO_LINE_SIZE
 };
-
-static struct intel_watermark_params ironlake_cursor_wm_info = {
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
 	ILK_CURSOR_FIFO,
 	ILK_CURSOR_MAXWM,
 	ILK_CURSOR_DFTWM,
 	2,
 	ILK_FIFO_LINE_SIZE
 };
-
-static struct intel_watermark_params ironlake_display_srwm_info = {
+static const struct intel_watermark_params ironlake_display_srwm_info = {
 	ILK_DISPLAY_SR_FIFO,
 	ILK_DISPLAY_MAX_SRWM,
 	ILK_DISPLAY_DFT_SRWM,
 	2,
 	ILK_FIFO_LINE_SIZE
 };
-
-static struct intel_watermark_params ironlake_cursor_srwm_info = {
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
 	ILK_CURSOR_SR_FIFO,
 	ILK_CURSOR_MAX_SRWM,
 	ILK_CURSOR_DFT_SRWM,
@@ -2872,31 +3429,28 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
 	ILK_FIFO_LINE_SIZE
 };
 
-static struct intel_watermark_params sandybridge_display_wm_info = {
+static const struct intel_watermark_params sandybridge_display_wm_info = {
 	SNB_DISPLAY_FIFO,
 	SNB_DISPLAY_MAXWM,
 	SNB_DISPLAY_DFTWM,
 	2,
 	SNB_FIFO_LINE_SIZE
 };
-
-static struct intel_watermark_params sandybridge_cursor_wm_info = {
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
 	SNB_CURSOR_FIFO,
 	SNB_CURSOR_MAXWM,
 	SNB_CURSOR_DFTWM,
 	2,
 	SNB_FIFO_LINE_SIZE
 };
-
-static struct intel_watermark_params sandybridge_display_srwm_info = {
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
 	SNB_DISPLAY_SR_FIFO,
 	SNB_DISPLAY_MAX_SRWM,
 	SNB_DISPLAY_DFT_SRWM,
 	2,
 	SNB_FIFO_LINE_SIZE
 };
-
-static struct intel_watermark_params sandybridge_cursor_srwm_info = {
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
 	SNB_CURSOR_SR_FIFO,
 	SNB_CURSOR_MAX_SRWM,
 	SNB_CURSOR_DFT_SRWM,
@@ -2924,7 +3478,8 @@ static struct intel_watermark_params sandybridge_cursor_srwm_info = {
  * will occur, and a display engine hang could result.
  */
 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
-					struct intel_watermark_params *wm,
+					const struct intel_watermark_params *wm,
+					int fifo_size,
 					int pixel_size,
 					unsigned long latency_ns)
 {
@@ -2942,7 +3497,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
 
 	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
 
-	wm_size = wm->fifo_size - (entries_required + wm->guard_size);
+	wm_size = fifo_size - (entries_required + wm->guard_size);
 
 	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
 
@@ -3115,15 +3670,28 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
 	return size;
 }
 
-static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
-			       int planeb_clock, int sr_hdisplay, int unused,
-			       int pixel_size)
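+/* Return the sole enabled crtc with a framebuffer, or NULL if zero or several are active */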
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+	struct drm_crtc *crtc, *enabled = NULL;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->enabled && crtc->fb) {
+			if (enabled)
+				return NULL;
+			enabled = crtc;
+		}
+	}
+
+	return enabled;
+}
+
+static void pineview_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
 	const struct cxsr_latency *latency;
 	u32 reg;
 	unsigned long wm;
-	int sr_clock;
 
 	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
 					 dev_priv->fsb_freq, dev_priv->mem_freq);
@@ -3133,11 +3701,14 @@ static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
 		return;
 	}
 
-	if (!planea_clock || !planeb_clock) {
-		sr_clock = planea_clock ? planea_clock : planeb_clock;
+	crtc = single_enabled_crtc(dev);
+	if (crtc) {
+		int clock = crtc->mode.clock;
+		int pixel_size = crtc->fb->bits_per_pixel / 8;
 
 		/* Display SR */
-		wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+		wm = intel_calculate_wm(clock, &pineview_display_wm,
+					pineview_display_wm.fifo_size,
 					pixel_size, latency->display_sr);
 		reg = I915_READ(DSPFW1);
 		reg &= ~DSPFW_SR_MASK;
@@ -3146,7 +3717,8 @@ static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
 		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
 
 		/* cursor SR */
-		wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+					pineview_display_wm.fifo_size,
 					pixel_size, latency->cursor_sr);
 		reg = I915_READ(DSPFW3);
 		reg &= ~DSPFW_CURSOR_SR_MASK;
@@ -3154,7 +3726,8 @@ static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
 		I915_WRITE(DSPFW3, reg);
 
 		/* Display HPLL off SR */
-		wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+					pineview_display_hplloff_wm.fifo_size,
 					pixel_size, latency->display_hpll_disable);
 		reg = I915_READ(DSPFW3);
 		reg &= ~DSPFW_HPLL_SR_MASK;
@@ -3162,7 +3735,8 @@ static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
 		I915_WRITE(DSPFW3, reg);
 
 		/* cursor HPLL off SR */
-		wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+					pineview_display_hplloff_wm.fifo_size,
 					pixel_size, latency->cursor_hpll_disable);
 		reg = I915_READ(DSPFW3);
 		reg &= ~DSPFW_HPLL_CURSOR_MASK;
@@ -3180,125 +3754,229 @@ static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
 	}
 }
 
-static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
-			  int planeb_clock, int sr_hdisplay, int sr_htotal,
-			  int pixel_size)
+static bool g4x_compute_wm0(struct drm_device *dev,
+			    int plane,
+			    const struct intel_watermark_params *display,
+			    int display_latency_ns,
+			    const struct intel_watermark_params *cursor,
+			    int cursor_latency_ns,
+			    int *plane_wm,
+			    int *cursor_wm)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int total_size, cacheline_size;
-	int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
-	struct intel_watermark_params planea_params, planeb_params;
-	unsigned long line_time_us;
-	int sr_clock, sr_entries = 0, entries_required;
+	struct drm_crtc *crtc;
+	int htotal, hdisplay, clock, pixel_size;
+	int line_time_us, line_count;
+	int entries, tlb_miss;
 
-	/* Create copies of the base settings for each pipe */
-	planea_params = planeb_params = g4x_wm_info;
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	if (crtc->fb == NULL || !crtc->enabled)
+		return false;
 
-	/* Grab a couple of global values before we overwrite them */
-	total_size = planea_params.fifo_size;
-	cacheline_size = planea_params.cacheline_size;
+	htotal = crtc->mode.htotal;
+	hdisplay = crtc->mode.hdisplay;
+	clock = crtc->mode.clock;
+	pixel_size = crtc->fb->bits_per_pixel / 8;
 
-	/*
-	 * Note: we need to make sure we don't overflow for various clock &
-	 * latency values.
-	 * clocks go from a few thousand to several hundred thousand.
-	 * latency is usually a few thousand
-	 */
-	entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
-		1000;
-	entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
-	planea_wm = entries_required + planea_params.guard_size;
+	/* Use the small buffer method to calculate plane watermark */
+	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
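+	/* Pad with any FIFO bytes beyond 8 per horizontal pixel as a TLB-miss allowance */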
+	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, display->cacheline_size);
+	*plane_wm = entries + display->guard_size;
+	if (*plane_wm > (int)display->max_wm)
+		*plane_wm = display->max_wm;
 
-	entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
-		1000;
-	entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
-	planeb_wm = entries_required + planeb_params.guard_size;
+	/* Use the large buffer method to calculate cursor watermark */
+	line_time_us = ((htotal * 1000) / clock);
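+	/* latency is in ns, line time in us: divide then round up to whole lines */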
+	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+	entries = line_count * 64 * pixel_size;
+	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+	if (*cursor_wm > (int)cursor->max_wm)
+		*cursor_wm = (int)cursor->max_wm;
 
-	cursora_wm = cursorb_wm = 16;
-	cursor_sr = 32;
+	return true;
+}
 
-	DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark values is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+			   int display_wm, int cursor_wm,
+			   const struct intel_watermark_params *display,
+			   const struct intel_watermark_params *cursor)
+{
+	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+		      display_wm, cursor_wm);
 
-	/* Calc sr entries for one plane configs */
-	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
-		/* self-refresh has much higher latency */
-		static const int sr_latency_ns = 12000;
+	if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark is too large(%d), disabling\n",
+			      display_wm, display->max_wm);
+		return false;
+	}
 
-		sr_clock = planea_clock ? planea_clock : planeb_clock;
-		line_time_us = ((sr_htotal * 1000) / sr_clock);
+	if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark is too large(%d), disabling\n",
+			      cursor_wm, cursor->max_wm);
+		return false;
+	}
 
-		/* Use ns/us then divide to preserve precision */
-		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * sr_hdisplay;
-		sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
-
-		entries_required = (((sr_latency_ns / line_time_us) +
-				     1000) / 1000) * pixel_size * 64;
-		entries_required = DIV_ROUND_UP(entries_required,
-						g4x_cursor_wm_info.cacheline_size);
-		cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
-
-		if (cursor_sr > g4x_cursor_wm_info.max_wm)
-			cursor_sr = g4x_cursor_wm_info.max_wm;
-		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
-			      "cursor %d\n", sr_entries, cursor_sr);
+	if (!(display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+		return false;
+	}
 
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
-	} else {
-		/* Turn off self refresh if both pipes are enabled */
-		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-			   & ~FW_BLC_SELF_EN);
+	return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+			     int plane,
+			     int latency_ns,
+			     const struct intel_watermark_params *display,
+			     const struct intel_watermark_params *cursor,
+			     int *display_wm, int *cursor_wm)
+{
+	struct drm_crtc *crtc;
+	int hdisplay, htotal, pixel_size, clock;
+	unsigned long line_time_us;
+	int line_count, line_size;
+	int small, large;
+	int entries;
+
+	if (!latency_ns) {
+		*display_wm = *cursor_wm = 0;
+		return false;
 	}
 
-	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
-		  planea_wm, planeb_wm, sr_entries);
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	hdisplay = crtc->mode.hdisplay;
+	htotal = crtc->mode.htotal;
+	clock = crtc->mode.clock;
+	pixel_size = crtc->fb->bits_per_pixel / 8;
+
+	line_time_us = (htotal * 1000) / clock;
+	line_count = (latency_ns / line_time_us + 1000) / 1000;
+	line_size = hdisplay * pixel_size;
+
+	/* Use the minimum of the small and large buffer method for primary */
+	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	large = line_count * line_size;
+
+	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+	*display_wm = entries + display->guard_size;
+
+	/* calculate the self-refresh watermark for display cursor */
+	entries = line_count * pixel_size * 64;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+
+	return g4x_check_srwm(dev,
+			      *display_wm, *cursor_wm,
+			      display, cursor);
+}
+
+static inline bool single_plane_enabled(unsigned int mask)
+{
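+	/* non-zero and a power of two: exactly one plane bit set */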
+	return mask && (mask & -mask) == 0;
+}
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+	static const int sr_latency_ns = 12000;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+	int plane_sr, cursor_sr;
+	unsigned int enabled = 0;
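+	/* bitmask of planes whose WM0 computation succeeded */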
+
+	if (g4x_compute_wm0(dev, 0,
+			    &g4x_wm_info, latency_ns,
+			    &g4x_cursor_wm_info, latency_ns,
+			    &planea_wm, &cursora_wm))
+		enabled |= 1;
+
+	if (g4x_compute_wm0(dev, 1,
+			    &g4x_wm_info, latency_ns,
+			    &g4x_cursor_wm_info, latency_ns,
+			    &planeb_wm, &cursorb_wm))
+		enabled |= 2;
+
+	plane_sr = cursor_sr = 0;
+	if (single_plane_enabled(enabled) &&
+	    g4x_compute_srwm(dev, ffs(enabled) - 1,
+			     sr_latency_ns,
+			     &g4x_wm_info,
+			     &g4x_cursor_wm_info,
+			     &plane_sr, &cursor_sr))
+		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	else
+		I915_WRITE(FW_BLC_SELF,
+			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
 
-	planea_wm &= 0x3f;
-	planeb_wm &= 0x3f;
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+		      planea_wm, cursora_wm,
+		      planeb_wm, cursorb_wm,
+		      plane_sr, cursor_sr);
 
-	I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
+	I915_WRITE(DSPFW1,
+		   (plane_sr << DSPFW_SR_SHIFT) |
 		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
-		   (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
-	I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
+		   planea_wm);
+	I915_WRITE(DSPFW2,
+		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
 		   (cursora_wm << DSPFW_CURSORA_SHIFT));
 	/* HPLL off in SR has some issues on G4x... disable it */
-	I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+	I915_WRITE(DSPFW3,
+		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i965_update_wm(struct drm_device *dev, int planea_clock,
-			   int planeb_clock, int sr_hdisplay, int sr_htotal,
-			   int pixel_size)
+static void i965_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long line_time_us;
-	int sr_clock, sr_entries, srwm = 1;
+	struct drm_crtc *crtc;
+	int srwm = 1;
 	int cursor_sr = 16;
 
 	/* Calc sr entries for one plane configs */
-	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+	crtc = single_enabled_crtc(dev);
+	if (crtc) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 12000;
+		int clock = crtc->mode.clock;
+		int htotal = crtc->mode.htotal;
+		int hdisplay = crtc->mode.hdisplay;
+		int pixel_size = crtc->fb->bits_per_pixel / 8;
+		unsigned long line_time_us;
+		int entries;
 
-		sr_clock = planea_clock ? planea_clock : planeb_clock;
-		line_time_us = ((sr_htotal * 1000) / sr_clock);
+		line_time_us = ((htotal * 1000) / clock);
 
 		/* Use ns/us then divide to preserve precision */
-		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * sr_hdisplay;
-		sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
-		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
-		srwm = I965_FIFO_SIZE - sr_entries;
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+			pixel_size * hdisplay;
+		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+		srwm = I965_FIFO_SIZE - entries;
 		if (srwm < 0)
 			srwm = 1;
 		srwm &= 0x1ff;
+		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+			      entries, srwm);
 
-		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
 			pixel_size * 64;
-		sr_entries = DIV_ROUND_UP(sr_entries,
+		entries = DIV_ROUND_UP(entries,
 					  i965_cursor_wm_info.cacheline_size);
 		cursor_sr = i965_cursor_wm_info.fifo_size -
-			(sr_entries + i965_cursor_wm_info.guard_size);
+			(entries + i965_cursor_wm_info.guard_size);
 
 		if (cursor_sr > i965_cursor_wm_info.max_wm)
 			cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3319,46 +3997,56 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 		      srwm);
 
 	/* 965 has limitations... */
-	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
-		   (8 << 0));
+	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+		   (8 << 16) | (8 << 8) | (8 << 0));
 	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
 	/* update cursor SR watermark */
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
-			   int planeb_clock, int sr_hdisplay, int sr_htotal,
-			   int pixel_size)
+static void i9xx_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
 	uint32_t fwater_hi;
-	int total_size, cacheline_size, cwm, srwm = 1;
+	int cwm, srwm = 1;
+	int fifo_size;
 	int planea_wm, planeb_wm;
-	struct intel_watermark_params planea_params, planeb_params;
-	unsigned long line_time_us;
-	int sr_clock, sr_entries = 0;
+	struct drm_crtc *crtc, *enabled = NULL;
 
-	/* Create copies of the base settings for each pipe */
-	if (IS_CRESTLINE(dev) || IS_I945GM(dev))
-		planea_params = planeb_params = i945_wm_info;
+	if (IS_I945GM(dev))
+		wm_info = &i945_wm_info;
 	else if (!IS_GEN2(dev))
-		planea_params = planeb_params = i915_wm_info;
+		wm_info = &i915_wm_info;
 	else
-		planea_params = planeb_params = i855_wm_info;
-
-	/* Grab a couple of global values before we overwrite them */
-	total_size = planea_params.fifo_size;
-	cacheline_size = planea_params.cacheline_size;
-
-	/* Update per-plane FIFO sizes */
-	planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
-	planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+		wm_info = &i855_wm_info;
+
+	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+	crtc = intel_get_crtc_for_plane(dev, 0);
+	if (crtc->enabled && crtc->fb) {
+		planea_wm = intel_calculate_wm(crtc->mode.clock,
+					       wm_info, fifo_size,
+					       crtc->fb->bits_per_pixel / 8,
+					       latency_ns);
+		enabled = crtc;
+	} else
+		planea_wm = fifo_size - wm_info->guard_size;
+
+	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+	crtc = intel_get_crtc_for_plane(dev, 1);
+	if (crtc->enabled && crtc->fb) {
+		planeb_wm = intel_calculate_wm(crtc->mode.clock,
+					       wm_info, fifo_size,
+					       crtc->fb->bits_per_pixel / 8,
+					       latency_ns);
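+		/* self-refresh only works with a single active plane; forget it if both are on */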
+		if (enabled == NULL)
+			enabled = crtc;
+		else
+			enabled = NULL;
+	} else
+		planeb_wm = fifo_size - wm_info->guard_size;
 
-	planea_wm = intel_calculate_wm(planea_clock, &planea_params,
-				       pixel_size, latency_ns);
-	planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
-				       pixel_size, latency_ns);
 	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
 	/*
@@ -3366,39 +4054,39 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 	 */
 	cwm = 2;
 
+	/* Play safe and disable self-refresh before adjusting watermarks. */
+	if (IS_I945G(dev) || IS_I945GM(dev))
+		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+	else if (IS_I915GM(dev))
+		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
 	/* Calc sr entries for one plane configs */
-	if (HAS_FW_BLC(dev) && sr_hdisplay &&
-	    (!planea_clock || !planeb_clock)) {
+	if (HAS_FW_BLC(dev) && enabled) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 6000;
+		int clock = enabled->mode.clock;
+		int htotal = enabled->mode.htotal;
+		int hdisplay = enabled->mode.hdisplay;
+		int pixel_size = enabled->fb->bits_per_pixel / 8;
+		unsigned long line_time_us;
+		int entries;
 
-		sr_clock = planea_clock ? planea_clock : planeb_clock;
-		line_time_us = ((sr_htotal * 1000) / sr_clock);
+		line_time_us = (htotal * 1000) / clock;
 
 		/* Use ns/us then divide to preserve precision */
-		sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * sr_hdisplay;
-		sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
-		DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
-		srwm = total_size - sr_entries;
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+			pixel_size * hdisplay;
+		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+		srwm = wm_info->fifo_size - entries;
 		if (srwm < 0)
 			srwm = 1;
 
 		if (IS_I945G(dev) || IS_I945GM(dev))
-			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
-		else if (IS_I915GM(dev)) {
-			/* 915M has a smaller SRWM field */
+			I915_WRITE(FW_BLC_SELF,
+				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+		else if (IS_I915GM(dev))
 			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
-			I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
-		}
-	} else {
-		/* Turn off self refresh if both pipes are enabled */
-		if (IS_I945G(dev) || IS_I945GM(dev)) {
-			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-				   & ~FW_BLC_SELF_EN);
-		} else if (IS_I915GM(dev)) {
-			I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
-		}
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3413,19 +4101,36 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 
 	I915_WRITE(FW_BLC, fwater_lo);
 	I915_WRITE(FW_BLC2, fwater_hi);
+
+	if (HAS_FW_BLC(dev)) {
+		if (enabled) {
+			if (IS_I945G(dev) || IS_I945GM(dev))
+				I915_WRITE(FW_BLC_SELF,
+					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+			else if (IS_I915GM(dev))
+				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+			DRM_DEBUG_KMS("memory self refresh enabled\n");
+		} else
+			DRM_DEBUG_KMS("memory self refresh disabled\n");
+	}
 }
 
-static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
-			   int unused2, int unused3, int pixel_size)
+static void i830_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+	struct drm_crtc *crtc;
+	uint32_t fwater_lo;
 	int planea_wm;
 
-	i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+	crtc = single_enabled_crtc(dev);
+	if (crtc == NULL)
+		return;
 
-	planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
-				       pixel_size, latency_ns);
+	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+				       dev_priv->display.get_fifo_size(dev, 0),
+				       crtc->fb->bits_per_pixel / 8,
+				       latency_ns);
+	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
 	fwater_lo |= (3<<8) | planea_wm;
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
@@ -3534,15 +4239,15 @@ static bool ironlake_check_srwm(struct drm_device *dev, int level,
 /*
  * Compute watermark values of WM[1-3],
  */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level,
-				  int hdisplay, int htotal,
-				  int pixel_size, int clock, int latency_ns,
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+				  int latency_ns,
 				  const struct intel_watermark_params *display,
 				  const struct intel_watermark_params *cursor,
 				  int *fbc_wm, int *display_wm, int *cursor_wm)
 {
-
+	struct drm_crtc *crtc;
 	unsigned long line_time_us;
+	int hdisplay, htotal, pixel_size, clock;
 	int line_count, line_size;
 	int small, large;
 	int entries;
@@ -3552,6 +4257,12 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level,
 		return false;
 	}
 
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	hdisplay = crtc->mode.hdisplay;
+	htotal = crtc->mode.htotal;
+	clock = crtc->mode.clock;
+	pixel_size = crtc->fb->bits_per_pixel / 8;
+
 	line_time_us = (htotal * 1000) / clock;
 	line_count = (latency_ns / line_time_us + 1000) / 1000;
 	line_size = hdisplay * pixel_size;
@@ -3579,14 +4290,11 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level,
 				   display, cursor);
 }
 
-static void ironlake_update_wm(struct drm_device *dev,
-			       int planea_clock, int planeb_clock,
-			       int hdisplay, int htotal,
-			       int pixel_size)
+static void ironlake_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fbc_wm, plane_wm, cursor_wm, enabled;
-	int clock;
+	int fbc_wm, plane_wm, cursor_wm;
+	unsigned int enabled;
 
 	enabled = 0;
 	if (ironlake_compute_wm0(dev, 0,
@@ -3600,7 +4308,7 @@ static void ironlake_update_wm(struct drm_device *dev,
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled++;
+		enabled |= 1;
 	}
 
 	if (ironlake_compute_wm0(dev, 1,
@@ -3614,7 +4322,7 @@ static void ironlake_update_wm(struct drm_device *dev,
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled++;
+		enabled |= 2;
 	}
 
 	/*
@@ -3625,14 +4333,13 @@ static void ironlake_update_wm(struct drm_device *dev,
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
-	if (enabled != 1)
+	if (!single_plane_enabled(enabled))
 		return;
-
-	clock = planea_clock ? planea_clock : planeb_clock;
+	enabled = ffs(enabled) - 1;
 
 	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
-				   clock, ILK_READ_WM1_LATENCY() * 500,
+	if (!ironlake_compute_srwm(dev, 1, enabled,
+				   ILK_READ_WM1_LATENCY() * 500,
 				   &ironlake_display_srwm_info,
 				   &ironlake_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -3646,8 +4353,8 @@ static void ironlake_update_wm(struct drm_device *dev,
 		   cursor_wm);
 
 	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
-				   clock, ILK_READ_WM2_LATENCY() * 500,
+	if (!ironlake_compute_srwm(dev, 2, enabled,
+				   ILK_READ_WM2_LATENCY() * 500,
 				   &ironlake_display_srwm_info,
 				   &ironlake_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -3666,15 +4373,12 @@ static void ironlake_update_wm(struct drm_device *dev,
 	 */
 }
 
-static void sandybridge_update_wm(struct drm_device *dev,
-			       int planea_clock, int planeb_clock,
-			       int hdisplay, int htotal,
-			       int pixel_size)
+static void sandybridge_update_wm(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
-	int fbc_wm, plane_wm, cursor_wm, enabled;
-	int clock;
+	int fbc_wm, plane_wm, cursor_wm;
+	unsigned int enabled;
 
 	enabled = 0;
 	if (ironlake_compute_wm0(dev, 0,
@@ -3686,7 +4390,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled++;
+		enabled |= 1;
 	}
 
 	if (ironlake_compute_wm0(dev, 1,
@@ -3698,7 +4402,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled++;
+		enabled |= 2;
 	}
 
 	/*
@@ -3715,14 +4419,13 @@ static void sandybridge_update_wm(struct drm_device *dev,
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
-	if (enabled != 1)
+	if (!single_plane_enabled(enabled))
 		return;
-
-	clock = planea_clock ? planea_clock : planeb_clock;
+	enabled = ffs(enabled) - 1;
 
 	/* WM1 */
-	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
-				   clock, SNB_READ_WM1_LATENCY() * 500,
+	if (!ironlake_compute_srwm(dev, 1, enabled,
+				   SNB_READ_WM1_LATENCY() * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -3736,9 +4439,8 @@ static void sandybridge_update_wm(struct drm_device *dev,
 		   cursor_wm);
 
 	/* WM2 */
-	if (!ironlake_compute_srwm(dev, 2,
-				   hdisplay, htotal, pixel_size,
-				   clock, SNB_READ_WM2_LATENCY() * 500,
+	if (!ironlake_compute_srwm(dev, 2, enabled,
+				   SNB_READ_WM2_LATENCY() * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -3752,9 +4454,8 @@ static void sandybridge_update_wm(struct drm_device *dev,
 		   cursor_wm);
 
 	/* WM3 */
-	if (!ironlake_compute_srwm(dev, 3,
-				   hdisplay, htotal, pixel_size,
-				   clock, SNB_READ_WM3_LATENCY() * 500,
+	if (!ironlake_compute_srwm(dev, 3, enabled,
+				   SNB_READ_WM3_LATENCY() * 500,
 				   &sandybridge_display_srwm_info,
 				   &sandybridge_cursor_srwm_info,
 				   &fbc_wm, &plane_wm, &cursor_wm))
@@ -3803,44 +4504,9 @@ static void sandybridge_update_wm(struct drm_device *dev,
 static void intel_update_watermarks(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	int sr_hdisplay = 0;
-	unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
-	int enabled = 0, pixel_size = 0;
-	int sr_htotal = 0;
-
-	if (!dev_priv->display.update_wm)
-		return;
-
-	/* Get the clock config from both planes */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		if (intel_crtc->active) {
-			enabled++;
-			if (intel_crtc->plane == 0) {
-				DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
-					      intel_crtc->pipe, crtc->mode.clock);
-				planea_clock = crtc->mode.clock;
-			} else {
-				DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
-					      intel_crtc->pipe, crtc->mode.clock);
-				planeb_clock = crtc->mode.clock;
-			}
-			sr_hdisplay = crtc->mode.hdisplay;
-			sr_clock = crtc->mode.clock;
-			sr_htotal = crtc->mode.htotal;
-			if (crtc->fb)
-				pixel_size = crtc->fb->bits_per_pixel / 8;
-			else
-				pixel_size = 4; /* by default */
-		}
-	}
-
-	if (enabled <= 0)
-		return;
 
-	dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
-				    sr_hdisplay, sr_htotal, pixel_size);
+	if (dev_priv->display.update_wm)
+		dev_priv->display.update_wm(dev);
 }
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
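
With the hunk above, intel_update_watermarks() no longer gathers per-plane clocks itself; each per-generation hook takes only the device and reads CRTC state on its own. A toy sketch of that dispatch shape, with stand-in struct names rather than the real drm_i915_private layout:

#include <stdio.h>

struct fake_device;

struct display_funcs {
	void (*update_wm)(struct fake_device *dev);	/* per-generation hook */
};

struct fake_device {
	struct display_funcs display;
};

static void i9xx_style_update_wm(struct fake_device *dev)
{
	(void)dev;
	printf("recomputing watermarks from current CRTC state\n");
}

/* the wrapper no longer collects arguments, it just guards and calls */
static void update_watermarks(struct fake_device *dev)
{
	if (dev->display.update_wm)
		dev->display.update_wm(dev);
}

int main(void)
{
	struct fake_device dev = {
		.display = { .update_wm = i9xx_style_update_wm },
	};

	update_watermarks(&dev);
	return 0;
}
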
@@ -3872,6 +4538,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	int ret;
 	struct fdi_m_n m_n = {0};
 	u32 reg, temp;
+	u32 lvds_sync = 0;
 	int target_clock;
 
 	drm_vblank_pre_modeset(dev, pipe);
@@ -4243,9 +4910,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
 	}
 
-	dspcntr |= DISPLAY_PLANE_ENABLE;
-	pipeconf |= PIPECONF_ENABLE;
-	dpll |= DPLL_VCO_ENABLE;
+	if (!HAS_PCH_SPLIT(dev))
+		dpll |= DPLL_VCO_ENABLE;
 
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 	drm_mode_debug_printmodeline(mode);
@@ -4271,10 +4937,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	/* enable transcoder DPLL */
 	if (HAS_PCH_CPT(dev)) {
 		temp = I915_READ(PCH_DPLL_SEL);
-		if (pipe == 0)
+		switch (pipe) {
+		case 0:
 			temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
-		else
+			break;
+		case 1:
 			temp |=	TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
+			break;
+		case 2:
+			/* FIXME: manage transcoder PLLs? */
+			temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
+			break;
+		default:
+			BUG();
+		}
 		I915_WRITE(PCH_DPLL_SEL, temp);
 
 		POSTING_READ(PCH_DPLL_SEL);
@@ -4324,6 +5000,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			else
 				temp &= ~LVDS_ENABLE_DITHER;
 		}
+		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+			lvds_sync |= LVDS_HSYNC_POLARITY;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+			lvds_sync |= LVDS_VSYNC_POLARITY;
+		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
+		    != lvds_sync) {
+			char flags[2] = "-+";
+			DRM_INFO("Changing LVDS panel from "
+				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
+				 flags[!(temp & LVDS_HSYNC_POLARITY)],
+				 flags[!(temp & LVDS_VSYNC_POLARITY)],
+				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
+				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
+			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+			temp |= lvds_sync;
+		}
 		I915_WRITE(reg, temp);
 	}
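
A side note on the LVDS hunk above: the two-character flags[] table turns the polarity bit tests directly into '-'/'+' characters for the log line. A standalone sketch with invented bit positions:

#include <stdio.h>

#define HSYNC_POLARITY (1u << 20)	/* placeholder bit positions */
#define VSYNC_POLARITY (1u << 21)

int main(void)
{
	const char flags[2] = "-+";	/* [0] = '-', [1] = '+', NUL dropped */
	unsigned int temp = HSYNC_POLARITY;		/* what the register says */
	unsigned int lvds_sync = VSYNC_POLARITY;	/* what the mode wants */

	/* a set polarity bit means "negative", so !(bit) picks '-' vs '+' */
	printf("Changing LVDS panel from (%chsync, %cvsync) to (%chsync, %cvsync)\n",
	       flags[!(temp & HSYNC_POLARITY)],
	       flags[!(temp & VSYNC_POLARITY)],
	       flags[!(lvds_sync & HSYNC_POLARITY)],
	       flags[!(lvds_sync & VSYNC_POLARITY)]);
	return 0;
}
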
 
@@ -4341,17 +5033,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
 	} else if (HAS_PCH_SPLIT(dev)) {
 		/* For non-DP output, clear any trans DP clock recovery setting.*/
-		if (pipe == 0) {
-			I915_WRITE(TRANSA_DATA_M1, 0);
-			I915_WRITE(TRANSA_DATA_N1, 0);
-			I915_WRITE(TRANSA_DP_LINK_M1, 0);
-			I915_WRITE(TRANSA_DP_LINK_N1, 0);
-		} else {
-			I915_WRITE(TRANSB_DATA_M1, 0);
-			I915_WRITE(TRANSB_DATA_N1, 0);
-			I915_WRITE(TRANSB_DP_LINK_M1, 0);
-			I915_WRITE(TRANSB_DP_LINK_N1, 0);
-		}
+		I915_WRITE(TRANSDATA_M1(pipe), 0);
+		I915_WRITE(TRANSDATA_N1(pipe), 0);
+		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
 	}
 
 	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
@@ -4454,6 +5139,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
 	I915_WRITE(PIPECONF(pipe), pipeconf);
 	POSTING_READ(PIPECONF(pipe));
+	if (!HAS_PCH_SPLIT(dev))
+		intel_enable_pipe(dev_priv, pipe, false);
 
 	intel_wait_for_vblank(dev, pipe);
 
@@ -4464,6 +5151,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	}
 
 	I915_WRITE(DSPCNTR(plane), dspcntr);
+	POSTING_READ(DSPCNTR(plane));
+	if (!HAS_PCH_SPLIT(dev))
+		intel_enable_plane(dev_priv, plane, pipe);
 
 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
@@ -4480,7 +5170,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
+	int palreg = PALETTE(intel_crtc->pipe);
 	int i;
 
 	/* The clocks have to be on to load the palette. */
@@ -4489,8 +5179,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
 
 	/* use legacy palette for Ironlake */
 	if (HAS_PCH_SPLIT(dev))
-		palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
-						   LGC_PALETTE_B;
+		palreg = LGC_PALETTE(intel_crtc->pipe);
 
 	for (i = 0; i < 256; i++) {
 		I915_WRITE(palreg + 4 * i,
@@ -4511,12 +5200,12 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
 	if (intel_crtc->cursor_visible == visible)
 		return;
 
-	cntl = I915_READ(CURACNTR);
+	cntl = I915_READ(_CURACNTR);
 	if (visible) {
 		/* On these chipsets we can only modify the base whilst
 		 * the cursor is disabled.
 		 */
-		I915_WRITE(CURABASE, base);
+		I915_WRITE(_CURABASE, base);
 
 		cntl &= ~(CURSOR_FORMAT_MASK);
 		/* XXX width must be 64, stride 256 => 0x00 << 28 */
@@ -4525,7 +5214,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
 			CURSOR_FORMAT_ARGB;
 	} else
 		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
-	I915_WRITE(CURACNTR, cntl);
+	I915_WRITE(_CURACNTR, cntl);
 
 	intel_crtc->cursor_visible = visible;
 }
@@ -4539,7 +5228,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 	bool visible = base != 0;
 
 	if (intel_crtc->cursor_visible != visible) {
-		uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+		uint32_t cntl = I915_READ(CURCNTR(pipe));
 		if (base) {
 			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
 			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
@@ -4548,12 +5237,12 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 			cntl |= CURSOR_MODE_DISABLE;
 		}
-		I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+		I915_WRITE(CURCNTR(pipe), cntl);
 
 		intel_crtc->cursor_visible = visible;
 	}
 	/* and commit changes on next vblank */
-	I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+	I915_WRITE(CURBASE(pipe), base);
 }
 
 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -4603,7 +5292,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 	if (!visible && !intel_crtc->cursor_visible)
 		return;
 
-	I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
+	I915_WRITE(CURPOS(pipe), pos);
 	if (IS_845G(dev) || IS_I865G(dev))
 		i845_update_cursor(crtc, base);
 	else
@@ -4643,7 +5332,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	}
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
-	if (!obj)
+	if (&obj->base == NULL)
 		return -ENOENT;
 
 	if (obj->base.size < width * height * 4) {
@@ -4909,14 +5598,14 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
+	u32 dpll = I915_READ(DPLL(pipe));
 	u32 fp;
 	intel_clock_t clock;
 
 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-		fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
+		fp = FP0(pipe);
 	else
-		fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+		fp = FP1(pipe);
 
 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
 	if (IS_PINEVIEW(dev)) {
@@ -4998,10 +5687,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	struct drm_display_mode *mode;
-	int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
-	int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
-	int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
-	int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+	int htot = I915_READ(HTOTAL(pipe));
+	int hsync = I915_READ(HSYNC(pipe));
+	int vtot = I915_READ(VTOTAL(pipe));
+	int vsync = I915_READ(VSYNC(pipe));
 
 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
 	if (!mode)
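
Many hunks in this patch replace "pipe == 0 ? REG_A : REG_B" with per-pipe macros such as HTOTAL(pipe), DPLL(pipe) and CURCNTR(pipe). The usual way to build such macros is to compute the offset from a fixed A-to-B stride, sketched below with invented offsets rather than the real i915 register map:

#include <stdio.h>

#define _HTOTAL_A	0x60000		/* invented offsets, not i915's */
#define _HTOTAL_B	0x61000

/* pick the per-pipe instance by arithmetic instead of a ternary */
#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define HTOTAL(pipe)		_PIPE(pipe, _HTOTAL_A, _HTOTAL_B)

int main(void)
{
	for (int pipe = 0; pipe < 2; pipe++)
		printf("HTOTAL(%d) = 0x%05x\n", pipe, HTOTAL(pipe));
	return 0;
}
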
@@ -5110,7 +5799,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+	int dpll_reg = DPLL(pipe);
 	int dpll = I915_READ(dpll_reg);
 
 	if (HAS_PCH_SPLIT(dev))
@@ -5158,7 +5847,6 @@ static void intel_idle_update(struct work_struct *work)
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_crtc *crtc;
 	struct intel_crtc *intel_crtc;
-	int enabled = 0;
 
 	if (!i915_powersave)
 		return;
@@ -5172,16 +5860,11 @@ static void intel_idle_update(struct work_struct *work)
 		if (!crtc->fb)
 			continue;
 
-		enabled++;
 		intel_crtc = to_intel_crtc(crtc);
 		if (!intel_crtc->busy)
 			intel_decrease_pllclock(crtc);
 	}
 
-	if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) {
-		DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
-	}
 
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -5206,17 +5889,9 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (!dev_priv->busy) {
-		if (IS_I945G(dev) || IS_I945GM(dev)) {
-			u32 fw_blc_self;
-
-			DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
-			fw_blc_self = I915_READ(FW_BLC_SELF);
-			fw_blc_self &= ~FW_BLC_SELF_EN;
-			I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
-		}
+	if (!dev_priv->busy)
 		dev_priv->busy = true;
-	} else
+	else
 		mod_timer(&dev_priv->idle_timer, jiffies +
 			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
 
@@ -5228,14 +5903,6 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
 		intel_fb = to_intel_framebuffer(crtc->fb);
 		if (intel_fb->obj == obj) {
 			if (!intel_crtc->busy) {
-				if (IS_I945G(dev) || IS_I945GM(dev)) {
-					u32 fw_blc_self;
-
-					DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
-					fw_blc_self = I915_READ(FW_BLC_SELF);
-					fw_blc_self &= ~FW_BLC_SELF_EN;
-					I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
-				}
 				/* Non-busy -> busy, upclock */
 				intel_increase_pllclock(crtc);
 				intel_crtc->busy = true;
@@ -5513,7 +6180,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
 		 */
 		pf = 0;
-		pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
 		OUT_RING(pf | pipesrc);
 		break;
 
@@ -5523,8 +6190,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		OUT_RING(fb->pitch | obj->tiling_mode);
 		OUT_RING(obj->gtt_offset);
 
-		pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
-		pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+		pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
+		pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
 		OUT_RING(pf | pipesrc);
 		break;
 	}
@@ -5558,9 +6225,7 @@ static void intel_crtc_reset(struct drm_crtc *crtc)
 	/* Reset flags back to the 'unknown' status so that they
 	 * will be correctly set on the initial modeset.
 	 */
-	intel_crtc->cursor_addr = 0;
 	intel_crtc->dpms_mode = -1;
-	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 }
 
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
@@ -5615,22 +6280,8 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
 	pipe = !pipe;
 
 	/* Disable the plane and wait for it to stop reading from the pipe. */
-	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
-	intel_flush_display_plane(dev, plane);
-
-	if (IS_GEN2(dev))
-		intel_wait_for_vblank(dev, pipe);
-
-	if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
-		return;
-
-	/* Switch off the pipe. */
-	reg = PIPECONF(pipe);
-	val = I915_READ(reg);
-	if (val & PIPECONF_ENABLE) {
-		I915_WRITE(reg, val & ~PIPECONF_ENABLE);
-		intel_wait_for_pipe_off(dev, pipe);
-	}
+	intel_disable_plane(dev_priv, plane, pipe);
+	intel_disable_pipe(dev_priv, pipe);
 }
 
 static void intel_crtc_init(struct drm_device *dev, int pipe)
@@ -5666,6 +6317,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
 	intel_crtc_reset(&intel_crtc->base);
+	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 
 	if (HAS_PCH_SPLIT(dev)) {
 		intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -5919,7 +6571,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
 	int ret;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
-	if (!obj)
+	if (&obj->base == NULL)
 		return ERR_PTR(-ENOENT);
 
 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
@@ -6204,7 +6856,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	 * userspace...
 	 */
 	I915_WRITE(GEN6_RC_STATE, 0);
-	__gen6_force_wake_get(dev_priv);
+	__gen6_gt_force_wake_get(dev_priv);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6241,18 +6893,18 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
 		   18 << 24 |
 		   6 << 16);
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
 	I915_WRITE(GEN6_RP_UP_EI, 100000);
-	I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
 		   GEN6_RP_USE_NORMAL_FREQ |
 		   GEN6_RP_MEDIA_IS_GFX |
 		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_MAX |
-		   GEN6_RP_DOWN_BUSY_MIN);
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_CONT);
 
 	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 		     500))
@@ -6302,12 +6954,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
-	__gen6_force_wake_put(dev_priv);
+	__gen6_gt_force_wake_put(dev_priv);
 }
 
 void intel_enable_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
 
 	/*
 	 * Disable clock gating reported to work incorrectly according to the
@@ -6417,12 +7070,10 @@ void intel_enable_clock_gating(struct drm_device *dev)
 				   ILK_DPARB_CLK_GATE  |
 				   ILK_DPFD_CLK_GATE);
 
-			I915_WRITE(DSPACNTR,
-				   I915_READ(DSPACNTR) |
-				   DISPPLANE_TRICKLE_FEED_DISABLE);
-			I915_WRITE(DSPBCNTR,
-				   I915_READ(DSPBCNTR) |
-				   DISPPLANE_TRICKLE_FEED_DISABLE);
+			for_each_pipe(pipe)
+				I915_WRITE(DSPCNTR(pipe),
+					   I915_READ(DSPCNTR(pipe)) |
+					   DISPPLANE_TRICKLE_FEED_DISABLE);
 		}
 	} else if (IS_G4X(dev)) {
 		uint32_t dspclk_gate;
@@ -6463,52 +7114,60 @@ void intel_enable_clock_gating(struct drm_device *dev)
 	}
 }
 
-void intel_disable_clock_gating(struct drm_device *dev)
+static void ironlake_teardown_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->renderctx) {
-		struct drm_i915_gem_object *obj = dev_priv->renderctx;
-
-		I915_WRITE(CCID, 0);
-		POSTING_READ(CCID);
-
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
+		i915_gem_object_unpin(dev_priv->renderctx);
+		drm_gem_object_unreference(&dev_priv->renderctx->base);
 		dev_priv->renderctx = NULL;
 	}
 
 	if (dev_priv->pwrctx) {
-		struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+		i915_gem_object_unpin(dev_priv->pwrctx);
+		drm_gem_object_unreference(&dev_priv->pwrctx->base);
+		dev_priv->pwrctx = NULL;
+	}
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (I915_READ(PWRCTXA)) {
+		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+			 50);
 
 		I915_WRITE(PWRCTXA, 0);
 		POSTING_READ(PWRCTXA);
 
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
-		dev_priv->pwrctx = NULL;
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+		POSTING_READ(RSTDBYCTL);
 	}
+
+	ironlake_teardown_rc6(dev);
 }
 
-static void ironlake_disable_rc6(struct drm_device *dev)
+static int ironlake_setup_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
-	wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
-		 10);
-	POSTING_READ(CCID);
-	I915_WRITE(PWRCTXA, 0);
-	POSTING_READ(PWRCTXA);
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-	POSTING_READ(RSTDBYCTL);
-	i915_gem_object_unpin(dev_priv->renderctx);
-	drm_gem_object_unreference(&dev_priv->renderctx->base);
-	dev_priv->renderctx = NULL;
-	i915_gem_object_unpin(dev_priv->pwrctx);
-	drm_gem_object_unreference(&dev_priv->pwrctx->base);
-	dev_priv->pwrctx = NULL;
+	if (dev_priv->renderctx == NULL)
+		dev_priv->renderctx = intel_alloc_context_page(dev);
+	if (!dev_priv->renderctx)
+		return -ENOMEM;
+
+	if (dev_priv->pwrctx == NULL)
+		dev_priv->pwrctx = intel_alloc_context_page(dev);
+	if (!dev_priv->pwrctx) {
+		ironlake_teardown_rc6(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 void ironlake_enable_rc6(struct drm_device *dev)
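
The RC6 rework above splits allocation into ironlake_setup_rc6() with a matching teardown that doubles as the error-unwind path. The same shape in a standalone sketch, with malloc() standing in for the pinned GEM context pages:

#include <stdio.h>
#include <stdlib.h>

static void *renderctx, *pwrctx;	/* stand-ins for the context pages */

static void teardown_rc6(void)
{
	free(renderctx);
	renderctx = NULL;
	free(pwrctx);
	pwrctx = NULL;
}

static int setup_rc6(void)
{
	if (renderctx == NULL)
		renderctx = malloc(4096);
	if (!renderctx)
		return -1;			/* -ENOMEM in the driver */

	if (pwrctx == NULL)
		pwrctx = malloc(4096);
	if (!pwrctx) {
		teardown_rc6();			/* unwind the first allocation */
		return -1;
	}

	return 0;
}

int main(void)
{
	printf("setup_rc6() = %d\n", setup_rc6());
	teardown_rc6();
	return 0;
}
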
@@ -6516,15 +7175,26 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	/* rc6 disabled by default due to repeated reports of hanging during
+	 * boot and resume.
+	 */
+	if (!i915_enable_rc6)
+		return;
+
+	ret = ironlake_setup_rc6(dev);
+	if (ret)
+		return;
+
 	/*
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
 	ret = BEGIN_LP_RING(6);
 	if (ret) {
-		ironlake_disable_rc6(dev);
+		ironlake_teardown_rc6(dev);
 		return;
 	}
+
 	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
 	OUT_RING(MI_SET_CONTEXT);
 	OUT_RING(dev_priv->renderctx->gtt_offset |
@@ -6541,6 +7211,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
+
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
 {
@@ -6757,10 +7428,6 @@ void intel_modeset_init(struct drm_device *dev)
 	}
 	dev->mode_config.fb_base = dev->agp->base;
 
-	if (IS_MOBILE(dev) || !IS_GEN2(dev))
-		dev_priv->num_pipe = 2;
-	else
-		dev_priv->num_pipe = 1;
 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
 		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
 
@@ -6783,21 +7450,9 @@ void intel_modeset_init(struct drm_device *dev)
 	if (IS_GEN6(dev))
 		gen6_enable_rps(dev_priv);
 
-	if (IS_IRONLAKE_M(dev)) {
-		dev_priv->renderctx = intel_alloc_context_page(dev);
-		if (!dev_priv->renderctx)
-			goto skip_rc6;
-		dev_priv->pwrctx = intel_alloc_context_page(dev);
-		if (!dev_priv->pwrctx) {
-			i915_gem_object_unpin(dev_priv->renderctx);
-			drm_gem_object_unreference(&dev_priv->renderctx->base);
-			dev_priv->renderctx = NULL;
-			goto skip_rc6;
-		}
+	if (IS_IRONLAKE_M(dev))
 		ironlake_enable_rc6(dev);
-	}
 
-skip_rc6:
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1f4242b682c8..d29e33f815d7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -49,6 +49,7 @@ struct intel_dp {
 	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
 	bool has_audio;
 	int force_audio;
+	uint32_t color_range;
 	int dpms_mode;
 	uint8_t link_bw;
 	uint8_t lane_count;
@@ -685,6 +686,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int lane_count = 4, bpp = 24;
 	struct intel_dp_m_n m_n;
+	int pipe = intel_crtc->pipe;
 
 	/*
 	 * Find the lane count in the intel_encoder private
@@ -715,39 +717,19 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 			     mode->clock, adjusted_mode->clock, &m_n);
 
 	if (HAS_PCH_SPLIT(dev)) {
-		if (intel_crtc->pipe == 0) {
-			I915_WRITE(TRANSA_DATA_M1,
-				   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
-				   m_n.gmch_m);
-			I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n);
-			I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m);
-			I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n);
-		} else {
-			I915_WRITE(TRANSB_DATA_M1,
-				   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
-				   m_n.gmch_m);
-			I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n);
-			I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m);
-			I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n);
-		}
+		I915_WRITE(TRANSDATA_M1(pipe),
+			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+			   m_n.gmch_m);
+		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
+		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
+		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
 	} else {
-		if (intel_crtc->pipe == 0) {
-			I915_WRITE(PIPEA_GMCH_DATA_M,
-				   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
-				   m_n.gmch_m);
-			I915_WRITE(PIPEA_GMCH_DATA_N,
-				   m_n.gmch_n);
-			I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
-			I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
-		} else {
-			I915_WRITE(PIPEB_GMCH_DATA_M,
-				   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
-				   m_n.gmch_m);
-			I915_WRITE(PIPEB_GMCH_DATA_N,
-					m_n.gmch_n);
-			I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
-			I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
-		}
+		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
+			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+			   m_n.gmch_m);
+		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
 	}
 }
 
@@ -760,8 +742,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	intel_dp->DP = (DP_VOLTAGE_0_4 |
-		       DP_PRE_EMPHASIS_0);
+	intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+	intel_dp->DP |= intel_dp->color_range;
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -813,6 +795,40 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	}
 }
 
+static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp;
+
+	/*
+	 * If the panel wasn't on, make sure there's not a currently
+	 * active PP sequence before enabling AUX VDD.
+	 */
+	if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
+		msleep(dev_priv->panel_t3);
+
+	pp = I915_READ(PCH_PP_CONTROL);
+	pp |= EDP_FORCE_VDD;
+	I915_WRITE(PCH_PP_CONTROL, pp);
+	POSTING_READ(PCH_PP_CONTROL);
+}
+
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp;
+
+	pp = I915_READ(PCH_PP_CONTROL);
+	pp &= ~EDP_FORCE_VDD;
+	I915_WRITE(PCH_PP_CONTROL, pp);
+	POSTING_READ(PCH_PP_CONTROL);
+
+	/* Make sure sequencer is idle before allowing subsequent activity */
+	msleep(dev_priv->panel_t12);
+}
+
 /* Returns true if the panel was already on when called */
 static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
 {
@@ -834,11 +850,6 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
 
-	/* Ouch. We need to wait here for some panels, like Dell e6510
-	 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
-	 */
-	msleep(300);
-
 	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
 		     5000))
 		DRM_ERROR("panel on wait timed out: 0x%08x\n",
@@ -875,11 +886,6 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
 	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
-
-	/* Ouch. We need to wait here for some panels, like Dell e6510
-	 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
-	 */
-	msleep(300);
 }
 
 static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -945,7 +951,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 
 	if (is_edp(intel_dp)) {
 		ironlake_edp_backlight_off(dev);
-		ironlake_edp_panel_on(intel_dp);
+		ironlake_edp_panel_off(dev);
 		if (!is_pch_edp(intel_dp))
 			ironlake_edp_pll_on(encoder);
 		else
@@ -959,10 +965,15 @@ static void intel_dp_commit(struct drm_encoder *encoder)
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_device *dev = encoder->dev;
 
+	if (is_edp(intel_dp))
+		ironlake_edp_panel_vdd_on(intel_dp);
+
 	intel_dp_start_link_train(intel_dp);
 
-	if (is_edp(intel_dp))
+	if (is_edp(intel_dp)) {
 		ironlake_edp_panel_on(intel_dp);
+		ironlake_edp_panel_vdd_off(intel_dp);
+	}
 
 	intel_dp_complete_link_train(intel_dp);
 
@@ -988,9 +999,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 			ironlake_edp_pll_off(encoder);
 	} else {
 		if (is_edp(intel_dp))
-			ironlake_edp_panel_on(intel_dp);
+			ironlake_edp_panel_vdd_on(intel_dp);
 		if (!(dp_reg & DP_PORT_EN)) {
 			intel_dp_start_link_train(intel_dp);
+			if (is_edp(intel_dp)) {
+				ironlake_edp_panel_on(intel_dp);
+				ironlake_edp_panel_vdd_off(intel_dp);
+			}
 			intel_dp_complete_link_train(intel_dp);
 		}
 		if (is_edp(intel_dp))
@@ -1508,9 +1523,13 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
 {
 	enum drm_connector_status status;
 
-	/* Can't disconnect eDP */
-	if (is_edp(intel_dp))
-		return connector_status_connected;
+	/* Can't disconnect eDP, but you can close the lid... */
+	if (is_edp(intel_dp)) {
+		status = intel_panel_detect(intel_dp->base.base.dev);
+		if (status == connector_status_unknown)
+			status = connector_status_connected;
+		return status;
+	}
 
 	status = connector_status_disconnected;
 	if (intel_dp_aux_native_read(intel_dp,
@@ -1639,11 +1658,30 @@ static int intel_dp_get_modes(struct drm_connector *connector)
 	return 0;
 }
 
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		has_audio = drm_detect_monitor_audio(edid);
+
+		connector->display_info.raw_edid = NULL;
+		kfree(edid);
+	}
+
+	return has_audio;
+}
+
 static int
 intel_dp_set_property(struct drm_connector *connector,
 		      struct drm_property *property,
 		      uint64_t val)
 {
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	int ret;
 
@@ -1652,17 +1690,31 @@ intel_dp_set_property(struct drm_connector *connector,
 		return ret;
 
 	if (property == intel_dp->force_audio_property) {
-		if (val == intel_dp->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_dp->force_audio)
 			return 0;
 
-		intel_dp->force_audio = val;
+		intel_dp->force_audio = i;
 
-		if (val > 0 && intel_dp->has_audio)
+		if (i == 0)
+			has_audio = intel_dp_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_dp->has_audio)
 			return 0;
-		if (val < 0 && !intel_dp->has_audio)
+
+		intel_dp->has_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		if (val == !!intel_dp->color_range)
 			return 0;
 
-		intel_dp->has_audio = val > 0;
+		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
 		goto done;
 	}
 
@@ -1785,6 +1837,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 		intel_dp->force_audio_property->values[1] = 1;
 		drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
 	}
+
+	intel_attach_broadcast_rgb_property(connector);
 }
 
 void
@@ -1802,6 +1856,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 	if (!intel_dp)
 		return;
 
+	intel_dp->output_reg = output_reg;
+	intel_dp->dpms_mode = -1;
+
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_dp);
@@ -1841,10 +1898,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 	connector->interlace_allowed = true;
 	connector->doublescan_allowed = 0;
 
-	intel_dp->output_reg = output_reg;
-	intel_dp->has_audio = false;
-	intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
-
 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
 			 DRM_MODE_ENCODER_TMDS);
 	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
@@ -1882,21 +1935,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 	/* Cache some DPCD data in the eDP case */
 	if (is_edp(intel_dp)) {
 		int ret;
-		bool was_on;
+		u32 pp_on, pp_div;
+
+		pp_on = I915_READ(PCH_PP_ON_DELAYS);
+		pp_div = I915_READ(PCH_PP_DIVISOR);
 
-		was_on = ironlake_edp_panel_on(intel_dp);
+		/* Get T3 & T12 values (note: VESA not bspec terminology) */
+		dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
+		dev_priv->panel_t3 /= 10; /* t3 in 100us units */
+		dev_priv->panel_t12 = pp_div & 0xf;
+		dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
+
+		ironlake_edp_panel_vdd_on(intel_dp);
 		ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
 					       intel_dp->dpcd,
 					       sizeof(intel_dp->dpcd));
+		ironlake_edp_panel_vdd_off(intel_dp);
 		if (ret == sizeof(intel_dp->dpcd)) {
 			if (intel_dp->dpcd[0] >= 0x11)
 				dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
 					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
 		} else {
+			/* if this fails, presume the device is a ghost */
 			DRM_ERROR("failed to retrieve link info\n");
+			intel_dp_destroy(&intel_connector->base);
+			intel_dp_encoder_destroy(&intel_dp->base.base);
+			return;
 		}
-		if (!was_on)
-			ironlake_edp_panel_off(dev);
 	}
 
 	intel_encoder->hot_plug = intel_dp_hot_plug;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 74db2557d644..5daa991cb287 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -217,6 +217,13 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 	return dev_priv->pipe_to_crtc_mapping[pipe];
 }
 
+static inline struct drm_crtc *
+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->plane_to_crtc_mapping[plane];
+}
+
 struct intel_unpin_work {
 	struct work_struct work;
 	struct drm_device *dev;
@@ -230,6 +237,8 @@ struct intel_unpin_work {
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
 
+extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
 void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
@@ -260,6 +269,7 @@ extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
 extern void intel_panel_setup_backlight(struct drm_device *dev);
 extern void intel_panel_enable_backlight(struct drm_device *dev);
 extern void intel_panel_disable_backlight(struct drm_device *dev);
+extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
 
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
@@ -298,7 +308,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
 extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void intel_disable_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
@@ -322,8 +331,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 
 extern void intel_setup_overlay(struct drm_device *dev);
 extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay,
-				    bool interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
 extern int intel_overlay_put_image(struct drm_device *dev, void *data,
 				   struct drm_file *file_priv);
 extern int intel_overlay_attrs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ea373283c93b..6eda1b51c636 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -178,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
 	int pipe = intel_crtc->pipe;
 	u32 dvo_val;
 	u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+	int dpll_reg = DPLL(pipe);
 
 	switch (dvo_reg) {
 	case DVOA:
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0d0273e7b029..f289b8642976 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -41,6 +41,7 @@ struct intel_hdmi {
 	struct intel_encoder base;
 	u32 sdvox_reg;
 	int ddc_bus;
+	uint32_t color_range;
 	bool has_hdmi_sink;
 	bool has_audio;
 	int force_audio;
@@ -124,6 +125,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 	u32 sdvox;
 
 	sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+	sdvox |= intel_hdmi->color_range;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -251,12 +253,34 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
 				   &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 }
 
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct edid *edid;
+	bool has_audio = false;
+
+	edid = drm_get_edid(connector,
+			    &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+	if (edid) {
+		if (edid->input & DRM_EDID_INPUT_DIGITAL)
+			has_audio = drm_detect_monitor_audio(edid);
+
+		connector->display_info.raw_edid = NULL;
+		kfree(edid);
+	}
+
+	return has_audio;
+}
+
 static int
 intel_hdmi_set_property(struct drm_connector *connector,
 		      struct drm_property *property,
 		      uint64_t val)
 {
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	int ret;
 
 	ret = drm_connector_property_set_value(connector, property, val);
@@ -264,17 +288,31 @@ intel_hdmi_set_property(struct drm_connector *connector,
 		return ret;
 
 	if (property == intel_hdmi->force_audio_property) {
-		if (val == intel_hdmi->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_hdmi->force_audio)
 			return 0;
 
-		intel_hdmi->force_audio = val;
+		intel_hdmi->force_audio = i;
+
+		if (i == 0)
+			has_audio = intel_hdmi_detect_audio(connector);
+		else
+			has_audio = i > 0;
 
-		if (val > 0 && intel_hdmi->has_audio)
+		if (has_audio == intel_hdmi->has_audio)
 			return 0;
-		if (val < 0 && !intel_hdmi->has_audio)
+
+		intel_hdmi->has_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		if (val == !!intel_hdmi->color_range)
 			return 0;
 
-		intel_hdmi->has_audio = val > 0;
+		intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
 		goto done;
 	}
 
@@ -336,6 +374,8 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
 		intel_hdmi->force_audio_property->values[1] = 1;
 		drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
 	}
+
+	intel_attach_broadcast_rgb_property(connector);
 }
 
 void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 58040f68ed7a..82d04c5899d2 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -384,7 +384,8 @@ int intel_setup_gmbus(struct drm_device *dev)
 		bus->reg0 = i | GMBUS_RATE_100KHZ;
 
 		/* XXX force bit banging until GMBUS is fully debugged */
-		bus->force_bit = intel_gpio_create(dev_priv, i);
+		if (IS_GEN2(dev))
+			bus->force_bit = intel_gpio_create(dev_priv, i);
 	}
 
 	intel_i2c_reset(dev_priv->dev);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ace8d5d30dd2..1a311ad01116 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -231,6 +231,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 	struct drm_encoder *tmp_encoder;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+	int pipe;
 
 	/* Should never happen!! */
 	if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
@@ -261,12 +262,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		return true;
 	}
 
-	/* Make sure pre-965s set dither correctly */
-	if (INTEL_INFO(dev)->gen < 4) {
-		if (dev_priv->lvds_dither)
-			pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-	}
-
 	/* Native modes don't need fitting */
 	if (adjusted_mode->hdisplay == mode->hdisplay &&
 	    adjusted_mode->vdisplay == mode->vdisplay)
@@ -283,8 +278,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	 * to register description and PRM.
 	 * Change the value here to see the borders for debugging
 	 */
-	I915_WRITE(BCLRPAT_A, 0);
-	I915_WRITE(BCLRPAT_B, 0);
+	for_each_pipe(pipe)
+		I915_WRITE(BCLRPAT(pipe), 0);
 
 	switch (intel_lvds->fitting_mode) {
 	case DRM_MODE_SCALE_CENTER:
@@ -374,10 +369,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	}
 
 out:
+	/* If not enabling scaling, be consistent and always use 0. */
 	if ((pfit_control & PFIT_ENABLE) == 0) {
 		pfit_control = 0;
 		pfit_pgm_ratios = 0;
 	}
+
+	/* Make sure pre-965 set dither correctly */
+	if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
 	if (pfit_control != intel_lvds->pfit_control ||
 	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
 		intel_lvds->pfit_control = pfit_control;
@@ -474,6 +475,10 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
 	struct drm_device *dev = connector->dev;
 	enum drm_connector_status status = connector_status_connected;
 
+	status = intel_panel_detect(dev);
+	if (status != connector_status_unknown)
+		return status;
+
 	/* ACPI lid methods were generally unreliable in this generation, so
 	 * don't even bother.
 	 */
@@ -496,7 +501,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 		return drm_add_edid_modes(connector, intel_lvds->edid);
 
 	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
-	if (mode == 0)
+	if (mode == NULL)
 		return 0;
 
 	drm_mode_probed_add(connector, mode);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index f70b7cf32bff..9034dd8f33c7 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -80,3 +80,33 @@ int intel_ddc_get_modes(struct drm_connector *connector,
 
 	return ret;
 }
+
+static const char *broadcast_rgb_names[] = {
+	"Full",
+	"Limited 16:235",
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+	int i;
+
+	prop = dev_priv->broadcast_rgb_property;
+	if (prop == NULL) {
+		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					   "Broadcast RGB",
+					   ARRAY_SIZE(broadcast_rgb_names));
+		if (prop == NULL)
+			return;
+
+		for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+			drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+		dev_priv->broadcast_rgb_property = prop;
+	}
+
+	drm_connector_attach_property(connector, prop, 0);
+}
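
Both the DP and HDMI connectors store the new Broadcast RGB choice as a single color-range bit and use the val == !!color_range comparison as the "nothing changed" test. A standalone sketch of that bookkeeping, with a placeholder bit value:

#include <stdint.h>
#include <stdio.h>

#define COLOR_RANGE_16_235 (1u << 8)	/* placeholder, not the real bit */

static uint32_t color_range;		/* current connector state */

/* returns nonzero when the state changed and a mode set is needed */
static int set_broadcast_rgb(uint64_t val)
{
	if (val == !!color_range)	/* 0=Full, 1=Limited; already there */
		return 0;
	color_range = val ? COLOR_RANGE_16_235 : 0;
	return 1;
}

int main(void)
{
	printf("changed=%d\n", set_broadcast_rgb(1));	/* Full -> Limited */
	printf("changed=%d\n", set_broadcast_rgb(1));	/* no-op */
	return 0;
}
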
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 64fd64443ca6..d2c710422908 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -39,6 +39,8 @@
 
 #define OPREGION_HEADER_OFFSET 0
 #define OPREGION_ACPI_OFFSET   0x100
+#define   ACPI_CLID 0x01ac /* current lid state indicator */
+#define   ACPI_CDCK 0x01b0 /* current docking state indicator */
 #define OPREGION_SWSCI_OFFSET  0x200
 #define OPREGION_ASLE_OFFSET   0x300
 #define OPREGION_VBT_OFFSET    0x400
@@ -489,6 +491,8 @@ int intel_opregion_setup(struct drm_device *dev)
 	opregion->header = base;
 	opregion->vbt = base + OPREGION_VBT_OFFSET;
 
+	opregion->lid_state = base + ACPI_CLID;
+
 	mboxes = opregion->header->mboxes;
 	if (mboxes & MBOX_ACPI) {
 		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3fbb98b948d6..a670c006982e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 					 struct drm_i915_gem_request *request,
-					 bool interruptible,
 					 void (*tail)(struct intel_overlay *))
 {
 	struct drm_device *dev = overlay->dev;
@@ -221,16 +220,14 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
 	if (ret) {
 	    kfree(request);
 	    return ret;
 	}
 	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
-	ret = i915_do_wait_request(dev,
-				   overlay->last_flip_req, true,
-				   LP_RING(dev_priv));
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
 	if (ret)
 		return ret;
 
@@ -256,7 +253,7 @@ i830_activate_pipe_a(struct drm_device *dev)
 		return 0;
 
 	/* most i8xx have pipe a forced on, so don't trust dpms mode */
-	if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
+	if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
 		return 0;
 
 	crtc_funcs = crtc->base.helper_private;
@@ -322,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+	ret = intel_overlay_do_wait_request(overlay, request, NULL);
 out:
 	if (pipe_a_quirk)
 		i830_deactivate_pipe_a(dev);
@@ -364,7 +361,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	OUT_RING(flip_addr);
         ADVANCE_LP_RING();
 
-	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
 	if (ret) {
 		kfree(request);
 		return ret;
@@ -401,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 }
 
 /* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay,
-			     bool interruptible)
+static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -437,14 +433,13 @@ static int intel_overlay_off(struct intel_overlay *overlay,
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	ADVANCE_LP_RING();
 
-	return intel_overlay_do_wait_request(overlay, request, interruptible,
+	return intel_overlay_do_wait_request(overlay, request,
 					     intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever and make forward progress. */
-static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
-						bool interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -453,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 	if (overlay->last_flip_req == 0)
 		return 0;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req,
-				   interruptible, LP_RING(dev_priv));
+	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
 	if (ret)
 		return ret;
 
@@ -499,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
 
-		ret = intel_overlay_do_wait_request(overlay, request, true,
+		ret = intel_overlay_do_wait_request(overlay, request,
 						    intel_overlay_release_old_vid_tail);
 		if (ret)
 			return ret;
@@ -868,8 +862,7 @@ out_unpin:
 	return ret;
 }
 
-int intel_overlay_switch_off(struct intel_overlay *overlay,
-			     bool interruptible)
+int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
 	struct overlay_registers *regs;
 	struct drm_device *dev = overlay->dev;
@@ -878,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
-	ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+	ret = intel_overlay_recover_from_interrupt(overlay);
 	if (ret != 0)
 		return ret;
 
@@ -893,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
 	regs->OCMD = 0;
 	intel_overlay_unmap_regs(overlay, regs);
 
-	ret = intel_overlay_off(overlay, interruptible);
+	ret = intel_overlay_off(overlay);
 	if (ret != 0)
 		return ret;
 
@@ -1135,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 		mutex_lock(&dev->mode_config.mutex);
 		mutex_lock(&dev->struct_mutex);
 
-		ret = intel_overlay_switch_off(overlay, true);
+		ret = intel_overlay_switch_off(overlay);
 
 		mutex_unlock(&dev->struct_mutex);
 		mutex_unlock(&dev->mode_config.mutex);
@@ -1157,7 +1150,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 
 	new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
 						   put_image_rec->bo_handle));
-	if (!new_bo) {
+	if (&new_bo->base == NULL) {
 		ret = -ENOENT;
 		goto out_free;
 	}
@@ -1171,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	ret = intel_overlay_recover_from_interrupt(overlay, true);
+	ret = intel_overlay_recover_from_interrupt(overlay);
 	if (ret != 0)
 		goto out_unlock;
 
 	if (overlay->crtc != crtc) {
 		struct drm_display_mode *mode = &crtc->base.mode;
-		ret = intel_overlay_switch_off(overlay, true);
+		ret = intel_overlay_switch_off(overlay);
 		if (ret != 0)
 			goto out_unlock;
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458d..18391b3ec2c1 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,8 +30,6 @@
 
 #include "intel_drv.h"
 
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
-
 void
 intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 		       struct drm_display_mode *adjusted_mode)
@@ -112,19 +110,6 @@ done:
 	dev_priv->pch_pf_size = (width << 16) | height;
 }
 
-static int is_backlight_combination_mode(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (INTEL_INFO(dev)->gen >= 4)
-		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
-
-	if (IS_GEN2(dev))
-		return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
-
-	return 0;
-}
-
 static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
 {
 	u32 val;
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
 			if (INTEL_INFO(dev)->gen < 4)
 				max &= ~1;
 		}
-
-		if (is_backlight_combination_mode(dev))
-			max *= 0xff;
 	}
 
 	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
 		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 		if (IS_PINEVIEW(dev))
 			val >>= 1;
-
-		if (is_backlight_combination_mode(dev)){
-			u8 lbpc;
-
-			val &= ~1;
-			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
-			val *= lbpc;
-			val >>= 1;
-		}
 	}
 
 	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 
 	if (HAS_PCH_SPLIT(dev))
 		return intel_pch_panel_set_backlight(dev, level);
-
-	if (is_backlight_combination_mode(dev)){
-		u32 max = intel_panel_get_max_backlight(dev);
-		u8 lpbc;
-
-		lpbc = level * 0xfe / max + 1;
-		level /= lpbc;
-		pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
-	}
-
 	tmp = I915_READ(BLC_PWM_CTL);
 	if (IS_PINEVIEW(dev)) {
 		tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
@@ -281,3 +244,22 @@ void intel_panel_setup_backlight(struct drm_device *dev)
 	dev_priv->backlight_level = intel_panel_get_backlight(dev);
 	dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
 }
+
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (i915_panel_ignore_lid)
+		return i915_panel_ignore_lid > 0 ?
+			connector_status_connected :
+			connector_status_disconnected;
+
+	/* Assume that the BIOS does not lie through the OpRegion... */
+	if (dev_priv->opregion.lid_state)
+		return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+			connector_status_connected :
+			connector_status_disconnected;
+
+	return connector_status_unknown;
+}
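
The new intel_panel_detect() above turns two inputs, the i915_panel_ignore_lid module parameter and the ACPI OpRegion lid state, into a connector status. A minimal user-space sketch of the same decision tree follows; the enum and parameter names are illustrative stand-ins, only the bit-0 test and the override ordering come from the patch.

/* Illustrative sketch of the decision tree in intel_panel_detect():
 * a user override wins, then the OpRegion lid bit, then "unknown".
 * The enum and parameter names are stand-ins; only the bit-0 test and
 * the ordering come from the patch. */
#include <stdio.h>

enum status { DISCONNECTED, CONNECTED, UNKNOWN };

static enum status panel_detect(int panel_ignore_lid, const unsigned *lid_state)
{
	if (panel_ignore_lid)			/* module-parameter style override */
		return panel_ignore_lid > 0 ? CONNECTED : DISCONNECTED;

	if (lid_state)				/* trust bit 0 of the reported lid state */
		return (*lid_state & 0x1) ? CONNECTED : DISCONNECTED;

	return UNKNOWN;				/* no information available */
}

int main(void)
{
	unsigned lid_open = 1;

	printf("%d\n", panel_detect(0, &lid_open));	/* lid open: CONNECTED */
	printf("%d\n", panel_detect(-1, NULL));		/* forced off: DISCONNECTED */
	return 0;
}
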
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6218fa97aa1e..789c47801ba8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -62,18 +62,9 @@ render_ring_flush(struct intel_ring_buffer *ring,
 		  u32	flush_domains)
 {
 	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
 	int ret;
 
-#if WATCH_EXEC
-	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
-		  invalidate_domains, flush_domains);
-#endif
-
-	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
-				     invalidate_domains, flush_domains);
-
 	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
 		/*
 		 * read/write caches:
@@ -122,9 +113,6 @@ render_ring_flush(struct intel_ring_buffer *ring,
 		    (IS_G4X(dev) || IS_GEN5(dev)))
 			cmd |= MI_INVALIDATE_ISP;
 
-#if WATCH_EXEC
-		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
 		ret = intel_ring_begin(ring, 2);
 		if (ret)
 			return ret;
@@ -612,7 +600,6 @@ ring_add_request(struct intel_ring_buffer *ring,
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
 
-	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
 	*result = seqno;
 	return 0;
 }
@@ -715,11 +702,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 				u32 offset, u32 len)
 {
 	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
-	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
-
 	if (IS_I830(dev) || IS_845G(dev)) {
 		ret = intel_ring_begin(ring, 4);
 		if (ret)
@@ -894,6 +878,10 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 	/* Disable the ring buffer. The ring must be idle at this point */
 	dev_priv = ring->dev->dev_private;
 	ret = intel_wait_ring_buffer(ring, ring->size - 8);
+	if (ret)
+		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+			  ring->name, ret);
+
 	I915_WRITE_CTL(ring, 0);
 
 	drm_core_ioremapfree(&ring->map, ring->dev);
@@ -950,13 +938,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 			return 0;
 	}
 
-	trace_i915_ring_wait_begin (dev);
+	trace_i915_ring_wait_begin(ring);
 	end = jiffies + 3 * HZ;
 	do {
 		ring->head = I915_READ_HEAD(ring);
 		ring->space = ring_space(ring);
 		if (ring->space >= n) {
-			trace_i915_ring_wait_end(dev);
+			trace_i915_ring_wait_end(ring);
 			return 0;
 		}
 
@@ -970,16 +958,20 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 		if (atomic_read(&dev_priv->mm.wedged))
 			return -EAGAIN;
 	} while (!time_after(jiffies, end));
-	trace_i915_ring_wait_end (dev);
+	trace_i915_ring_wait_end(ring);
 	return -EBUSY;
 }
 
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	int n = 4*num_dwords;
 	int ret;
 
+	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
+		return -EIO;
+
 	if (unlikely(ring->tail + n > ring->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
@@ -1059,22 +1051,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 }
 
 static int gen6_ring_flush(struct intel_ring_buffer *ring,
-			   u32 invalidate_domains,
-			   u32 flush_domains)
+			   u32 invalidate, u32 flush)
 {
+	uint32_t cmd;
 	int ret;
 
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+	if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_FLUSH_DW);
-	intel_ring_emit(ring, 0);
+	cmd = MI_FLUSH_DW;
+	if (invalidate & I915_GEM_GPU_DOMAINS)
+		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
 }
@@ -1230,22 +1225,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
 }
 
 static int blt_ring_flush(struct intel_ring_buffer *ring,
-			   u32 invalidate_domains,
-			   u32 flush_domains)
+			  u32 invalidate, u32 flush)
 {
+	uint32_t cmd;
 	int ret;
 
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+	if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
 		return 0;
 
 	ret = blt_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_FLUSH_DW);
-	intel_ring_emit(ring, 0);
+	cmd = MI_FLUSH_DW;
+	if (invalidate & I915_GEM_DOMAIN_RENDER)
+		cmd |= MI_INVALIDATE_TLB;
+	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
 }
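
Both gen6_ring_flush() and blt_ring_flush() above now build a single MI_FLUSH_DW command word and add invalidate bits only when the caller actually requested invalidation. The stand-alone sketch below models that flag-building pattern; the bit values are placeholders, not the real MI_FLUSH_DW encoding, and only the control flow mirrors the patch.

/* Sketch of the flag-building pattern used by the new flush helpers. */
#include <stdint.h>
#include <stdio.h>

#define FLUSH_DW	(1u << 0)	/* assumed: base flush command   */
#define INV_TLB		(1u << 1)	/* assumed: TLB invalidate bit   */
#define INV_BSD		(1u << 2)	/* assumed: BSD invalidate bit   */
#define GPU_DOMAINS	0x3fu		/* assumed: mask of GPU domains  */

static uint32_t build_flush_cmd(uint32_t invalidate, uint32_t flush)
{
	uint32_t cmd;

	if (((invalidate | flush) & GPU_DOMAINS) == 0)
		return 0;			/* nothing touches the GPU: no command */

	cmd = FLUSH_DW;
	if (invalidate & GPU_DOMAINS)		/* request invalidation only on demand */
		cmd |= INV_TLB | INV_BSD;
	return cmd;
}

int main(void)
{
	printf("0x%x\n", (unsigned)build_flush_cmd(GPU_DOMAINS, 0)); /* flush + invalidate */
	printf("0x%x\n", (unsigned)build_flush_cmd(0, 0));           /* nothing to emit */
	return 0;
}
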
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6d6fde85a636..f23cc5f037a6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct  intel_hw_status_page {
 	struct		drm_i915_gem_object *obj;
 };
 
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
 
 #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
 
 #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
 
 #define I915_READ_HEAD(ring)  I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
 
 #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
 
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
 #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
@@ -43,7 +44,7 @@ struct  intel_ring_buffer {
 		RING_BLT = 0x4,
 	} id;
 	u32		mmio_base;
-	void		*virtual_start;
+	void		__iomem *virtual_start;
 	struct		drm_device *dev;
 	struct		drm_i915_gem_object *obj;
 
@@ -58,6 +59,7 @@ struct  intel_ring_buffer {
 	u32		irq_refcount;
 	u32		irq_mask;
 	u32		irq_seqno;		/* last seq seen at irq time */
+	u32		trace_irq_seqno;
 	u32		waiting_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];
 	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
@@ -141,6 +143,26 @@ intel_read_status_page(struct intel_ring_buffer *ring,
 	return ioread32(ring->status_page.page_addr + reg);
 }
 
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_GEM_HWS_INDEX		0x20
+#define I915_BREADCRUMB_INDEX		0x21
+
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
@@ -166,6 +188,12 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
+static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+{
+	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+		ring->trace_irq_seqno = seqno;
+}
+
 /* DRI warts */
 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
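
The status-page comment added above describes a shared page of dwords, with dword 0x21 reserved for the driver's breadcrumb and everything from 0x20 upward free for driver use. A small stand-alone sketch of reading that slot, assuming the page is just an array of u32s (the kernel reads it through ioread32() on an iomem mapping):

/* Sketch: treat the hardware status page described above as a plain
 * array of dwords and read the driver's breadcrumb slot out of it.
 * The index values come from the patch; everything else is a
 * user-space model. */
#include <stdint.h>
#include <stdio.h>

#define HWS_DWORDS		0x400	/* one 4 KiB page of 32-bit words */
#define I915_BREADCRUMB_INDEX	0x21	/* driver-owned dword, as in the patch */

static uint32_t read_status_page(const uint32_t *page, unsigned reg)
{
	return page[reg];
}

int main(void)
{
	uint32_t hws[HWS_DWORDS] = { 0 };

	hws[I915_BREADCRUMB_INDEX] = 42;	/* pretend the ring wrote a seqno here */
	printf("breadcrumb = %u\n",
	       (unsigned)read_status_page(hws, I915_BREADCRUMB_INDEX));
	return 0;
}
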
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 318f398e6b2e..4324f33212d6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,6 +46,7 @@
                          SDVO_TV_MASK)
 
 #define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
 
@@ -92,6 +93,12 @@ struct intel_sdvo {
 	uint16_t attached_output;
 
 	/**
+	 * This is used to select the color range of RGB outputs in HDMI mode.
+	 * It is only valid when using TMDS encoding and 8 bit per color mode.
+	 */
+	uint32_t color_range;
+
+	/**
 	 * This is set if we're going to treat the device as TV-out.
 	 *
 	 * While we have these nice friendly flags for output types that ought
@@ -584,6 +591,7 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i
 {
 	struct intel_sdvo_get_trained_inputs_response response;
 
+	BUILD_BUG_ON(sizeof(response) != 1);
 	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
 				  &response, sizeof(response)))
 		return false;
@@ -631,6 +639,7 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo
 {
 	struct intel_sdvo_pixel_clock_range clocks;
 
+	BUILD_BUG_ON(sizeof(clocks) != 4);
 	if (!intel_sdvo_get_value(intel_sdvo,
 				  SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
 				  &clocks, sizeof(clocks)))
@@ -698,6 +707,8 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
 static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
 						  struct intel_sdvo_dtd *dtd)
 {
+	BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+	BUILD_BUG_ON(sizeof(dtd->part2) != 8);
 	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
 				    &dtd->part1, sizeof(dtd->part1)) &&
 		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
@@ -795,6 +806,7 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
 {
 	struct intel_sdvo_encode encode;
 
+	BUILD_BUG_ON(sizeof(encode) != 2);
 	return intel_sdvo_get_value(intel_sdvo,
 				  SDVO_CMD_GET_SUPP_ENCODE,
 				  &encode, sizeof(encode));
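
The BUILD_BUG_ON(sizeof(...) != N) checks sprinkled through intel_sdvo.c above pin the wire size of each SDVO reply structure at compile time. A minimal sketch of the idea, using a stand-in for the kernel macro and an illustrative two-byte reply rather than the driver's real layout:

/* Stand-in sketch for the kernel's BUILD_BUG_ON(): if a reply
 * structure ever drifts from the size the wire protocol expects, the
 * build fails instead of silently exchanging the wrong number of
 * bytes. */
#include <stdint.h>

#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct encode_reply {		/* pretend two-byte wire format */
	uint8_t dvi;
	uint8_t hdmi;
};

static void check_wire_sizes(void)
{
	BUILD_BUG_ON(sizeof(struct encode_reply) != 2);	/* compile-time guard */
}

int main(void)
{
	check_wire_sizes();
	return 0;
}
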
@@ -1050,6 +1062,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	/* Set the SDVO control regs. */
 	if (INTEL_INFO(dev)->gen >= 4) {
 		sdvox = 0;
+		if (intel_sdvo->is_hdmi)
+			sdvox |= intel_sdvo->color_range;
 		if (INTEL_INFO(dev)->gen < 5)
 			sdvox |= SDVO_BORDER_ENABLE;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1161,6 +1175,7 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
 
 static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
 {
+	BUILD_BUG_ON(sizeof(*caps) != 8);
 	if (!intel_sdvo_get_value(intel_sdvo,
 				  SDVO_CMD_GET_DEVICE_CAPS,
 				  caps, sizeof(*caps)))
@@ -1267,33 +1282,9 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
 static bool
 intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
 {
-	int caps = 0;
-
-	if (intel_sdvo->caps.output_flags &
-		(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
-		caps++;
-	if (intel_sdvo->caps.output_flags &
-		(SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
-		caps++;
-	if (intel_sdvo->caps.output_flags &
-		(SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
-		caps++;
-	if (intel_sdvo->caps.output_flags &
-		(SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
-		caps++;
-	if (intel_sdvo->caps.output_flags &
-		(SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
-		caps++;
-
-	if (intel_sdvo->caps.output_flags &
-		(SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
-		caps++;
-
-	if (intel_sdvo->caps.output_flags &
-		(SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
-		caps++;
-
-	return (caps > 1);
+	/* Is there more than one type of output? */
+	int caps = intel_sdvo->caps.output_flags & 0xf;
+	return caps & -caps;
 }
 
 static struct edid *
@@ -1359,7 +1350,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 				intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
 				intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
 			}
-		}
+		} else
+			status = connector_status_disconnected;
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1407,10 +1399,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 
 	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
-	else if (response & SDVO_TMDS_MASK)
+	else if (IS_TMDS(intel_sdvo_connector))
 		ret = intel_sdvo_hdmi_sink_detect(connector);
-	else
-		ret = connector_status_connected;
+	else {
+		struct edid *edid;
+
+		/* if we have an edid check it matches the connection */
+		edid = intel_sdvo_get_edid(connector);
+		if (edid == NULL)
+			edid = intel_sdvo_get_analog_edid(connector);
+		if (edid != NULL) {
+			if (edid->input & DRM_EDID_INPUT_DIGITAL)
+				ret = connector_status_disconnected;
+			else
+				ret = connector_status_connected;
+			connector->display_info.raw_edid = NULL;
+			kfree(edid);
+		} else
+			ret = connector_status_connected;
+	}
 
 	/* May update encoder flags, e.g. clock for SDVO TV, etc. */
 	if (ret == connector_status_connected) {
@@ -1446,10 +1453,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 		edid = intel_sdvo_get_analog_edid(connector);
 
 	if (edid != NULL) {
-		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+		bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+		bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
+
+		if (connector_is_digital == monitor_is_digital) {
 			drm_mode_connector_update_edid_property(connector, edid);
 			drm_add_edid_modes(connector, edid);
 		}
+
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1668,6 +1680,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
 	kfree(connector);
 }
 
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	if (!intel_sdvo->is_hdmi)
+		return false;
+
+	edid = intel_sdvo_get_edid(connector);
+	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+		has_audio = drm_detect_monitor_audio(edid);
+
+	return has_audio;
+}
+
 static int
 intel_sdvo_set_property(struct drm_connector *connector,
 			struct drm_property *property,
@@ -1675,6 +1703,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	uint16_t temp_value;
 	uint8_t cmd;
 	int ret;
@@ -1684,17 +1713,31 @@ intel_sdvo_set_property(struct drm_connector *connector,
 		return ret;
 
 	if (property == intel_sdvo_connector->force_audio_property) {
-		if (val == intel_sdvo_connector->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_sdvo_connector->force_audio)
 			return 0;
 
-		intel_sdvo_connector->force_audio = val;
+		intel_sdvo_connector->force_audio = i;
+
+		if (i == 0)
+			has_audio = intel_sdvo_detect_hdmi_audio(connector);
+		else
+			has_audio = i > 0;
 
-		if (val > 0 && intel_sdvo->has_hdmi_audio)
+		if (has_audio == intel_sdvo->has_hdmi_audio)
 			return 0;
-		if (val < 0 && !intel_sdvo->has_hdmi_audio)
+
+		intel_sdvo->has_hdmi_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		if (val == !!intel_sdvo->color_range)
 			return 0;
 
-		intel_sdvo->has_hdmi_audio = val > 0;
+		intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
 		goto done;
 	}
 
@@ -2002,6 +2045,9 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
 		drm_connector_attach_property(&connector->base.base,
 					      connector->force_audio_property, 0);
 	}
+
+	if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+		intel_attach_broadcast_rgb_property(&connector->base.base);
 }
 
 static bool
@@ -2224,6 +2270,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 	if (!intel_sdvo_set_target_output(intel_sdvo, type))
 		return false;
 
+	BUILD_BUG_ON(sizeof(format) != 6);
 	if (!intel_sdvo_get_value(intel_sdvo,
 				  SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
 				  &format, sizeof(format)))
@@ -2430,6 +2477,8 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 		uint16_t response;
 	} enhancements;
 
+	BUILD_BUG_ON(sizeof(enhancements) != 2);
+
 	enhancements.response = 0;
 	intel_sdvo_get_value(intel_sdvo,
 			     SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
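
intel_sdvo_detect() and intel_sdvo_get_ddc_modes() above now accept an EDID only when its digital/analog flag agrees with the connector type (IS_TMDS). A toy version of that matching rule, with illustrative names rather than the driver's:

/* Toy version of the EDID/connector cross-check added above: an EDID
 * is only trusted when its digital bit matches the kind of output it
 * was fetched for. */
#include <stdbool.h>
#include <stdio.h>

static bool edid_matches_connector(bool edid_is_digital, bool connector_is_tmds)
{
	return edid_is_digital == connector_is_tmds;
}

int main(void)
{
	printf("%d\n", edid_matches_connector(true, true));   /* digital EDID, TMDS output: accept */
	printf("%d\n", edid_matches_connector(true, false));  /* digital EDID, analog output: reject */
	return 0;
}
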
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 93206e4eaa6f..4256b8ef3947 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1006,6 +1006,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	const struct video_levels *video_levels;
 	const struct color_conversion *color_conversion;
 	bool burst_ena;
+	int pipe = intel_crtc->pipe;
 
 	if (!tv_mode)
 		return;	/* can't happen (mode_prepare prevents this) */
@@ -1149,14 +1150,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 			   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
 			    (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
 	{
-		int pipeconf_reg = (intel_crtc->pipe == 0) ?
-			PIPEACONF : PIPEBCONF;
-		int dspcntr_reg = (intel_crtc->plane == 0) ?
-			DSPACNTR : DSPBCNTR;
+		int pipeconf_reg = PIPECONF(pipe);
+		int dspcntr_reg = DSPCNTR(pipe);
 		int pipeconf = I915_READ(pipeconf_reg);
 		int dspcntr = I915_READ(dspcntr_reg);
-		int dspbase_reg = (intel_crtc->plane == 0) ?
-			DSPAADDR : DSPBADDR;
+		int dspbase_reg = DSPADDR(pipe);
 		int xpos = 0x0, ypos = 0x0;
 		unsigned int xsize, ysize;
 		/* Pipe must be off here */
@@ -1234,7 +1232,8 @@ static const struct drm_display_mode reported_modes[] = {
  * \return false if TV is disconnected.
  */
 static int
-intel_tv_detect_type (struct intel_tv *intel_tv)
+intel_tv_detect_type (struct intel_tv *intel_tv,
+		      struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = &intel_tv->base.base;
 	struct drm_device *dev = encoder->dev;
@@ -1245,11 +1244,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, 0,
-			      PIPE_HOTPLUG_INTERRUPT_ENABLE |
-			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_disable_pipestat(dev_priv, 0,
+				      PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 
 	save_tv_dac = tv_dac = I915_READ(TV_DAC);
 	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1302,11 +1303,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 	I915_WRITE(TV_CTL, save_tv_ctl);
 
 	/* Restore interrupt config */
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0,
-			     PIPE_HOTPLUG_INTERRUPT_ENABLE |
-			     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_enable_pipestat(dev_priv, 0,
+				     PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 
 	return type;
 }
@@ -1356,7 +1359,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
 	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
-		type = intel_tv_detect_type(intel_tv);
+		type = intel_tv_detect_type(intel_tv, connector);
 	} else if (force) {
 		struct drm_crtc *crtc;
 		int dpms_mode;
@@ -1364,7 +1367,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 		crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
 						  &mode, &dpms_mode);
 		if (crtc) {
-			type = intel_tv_detect_type(intel_tv);
+			type = intel_tv_detect_type(intel_tv, connector);
 			intel_release_load_detect_pipe(&intel_tv->base, connector,
 						       dpms_mode);
 		} else
@@ -1658,6 +1661,18 @@ intel_tv_init(struct drm_device *dev)
 	intel_encoder = &intel_tv->base;
 	connector = &intel_connector->base;
 
+	/* The documentation, for the older chipsets at least, recommends
+	 * using a polling method rather than hotplug detection for TVs.
+	 * This is because in order to perform the hotplug detection, the PLLs
+	 * for the TV must be kept alive increasing power drain and starving
+	 * bandwidth from other encoders. Notably, it causes
+	 * pipe underruns on Crestline when this encoder is supposedly idle.
+	 *
+	 * More recent chipsets favour HDMI rather than integrated S-Video.
+	 */
+	connector->polled =
+		DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
 			   DRM_MODE_CONNECTOR_SVIDEO);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 3fcffcf75e35..2ad49cbf7c8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-	nouveau_vm_put(&nvbo->vma);
+	if (nvbo->vma.node) {
+		nouveau_vm_unmap(&nvbo->vma);
+		nouveau_vm_put(&nvbo->vma);
+	}
 	kfree(nvbo);
 }
 
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d56f08d3cbdc..a2199fe9fa9b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
 
 	switch (radeon_crtc->rmx_type) {
 	case RMX_CENTER:
-		args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-		args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-		args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
-		args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
 		break;
 	case RMX_ASPECT:
 		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
 		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
 
 		if (a1 > a2) {
-			args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
-			args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
 		} else if (a2 > a1) {
-			args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
-			args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
 		}
 		break;
 	case RMX_FULL:
 	default:
-		args.usOverscanRight = radeon_crtc->h_border;
-		args.usOverscanLeft = radeon_crtc->h_border;
-		args.usOverscanBottom = radeon_crtc->v_border;
-		args.usOverscanTop = radeon_crtc->v_border;
+		args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+		args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
 		break;
 	}
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -419,23 +419,23 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
 	memset(&args, 0, sizeof(args));
 
 	if (ASIC_IS_DCE5(rdev)) {
-		args.v3.usSpreadSpectrumAmountFrac = 0;
+		args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
 		args.v3.ucSpreadSpectrumType = ss->type;
 		switch (pll_id) {
 		case ATOM_PPLL1:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
-			args.v3.usSpreadSpectrumAmount = ss->amount;
-			args.v3.usSpreadSpectrumStep = ss->step;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_PPLL2:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
-			args.v3.usSpreadSpectrumAmount = ss->amount;
-			args.v3.usSpreadSpectrumStep = ss->step;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_DCPLL:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
-			args.v3.usSpreadSpectrumAmount = 0;
-			args.v3.usSpreadSpectrumStep = 0;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
 			break;
 		case ATOM_PPLL_INVALID:
 			return;
@@ -447,18 +447,18 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
 		switch (pll_id) {
 		case ATOM_PPLL1:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
-			args.v2.usSpreadSpectrumAmount = ss->amount;
-			args.v2.usSpreadSpectrumStep = ss->step;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_PPLL2:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
-			args.v2.usSpreadSpectrumAmount = ss->amount;
-			args.v2.usSpreadSpectrumStep = ss->step;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_DCPLL:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
-			args.v2.usSpreadSpectrumAmount = 0;
-			args.v2.usSpreadSpectrumStep = 0;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
 			break;
 		case ATOM_PPLL_INVALID:
 			return;
@@ -538,7 +538,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 		else
 			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -555,29 +554,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 					dp_clock = dig_connector->dp_clock;
 				}
 			}
-/* this might work properly with the new pll algo */
-#if 0 /* doesn't work properly on some laptops */
+
 			/* use recommended ref_div for ss */
 			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 				if (ss_enabled) {
 					if (ss->refdiv) {
+						pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
 						pll->flags |= RADEON_PLL_USE_REF_DIV;
 						pll->reference_div = ss->refdiv;
+						if (ASIC_IS_AVIVO(rdev))
+							pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 					}
 				}
 			}
-#endif
+
 			if (ASIC_IS_AVIVO(rdev)) {
 				/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
 				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
 					adjusted_clock = mode->clock * 2;
 				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
 					pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
-				/* rv515 needs more testing with this option */
-				if (rdev->family != CHIP_RV515) {
-					if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-						pll->flags |= RADEON_PLL_IS_LCD;
-				}
+				if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+					pll->flags |= RADEON_PLL_IS_LCD;
 			} else {
 				if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
 					pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -664,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 						   index, (uint32_t *)&args);
 				adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
 				if (args.v3.sOutput.ucRefDiv) {
+					pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 					pll->flags |= RADEON_PLL_USE_REF_DIV;
 					pll->reference_div = args.v3.sOutput.ucRefDiv;
 				}
 				if (args.v3.sOutput.ucPostDiv) {
+					pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 					pll->flags |= RADEON_PLL_USE_POST_DIV;
 					pll->post_div = args.v3.sOutput.ucPostDiv;
 				}
@@ -721,14 +721,14 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
 			 * SetPixelClock provides the dividers
 			 */
 			args.v5.ucCRTC = ATOM_CRTC_INVALID;
-			args.v5.usPixelClock = dispclk;
+			args.v5.usPixelClock = cpu_to_le16(dispclk);
 			args.v5.ucPpll = ATOM_DCPLL;
 			break;
 		case 6:
 			/* if the default dcpll clock is specified,
 			 * SetPixelClock provides the dividers
 			 */
-			args.v6.ulDispEngClkFreq = dispclk;
+			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
 			args.v6.ucPpll = ATOM_DCPLL;
 			break;
 		default:
@@ -957,11 +957,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 	/* adjust pixel clock as needed */
 	adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
 
-	/* rv515 seems happier with the old algo */
-	if (rdev->family == CHIP_RV515)
-		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-					  &ref_div, &post_div);
-	else if (ASIC_IS_AVIVO(rdev))
+	if (ASIC_IS_AVIVO(rdev))
 		radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
 					 &ref_div, &post_div);
 	else
@@ -995,9 +991,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 	}
 }
 
-static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
-				      struct drm_framebuffer *fb,
-				      int x, int y, int atomic)
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 int x, int y, int atomic)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -1137,12 +1133,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
-		       EVERGREEN_INTERLEAVE_EN);
-	else
-		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
 		rbo = gem_to_radeon_bo(radeon_fb->obj);
@@ -1300,12 +1290,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-		       AVIVO_D1MODE_INTERLEAVE_EN);
-	else
-		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
 		rbo = gem_to_radeon_bo(radeon_fb->obj);
@@ -1329,7 +1313,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	struct radeon_device *rdev = dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev))
-		return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
+		return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	else
@@ -1344,7 +1328,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
        struct radeon_device *rdev = dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev))
-		return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
+		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
 	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
 	else
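
The atombios_crtc.c changes above (and the evergreen changes that follow) wrap values destined for AtomBIOS parameter tables and ring entries in cpu_to_le16()/cpu_to_le32(), because those structures are little-endian regardless of host byte order. A user-space sketch of the 16-bit case, assuming the compiler's __BYTE_ORDER__ macros and standing in for the kernel helpers:

/* Big-endian hosts must byte-swap before handing values to the BIOS
 * tables; little-endian hosts pass them through unchanged. */
#include <stdint.h>
#include <stdio.h>

static uint16_t to_le16(uint16_t v)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
	return (uint16_t)((v << 8) | (v >> 8));	/* swap on big-endian hosts */
#else
	return v;				/* already in table byte order */
#endif
}

int main(void)
{
	uint16_t overscan = 0x1234;

	printf("value written to the table: 0x%04x\n", (unsigned)to_le16(overscan));
	return 0;
}
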
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d4045223d0ff..789441ed9837 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1192,7 +1192,11 @@ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_write(rdev, 1);
 	/* FIXME: implement */
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, ib->length_dw);
 }
@@ -1207,7 +1211,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 		return -EINVAL;
 
 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	fw_data = (const __be32 *)rdev->pfp_fw->data;
 	WREG32(CP_PFP_UCODE_ADDR, 0);
@@ -1326,7 +1334,11 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB_WPTR, 0);
 
 	/* set the wb address whether it's enabled or not */
-	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+	       RB_RPTR_SWAP(2) |
+#endif
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
@@ -2627,8 +2639,8 @@ restart_ih:
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
-		src_id =  rdev->ih.ring[ring_index] & 0xff;
-		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 
 		switch (src_id) {
 		case 1: /* D1 vblank/vline */
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2ed930e02f3a..3218287f4c51 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format,
 	if (h < 8)
 		h = 8;
 
-	cb_color_info = ((format << 2) | (1 << 24));
+	cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
@@ -133,6 +133,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 
 	/* high addr, stride */
 	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 	/* xyzw swizzles */
 	sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
 
@@ -173,7 +176,7 @@ set_tex_resource(struct radeon_device *rdev,
 	sq_tex_resource_word0 = (1 << 0); /* 2D */
 	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
 				  ((w - 1) << 18));
-	sq_tex_resource_word1 = ((h - 1) << 0);
+	sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
 	/* xyzw swizzles */
 	sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
 
@@ -221,7 +224,11 @@ draw_auto(struct radeon_device *rdev)
 	radeon_ring_write(rdev, DI_PT_RECTLIST);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
 	radeon_ring_write(rdev, 1);
@@ -541,7 +548,7 @@ static inline uint32_t i2f(uint32_t input)
 int evergreen_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
-	int r, dwords;
+	int i, r, dwords;
 	void *ptr;
 	u32 packet2s[16];
 	int num_packet2s = 0;
@@ -557,7 +564,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
 
 	dwords = rdev->r600_blit.state_len;
 	while (dwords & 0xf) {
-		packet2s[num_packet2s++] = PACKET2(0);
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
 		dwords++;
 	}
 
@@ -598,8 +605,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-	memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
-	memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
+	for (i = 0; i < evergreen_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+	for (i = 0; i < evergreen_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index ef1d28c07fbf..3a10399e0066 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -311,11 +311,19 @@ const u32 evergreen_vs[] =
 	0x00000000,
 	0x3c000000,
 	0x67961001,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
 	0x00080000,
+#endif
 	0x00000000,
 	0x1c000000,
 	0x67961000,
+#ifdef __BIG_ENDIAN
+	0x00020008,
+#else
 	0x00000008,
+#endif
 	0x00000000,
 };
 
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 21e839bd20e7..9aaa3f0c9372 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -98,6 +98,7 @@
 #define		BUF_SWAP_32BIT					(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
 #define	CP_RB_RPTR_ADDR_HI				0xC110
 #define	CP_RB_RPTR_WR					0xC108
 #define	CP_RB_WPTR					0xC114
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 607241c6a8a9..5a82b6b75849 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename)
 	last_reg = strtol(last_reg_s, NULL, 16);
 
 	do {
-		if (fgets(buf, 1024, file) == NULL)
+		if (fgets(buf, 1024, file) == NULL) {
+			fclose(file);
 			return -1;
+		}
 		len = strlen(buf);
 		if (ftell(file) == end)
 			done = 1;
@@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename)
 				fprintf(stderr,
 					"Error matching regular expression %d in %s\n",
 					r, filename);
+				fclose(file);
 				return -1;
 			} else {
 				buf[match[0].rm_eo] = 0;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5f15820efe12..93fa735c8c1a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1427,6 +1427,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
@@ -1439,6 +1440,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_PP_TXOFFSET_0:
@@ -1454,6 +1456,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T0_0:
 	case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1471,6 +1474,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		track->textures[0].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[0].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T1_0:
 	case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1488,6 +1492,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		track->textures[1].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[1].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T2_0:
 	case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1505,9 +1510,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		track->textures[2].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[2].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
 		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_COLORPITCH:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1528,9 +1536,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		ib[idx] = tmp;
 
 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
 		break;
 	case RADEON_RB3D_DEPTHPITCH:
 		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_CNTL:
 		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1555,6 +1565,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZSTENCILCNTL:
 		switch (idx_value & 0xf) {
@@ -1572,6 +1584,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		default:
 			break;
 		}
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1588,6 +1601,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
 		}
 		break;
 	case RADEON_SE_VF_CNTL:
@@ -1602,12 +1616,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TEX_PITCH_0:
 	case RADEON_PP_TEX_PITCH_1:
 	case RADEON_PP_TEX_PITCH_2:
 		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
 		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TXFILTER_0:
 	case RADEON_PP_TXFILTER_1:
@@ -1621,6 +1637,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		tmp = (idx_value >> 27) & 0x7;
 		if (tmp == 2 || tmp == 6)
 			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TXFORMAT_0:
 	case RADEON_PP_TXFORMAT_1:
@@ -1673,6 +1690,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_FACES_0:
 	case RADEON_PP_CUBIC_FACES_1:
@@ -1683,6 +1701,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
 			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
 		}
+		track->tex_dirty = true;
 		break;
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -3318,9 +3337,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 	unsigned long size;
 	unsigned prim_walk;
 	unsigned nverts;
-	unsigned num_cb = track->num_cb;
+	unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
 
-	if (!track->zb_cb_clear && !track->color_channel_mask &&
+	if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
 	    !track->blend_read_enable)
 		num_cb = 0;
 
@@ -3341,7 +3360,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 			return -EINVAL;
 		}
 	}
-	if (track->z_enabled) {
+	track->cb_dirty = false;
+
+	if (track->zb_dirty && track->z_enabled) {
 		if (track->zb.robj == NULL) {
 			DRM_ERROR("[drm] No buffer for z buffer !\n");
 			return -EINVAL;
@@ -3358,6 +3379,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 			return -EINVAL;
 		}
 	}
+	track->zb_dirty = false;
+
+	if (track->aa_dirty && track->aaresolve) {
+		if (track->aa.robj == NULL) {
+			DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+			return -EINVAL;
+		}
+		/* I believe the format comes from colorbuffer0. */
+		size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+		size += track->aa.offset;
+		if (size > radeon_bo_size(track->aa.robj)) {
+			DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+				  "(need %lu have %lu) !\n", i, size,
+				  radeon_bo_size(track->aa.robj));
+			DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+				  i, track->aa.pitch, track->cb[0].cpp,
+				  track->aa.offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->aa_dirty = false;
+
 	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
 	if (track->vap_vf_cntl & (1 << 14)) {
 		nverts = track->vap_alt_nverts;
@@ -3417,13 +3460,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 			  prim_walk);
 		return -EINVAL;
 	}
-	return r100_cs_track_texture_check(rdev, track);
+
+	if (track->tex_dirty) {
+		track->tex_dirty = false;
+		return r100_cs_track_texture_check(rdev, track);
+	}
+	return 0;
 }
 
 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
 {
 	unsigned i, face;
 
+	track->cb_dirty = true;
+	track->zb_dirty = true;
+	track->tex_dirty = true;
+	track->aa_dirty = true;
+
 	if (rdev->family < CHIP_R300) {
 		track->num_cb = 1;
 		if (rdev->family <= CHIP_RS200)
@@ -3437,6 +3490,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
 		track->num_texture = 16;
 		track->maxy = 4096;
 		track->separate_cube = 0;
+		track->aaresolve = false;
+		track->aa.robj = NULL;
 	}
 
 	for (i = 0; i < track->num_cb; i++) {
@@ -3746,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev)
 	r100_mc_program(rdev);
 	/* Resume clock */
 	r100_clock_startup(rdev);
-	/* Initialize GPU configuration (# pipes, ...) */
-//	r100_gpu_init(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r100_enable_bm(rdev);
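
The r100 CS tracker above now marks colorbuffer, depth, texture and AA state dirty as registers are parsed, and re-runs the corresponding buffer checks only while the matching flag is set. A stand-alone sketch of that dirty-flag gating; the structure and counter are illustrative:

/* Register writes set a per-block flag; the (expensive) check runs
 * only when the flag is set, then clears it. */
#include <stdbool.h>
#include <stdio.h>

struct track {
	bool cb_dirty, zb_dirty, tex_dirty;
	int checks_run;
};

static void check_track(struct track *t)
{
	if (t->cb_dirty) {		/* colorbuffer state changed since last check */
		t->checks_run++;
		t->cb_dirty = false;
	}
	if (t->zb_dirty) {		/* depth buffer state changed */
		t->checks_run++;
		t->zb_dirty = false;
	}
	if (t->tex_dirty) {		/* texture state changed */
		t->checks_run++;
		t->tex_dirty = false;
	}
}

int main(void)
{
	struct track t = { .cb_dirty = true };

	check_track(&t);	/* runs one check */
	check_track(&t);	/* nothing dirty any more: runs none */
	printf("checks run: %d\n", t.checks_run);
	return 0;
}
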
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index af65600e6564..2fef9de7f363 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -52,14 +52,7 @@ struct r100_cs_track_texture {
 	unsigned                compress_format;
 };
 
-struct r100_cs_track_limits {
-	unsigned num_cb;
-	unsigned num_texture;
-	unsigned max_levels;
-};
-
 struct r100_cs_track {
-	struct radeon_device *rdev;
 	unsigned			num_cb;
 	unsigned                        num_texture;
 	unsigned			maxy;
@@ -73,11 +66,17 @@ struct r100_cs_track {
 	struct r100_cs_track_array	arrays[11];
 	struct r100_cs_track_cb 	cb[R300_MAX_CB];
 	struct r100_cs_track_cb 	zb;
+	struct r100_cs_track_cb 	aa;
 	struct r100_cs_track_texture	textures[R300_TRACK_MAX_TEXTURE];
 	bool				z_enabled;
 	bool                            separate_cube;
 	bool				zb_cb_clear;
 	bool				blend_read_enable;
+	bool				cb_dirty;
+	bool				zb_dirty;
+	bool				tex_dirty;
+	bool				aa_dirty;
+	bool				aaresolve;
 };
 
 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index d2408c395619..f24058300413 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
@@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R200_PP_TXOFFSET_0:
@@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_CUBIC_OFFSET_F1_0:
 	case R200_PP_CUBIC_OFFSET_F2_0:
@@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		track->textures[i].cube_info[face - 1].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].cube_info[face - 1].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
 		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_COLORPITCH:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		ib[idx] = tmp;
 
 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
 		break;
 	case RADEON_RB3D_DEPTHPITCH:
 		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_CNTL:
 		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}
 
 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZSTENCILCNTL:
 		switch (idx_value & 0xf) {
@@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		default:
 			break;
 		}
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
 		}
 		break;
 	case RADEON_SE_VF_CNTL:
@@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		i = (reg - R200_PP_TXSIZE_0) / 32;
 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXPITCH_0:
 	case R200_PP_TXPITCH_1:
@@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 	case R200_PP_TXPITCH_5:
 		i = (reg - R200_PP_TXPITCH_0) / 32;
 		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXFILTER_0:
 	case R200_PP_TXFILTER_1:
@@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		tmp = (idx_value >> 27) & 0x7;
 		if (tmp == 2 || tmp == 6)
 			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXMULTI_CTL_0:
 	case R200_PP_TXMULTI_CTL_1:
@@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].tex_coord_type = 1;
 			break;
 		}
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXFORMAT_0:
 	case R200_PP_TXFORMAT_1:
@@ -488,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
 		break;
 	case R200_PP_CUBIC_FACES_0:
 	case R200_PP_CUBIC_FACES_1:
@@ -501,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
 			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
 		}
+		track->tex_dirty = true;
 		break;
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 55fe5ba7def3..069efa8c8ecf 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -667,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->cb[i].robj = reloc->robj;
 		track->cb[i].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R300_ZB_DEPTHOFFSET:
@@ -679,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R300_TX_OFFSET_0:
@@ -717,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		tmp |= tile_flags;
 		ib[idx] = tmp;
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	/* Tracked registers */
 	case 0x2084:
@@ -743,6 +746,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		if (p->rdev->family < CHIP_RV515) {
 			track->maxy -= 1440;
 		}
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case 0x4E00:
 		/* RB3D_CCTL */
@@ -752,6 +757,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
+		track->cb_dirty = true;
 		break;
 	case 0x4E38:
 	case 0x4E3C:
@@ -814,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 				  ((idx_value >> 21) & 0xF));
 			return -EINVAL;
 		}
+		track->cb_dirty = true;
 		break;
 	case 0x4F00:
 		/* ZB_CNTL */
@@ -822,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		} else {
 			track->z_enabled = false;
 		}
+		track->zb_dirty = true;
 		break;
 	case 0x4F10:
 		/* ZB_FORMAT */
@@ -838,6 +846,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 				  (idx_value & 0xF));
 			return -EINVAL;
 		}
+		track->zb_dirty = true;
 		break;
 	case 0x4F24:
 		/* ZB_DEPTHPITCH */
@@ -861,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		ib[idx] = tmp;
 
 		track->zb.pitch = idx_value & 0x3FFC;
+		track->zb_dirty = true;
 		break;
 	case 0x4104:
+		/* TX_ENABLE */
 		for (i = 0; i < 16; i++) {
 			bool enabled;
 
 			enabled = !!(idx_value & (1 << i));
 			track->textures[i].enabled = enabled;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x44C0:
 	case 0x44C4:
@@ -898,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_X16:
+		case R300_TX_FORMAT_FL_I16:
 		case R300_TX_FORMAT_Y8X8:
 		case R300_TX_FORMAT_Z5Y6X5:
 		case R300_TX_FORMAT_Z6Y5X5:
@@ -910,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_Y16X16:
+		case R300_TX_FORMAT_FL_I16A16:
 		case R300_TX_FORMAT_Z11Y11X10:
 		case R300_TX_FORMAT_Z10Y11X11:
 		case R300_TX_FORMAT_W8Z8Y8X8:
@@ -951,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			DRM_ERROR("Invalid texture format %u\n",
 				  (idx_value & 0x1F));
 			return -EINVAL;
-			break;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4400:
 	case 0x4404:
@@ -980,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		if (tmp == 2 || tmp == 4 || tmp == 6) {
 			track->textures[i].roundup_h = false;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4500:
 	case 0x4504:
@@ -1017,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
 			return -EINVAL;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4480:
 	case 0x4484:
@@ -1046,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		track->textures[i].use_pitch = !!tmp;
 		tmp = (idx_value >> 22) & 0xF;
 		track->textures[i].txdepth = tmp;
+		track->tex_dirty = true;
 		break;
 	case R300_ZB_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1060,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	case 0x4e0c:
 		/* RB3D_COLOR_CHANNEL_MASK */
 		track->color_channel_mask = idx_value;
+		track->cb_dirty = true;
 		break;
 	case 0x43a4:
 		/* SC_HYPERZ_EN */
@@ -1073,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	case 0x4f1c:
 		/* ZB_BW_CNTL */
 		track->zb_cb_clear = !!(idx_value & (1 << 5));
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		if (p->rdev->hyperz_filp != p->filp) {
 			if (idx_value & (R300_HIZ_ENABLE |
 					 R300_RD_COMP_ENABLE |
@@ -1084,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	case 0x4e04:
 		/* RB3D_BLENDCNTL */
 		track->blend_read_enable = !!(idx_value & (1 << 2));
+		track->cb_dirty = true;
+		break;
+	case R300_RB3D_AARESOLVE_OFFSET:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->aa.robj = reloc->robj;
+		track->aa.offset = idx_value;
+		track->aa_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R300_RB3D_AARESOLVE_PITCH:
+		track->aa.pitch = idx_value & 0x3FFE;
+		track->aa_dirty = true;
 		break;
-	case 0x4f28: /* ZB_DEPTHCLEARVALUE */
+	case R300_RB3D_AARESOLVE_CTL:
+		track->aaresolve = idx_value & 0x1;
+		track->aa_dirty = true;
 		break;
 	case 0x4f30: /* ZB_MASK_OFFSET */
 	case 0x4f34: /* ZB_ZMASK_PITCH */
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1a0d5362cd79..f0bce399c9f3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1371,6 +1371,8 @@
 #define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
 #define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
 
+#define R300_RB3D_AARESOLVE_OFFSET          0x4E80
+#define R300_RB3D_AARESOLVE_PITCH           0x4E84
 #define R300_RB3D_AARESOLVE_CTL             0x4E88
 /* gap */
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1cd56dc8c8ab..b409b24207a1 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2106,7 +2106,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
 
 	r600_cp_stop(rdev);
 
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	/* Reset cp */
 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -2193,7 +2197,11 @@ int r600_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB_WPTR, 0);
 
 	/* set the wb address whether it's enabled or not */
-	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+	       RB_RPTR_SWAP(2) |
+#endif
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
@@ -2629,7 +2637,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	/* FIXME: implement */
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, ib->length_dw);
 }
@@ -3305,8 +3317,8 @@ restart_ih:
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
-		src_id =  rdev->ih.ring[ring_index] & 0xff;
-		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 
 		switch (src_id) {
 		case 1: /* D1 vblank/vline */
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index ca5c29f70779..7f1043448d25 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev)
 	ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
 
 	for (i = 0; i < r6xx_vs_size; i++)
-		vs[i] = r6xx_vs[i];
+		vs[i] = cpu_to_le32(r6xx_vs[i]);
 	for (i = 0; i < r6xx_ps_size; i++)
-		ps[i] = r6xx_ps[i];
+		ps[i] = cpu_to_le32(r6xx_ps[i]);
 
 	dev_priv->blit_vb->used = 512;
 
@@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
 	DRM_DEBUG("\n");
 
 	sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 
 	BEGIN_RING(9);
 	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
@@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv)
 	OUT_RING(DI_PT_RECTLIST);
 
 	OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+#ifdef __BIG_ENDIAN
+	OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
+#else
 	OUT_RING(DI_INDEX_SIZE_16_BIT);
+#endif
 
 	OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
 	OUT_RING(1);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 16e211a614d7..2fed91750126 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format,
 	if (h < 8)
 		h = 8;
 
-	cb_color_info = ((format << 2) | (1 << 27));
+	cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
@@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 	u32 sq_vtx_constant_word2;
 
 	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
 	radeon_ring_write(rdev, 0x460);
@@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev,
 	if (h < 1)
 		h = 1;
 
-	sq_tex_resource_word0 = (1 << 0);
+	sq_tex_resource_word0 = (1 << 0) | (1 << 3);
 	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
 				  ((w - 1) << 19));
 
@@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev)
 	radeon_ring_write(rdev, DI_PT_RECTLIST);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
 	radeon_ring_write(rdev, 1);
@@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev)
 	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, dwords);
 
@@ -467,7 +478,7 @@ static inline uint32_t i2f(uint32_t input)
 int r600_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
-	int r, dwords;
+	int i, r, dwords;
 	void *ptr;
 	u32 packet2s[16];
 	int num_packet2s = 0;
@@ -486,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev)
 
 	dwords = rdev->r600_blit.state_len;
 	while (dwords & 0xf) {
-		packet2s[num_packet2s++] = PACKET2(0);
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
 		dwords++;
 	}
 
@@ -529,8 +540,10 @@ int r600_blit_init(struct radeon_device *rdev)
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
-	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
+	for (i = 0; i < r6xx_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
+	for (i = 0; i < r6xx_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index e8151c1d55b2..2d1f6c5ee2a7 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -684,7 +684,11 @@ const u32 r6xx_vs[] =
 	0x00000000,
 	0x3c000000,
 	0x68cd1000,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
 	0x00080000,
+#endif
 	0x00000000,
 };
 
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 4f4cd8b286d5..c3ab959bdc7c 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
 	r600_do_cp_stop(dev_priv);
 
 	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
 		     R600_RB_NO_UPDATE |
 		     R600_RB_BLKSZ(15) |
 		     R600_RB_BUFSZ(3));
@@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
 	r600_do_cp_stop(dev_priv);
 
 	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
 		     R600_RB_NO_UPDATE |
-		     (15 << 8) |
-		     (3 << 0));
+		     R600_RB_BLKSZ(15) |
+		     R600_RB_BUFSZ(3));
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
@@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv)
 
 	if (!dev_priv->writeback_works) {
 		/* Disable writeback to avoid unnecessary bus master transfer */
-		RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) |
-			     RADEON_RB_NO_UPDATE);
+		RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+			     R600_BUF_SWAP_32BIT |
+#endif
+			     RADEON_READ(R600_CP_RB_CNTL) |
+			     R600_RB_NO_UPDATE);
 		RADEON_WRITE(R600_SCRATCH_UMSK, 0);
 	}
 }
@@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev)
 
 	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
 	cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
-	RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA);
+	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
+		     R600_RB_RPTR_WR_ENA);
 
 	RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
 	RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
@@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
 			+ dev_priv->gart_vm_start;
 	}
 	RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
-		     rptr_addr & 0xffffffff);
+#ifdef __BIG_ENDIAN
+		     (2 << 0) |
+#endif
+		     (rptr_addr & 0xfffffffc));
 	RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
 		     upper_32_bits(rptr_addr));
 
@@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
 	{
 		u64 scratch_addr;
 
-		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR);
+		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
 		scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
 		scratch_addr += R600_SCRATCH_REG_OFFSET;
 		scratch_addr >>= 8;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index fe0c8eb76010..0a0848f0346d 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -388,17 +388,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 	}
 
 	if (!IS_ALIGNED(pitch, pitch_align)) {
-		dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-			 __func__, __LINE__, pitch);
+		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, array_mode);
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(height, height_align)) {
-		dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-			 __func__, __LINE__, height);
+		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, height, height_align, array_mode);
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(base_offset, base_align)) {
-		dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+			 base_offset, base_align, array_mode);
 		return -EINVAL;
 	}
 
@@ -413,7 +414,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
 			 * broken userspace.
 			 */
 		} else {
-			dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+			dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
+				 array_mode,
+				 track->cb_color_bo_offset[i], tmp,
+				 radeon_bo_size(track->cb_color_bo[i]));
 			return -EINVAL;
 		}
 	}
@@ -548,17 +552,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 			}
 
 			if (!IS_ALIGNED(pitch, pitch_align)) {
-				dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
-					 __func__, __LINE__, pitch);
+				dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+					 __func__, __LINE__, pitch, pitch_align, array_mode);
 				return -EINVAL;
 			}
 			if (!IS_ALIGNED(height, height_align)) {
-				dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
-					 __func__, __LINE__, height);
+				dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+					 __func__, __LINE__, height, height_align, array_mode);
 				return -EINVAL;
 			}
 			if (!IS_ALIGNED(base_offset, base_align)) {
-				dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+				dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
+					 base_offset, base_align, array_mode);
 				return -EINVAL;
 			}
 
@@ -566,9 +571,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
 			tmp = ntiles * bpe * 64 * nviews;
 			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
-				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
-						track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
-						radeon_bo_size(track->db_bo));
+				dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+					 array_mode,
+					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+					 radeon_bo_size(track->db_bo));
 				return -EINVAL;
 			}
 		}
@@ -1350,18 +1356,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 i
 	/* XXX check height as well... */
 
 	if (!IS_ALIGNED(pitch, pitch_align)) {
-		dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-			 __func__, __LINE__, pitch);
+		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(base_offset, base_align)) {
-		dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
-			 __func__, __LINE__, base_offset);
+		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(mip_offset, base_align)) {
-		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
-			 __func__, __LINE__, mip_offset);
+		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index d1f598663da7..b2b944bcd05a 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -154,13 +154,14 @@
 #define		ROQ_IB2_START(x)				((x) << 8)
 #define	CP_RB_BASE					0xC100
 #define	CP_RB_CNTL					0xC104
-#define		RB_BUFSZ(x)					((x)<<0)
-#define		RB_BLKSZ(x)					((x)<<8)
-#define		RB_NO_UPDATE					(1<<27)
-#define		RB_RPTR_WR_ENA					(1<<31)
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
 #define		BUF_SWAP_32BIT					(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
 #define	CP_RB_RPTR_ADDR_HI				0xC110
 #define	CP_RB_RPTR_WR					0xC108
 #define	CP_RB_WPTR					0xC114
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5c1cc7ad9a15..02d5c415f499 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 			/* some evergreen boards have bad data for this entry */
 			if (ASIC_IS_DCE4(rdev)) {
 				if ((i == 7) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
 				    (gpio->sucI2cId.ucAccess == 0)) {
 					gpio->sucI2cId.ucAccess = 0x97;
 					gpio->ucDataMaskShift = 8;
@@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 			/* some DCE3 boards have bad data for this entry */
 			if (ASIC_IS_DCE3(rdev)) {
 				if ((i == 4) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
 				    (gpio->sucI2cId.ucAccess == 0x94))
 					gpio->sucI2cId.ucAccess = 0x14;
 			}
@@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
 			/* some evergreen boards have bad data for this entry */
 			if (ASIC_IS_DCE4(rdev)) {
 				if ((i == 7) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
 				    (gpio->sucI2cId.ucAccess == 0)) {
 					gpio->sucI2cId.ucAccess = 0x97;
 					gpio->ucDataMaskShift = 8;
@@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
 			/* some DCE3 boards have bad data for this entry */
 			if (ASIC_IS_DCE3(rdev)) {
 				if ((i == 4) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
 				    (gpio->sucI2cId.ucAccess == 0x94))
 					gpio->sucI2cId.ucAccess = 0x14;
 			}
@@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
 			pin = &gpio_info->asGPIO_Pin[i];
 			if (id == pin->ucGPIO_ID) {
 				gpio.id = pin->ucGPIO_ID;
-				gpio.reg = pin->usGpioPin_AIndex * 4;
+				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
 				gpio.mask = (1 << pin->ucGpioPinBitShift);
 				gpio.valid = true;
 				break;
@@ -1274,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
 				      data_offset);
 		switch (crev) {
 		case 1:
-			if (igp_info->info.ulBootUpMemoryClock)
+			if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
 				return true;
 			break;
 		case 2:
-			if (igp_info->info_2.ulBootUpSidePortClock)
+			if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
 				return true;
 			break;
 		default:
@@ -1442,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 
 			for (i = 0; i < num_indices; i++) {
 				if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
+				    (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
 					ss->percentage =
 						le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1456,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
 			for (i = 0; i < num_indices; i++) {
 				if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
+				    (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
 					ss->percentage =
 						le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1470,7 +1470,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
 				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
 			for (i = 0; i < num_indices; i++) {
 				if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
+				    (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
 					ss->percentage =
 						le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1553,8 +1553,8 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 		if (misc & ATOM_DOUBLE_CLOCK_MODE)
 			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
 
-		lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize;
-		lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize;
+		lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+		lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
 
 		/* set crtc values */
 		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1569,13 +1569,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 			lvds->linkb = false;
 
 		/* parse the lcd record table */
-		if (lvds_info->info.usModePatchTableOffset) {
+		if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
 			ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
 			ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
 			bool bad_record = false;
 			u8 *record = (u8 *)(mode_info->atom_context->bios +
 					    data_offset +
-					    lvds_info->info.usModePatchTableOffset);
+					    le16_to_cpu(lvds_info->info.usModePatchTableOffset));
 			while (*record != ATOM_RECORD_END_TYPE) {
 				switch (*record) {
 				case LCD_MODE_PATCH_RECORD_MODE_TYPE:
@@ -2189,7 +2189,7 @@ static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
 		firmware_info =
 			(union firmware_info *)(mode_info->atom_context->bios +
 						data_offset);
-		vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+		vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
 	}
 
 	return vddc;
@@ -2284,7 +2284,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 			VOLTAGE_SW;
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-			clock_info->evergreen.usVDDC;
+			le16_to_cpu(clock_info->evergreen.usVDDC);
 	} else {
 		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
 		sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2295,7 +2295,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 			VOLTAGE_SW;
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-			clock_info->r600.usVDDC;
+			le16_to_cpu(clock_info->r600.usVDDC);
 	}
 
 	if (rdev->flags & RADEON_IS_IGP) {
@@ -2408,13 +2408,13 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
 	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
 	state_array = (struct StateArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usStateArrayOffset);
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
 	clock_info_array = (struct ClockInfoArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usClockInfoArrayOffset);
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
 	non_clock_info_array = (struct NonClockInfoArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usNonClockInfoArrayOffset);
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
 				       state_array->ucNumEntries, GFP_KERNEL);
 	if (!rdev->pm.power_state)
@@ -2533,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
 	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-	return args.ulReturnEngineClock;
+	return le32_to_cpu(args.ulReturnEngineClock);
 }
 
 uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
@@ -2542,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
 	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-	return args.ulReturnMemoryClock;
+	return le32_to_cpu(args.ulReturnMemoryClock);
 }
 
 void radeon_atom_set_engine_clock(struct radeon_device *rdev,
@@ -2551,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
 	SET_ENGINE_CLOCK_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
 
-	args.ulTargetEngineClock = eng_clock;	/* 10 khz */
+	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
@@ -2565,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
-	args.ulTargetMemoryClock = mem_clock;	/* 10 khz */
+	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index d27ef74590cd..cf7c8d5b4ec2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1504,6 +1504,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 			   (rdev->pdev->subsystem_device == 0x4a48)) {
 			/* Mac X800 */
 			rdev->mode_info.connector_table = CT_MAC_X800;
+		} else if ((rdev->pdev->device == 0x4150) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4150)) {
+			/* Mac G5 9600 */
+			rdev->mode_info.connector_table = CT_MAC_G5_9600;
 		} else
 #endif /* CONFIG_PPC_PMAC */
 #ifdef CONFIG_PPC64
@@ -2022,6 +2027,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
 					    &hpd);
 		break;
+	case CT_MAC_G5_9600:
+		DRM_INFO("Connector Table: %d (mac g5 9600)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* ADC - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		break;
 	default:
 		DRM_INFO("Connector table: %d (invalid)\n",
 			 rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ca5eb217929..f0209be7a34b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -937,8 +937,11 @@ int radeon_resume_kms(struct drm_device *dev)
 int radeon_gpu_reset(struct radeon_device *rdev)
 {
 	int r;
+	int resched;
 
 	radeon_save_bios_scratch_regs(rdev);
+	/* block TTM */
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
 	radeon_suspend(rdev);
 
 	r = radeon_asic_reset(rdev);
@@ -947,6 +950,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		radeon_resume(rdev);
 		radeon_restore_bios_scratch_regs(rdev);
 		drm_helper_resume_force_mode(rdev->ddev);
+		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 		return 0;
 	}
 	/* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 4409975a363c..4be58793dc17 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -793,6 +793,11 @@ static void avivo_get_fb_div(struct radeon_pll *pll,
 	tmp *= target_clock;
 	*fb_div = tmp / pll->reference_freq;
 	*frac_fb_div = tmp % pll->reference_freq;
+
+	if (*fb_div > pll->max_feedback_div)
+		*fb_div = pll->max_feedback_div;
+	else if (*fb_div < pll->min_feedback_div)
+		*fb_div = pll->min_feedback_div;
 }
 
 static u32 avivo_get_post_div(struct radeon_pll *pll,
@@ -826,6 +831,11 @@ static u32 avivo_get_post_div(struct radeon_pll *pll,
 			post_div--;
 	}
 
+	if (post_div > pll->max_post_div)
+		post_div = pll->max_post_div;
+	else if (post_div < pll->min_post_div)
+		post_div = pll->min_post_div;
+
 	return post_div;
 }
 
@@ -961,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
 		max_fractional_feed_div = pll->max_frac_feedback_div;
 	}
 
-	for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
 		uint32_t ref_div;
 
 		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 448eba89d1e6..5cba46b9779a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
 #define R600_CP_RB_CNTL                                        0xc104
 #       define R600_RB_BUFSZ(x)                                ((x) << 0)
 #       define R600_RB_BLKSZ(x)                                ((x) << 8)
+#	define R600_BUF_SWAP_32BIT		               (2 << 16)
 #       define R600_RB_NO_UPDATE                               (1 << 27)
 #       define R600_RB_RPTR_WR_ENA                             (1 << 31)
 #define R600_CP_RB_RPTR_WR                                     0xc108
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index d4a542247618..b4274883227f 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -910,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 
 	args.v1.ucAction = action;
 	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
-		args.v1.usInitInfo = connector_object_id;
+		args.v1.usInitInfo = cpu_to_le16(connector_object_id);
 	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
 		args.v1.asMode.ucLaneSel = lane_num;
 		args.v1.asMode.ucLaneSet = lane_set;
@@ -1140,7 +1140,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 		case 3:
 			args.v3.sExtEncoder.ucAction = action;
 			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
-				args.v3.sExtEncoder.usConnectorId = connector_object_id;
+				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
 			else
 				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
@@ -1570,11 +1570,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
 	}
 
 	/* set scaler clears this on some chips */
-	/* XXX check DCE4 */
-	if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
-		if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
-			WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-			       AVIVO_D1MODE_INTERLEAVE_EN);
+	if (ASIC_IS_AVIVO(rdev) &&
+	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+		if (ASIC_IS_DCE4(rdev)) {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       EVERGREEN_INTERLEAVE_EN);
+			else
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		} else {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       AVIVO_D1MODE_INTERLEAVE_EN);
+			else
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 28431e78ab56..0b7b486c97e8 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 	u32 tiling_flags = 0;
 	int ret;
 	int aligned_size, size;
+	int height = mode_cmd->height;
 
 	/* need to align pitch with crtc limits */
 	mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
 
-	size = mode_cmd->pitch * mode_cmd->height;
+	if (rdev->family >= CHIP_R600)
+		height = ALIGN(mode_cmd->height, 8);
+	size = mode_cmd->pitch * height;
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
 				       RADEON_GEM_DOMAIN_VRAM,
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index c3f23f6ff60e..5067d18d0009 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,6 +209,7 @@ enum radeon_connector_table {
 	CT_EMAC,
 	CT_RN50_POWER,
 	CT_MAC_X800,
+	CT_MAC_G5_9600,
 };
 
 enum radeon_dvo_chip {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index df5734d0c4af..e446979e0e0a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -791,9 +791,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
 		radeon_mem_types_list[i].driver_features = 0;
 		if (i == 0)
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
 		else
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
 
 	}
 	/* Add ttm page pool to debugfs */
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index b506ec1cab4b..e8a1786b6426 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -683,9 +683,7 @@ r300 0x4f60
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -706,13 +704,11 @@ r300 0x4f60
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 8c1214c2390f..722074e21e2f 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,7 +130,6 @@ r420 0x4f60
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
 0x4200 GA_POINT_S0
 0x4204 GA_POINT_T0
@@ -750,9 +749,7 @@ r420 0x4f60
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -773,13 +770,11 @@ r420 0x4f60
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 0828d80396f2..d9f62866bbc1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -749,9 +749,7 @@ rs600 0x6d40
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -772,13 +770,11 @@ rs600 0x6d40
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index ef422bbacfc1..911a8fbd32bb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -164,7 +164,6 @@ rv515 0x6d40
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
 0x4114 SU_TEX_WRAP_PS3
 0x4118 PS3_ENABLE
@@ -461,9 +460,7 @@ rv515 0x6d40
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -484,9 +481,6 @@ rv515 0x6d40
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4EF8 RB3D_CONSTANT_COLOR_AR
@@ -496,4 +490,5 @@ rv515 0x6d40
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
 0x4F58 ZB_ZPASS_DATA
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0137d3e3728d..6638c8e4c81b 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev)
 		switch (crev) {
 		case 1:
 			tmp.full = dfixed_const(100);
-			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
 			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-			if (info->info.usK8MemoryClock)
+			if (le16_to_cpu(info->info.usK8MemoryClock))
 				rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
 			else if (rdev->clock.default_mclk) {
 				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev)
 			break;
 		case 2:
 			tmp.full = dfixed_const(100);
-			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
 			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-			if (info->info_v2.ulBootUpUMAClock)
-				rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+			if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+				rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
 			else if (rdev->clock.default_mclk)
 				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
 			else
 				rdev->pm.igp_system_mclk.full = dfixed_const(66700);
 			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
-			rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
 			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
 			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
 			break;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3a95999d2fef..ee5541c6a623 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -321,7 +321,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
 		return -EINVAL;
 
 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	/* Reset cp */
 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index abc8cf5a3672..79fa588e9ed5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -76,10 +76,10 @@
 #define		ROQ_IB1_START(x)				((x) << 0)
 #define		ROQ_IB2_START(x)				((x) << 8)
 #define	CP_RB_CNTL					0xC104
-#define		RB_BUFSZ(x)					((x)<<0)
-#define		RB_BLKSZ(x)					((x)<<8)
-#define		RB_NO_UPDATE					(1<<27)
-#define		RB_RPTR_WR_ENA					(1<<31)
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
 #define		BUF_SWAP_32BIT					(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 773e484f1646..297bc9a7d6e6 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP
 	  will be called k8temp.
 
 config SENSORS_K10TEMP
-	tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
+	tristate "AMD Family 10h/11h/12h/14h temperature sensor"
 	depends on X86 && PCI
 	help
 	  If you say yes here you get support for the temperature
 	  sensor(s) inside your CPU. Supported are later revisions of
-	  the AMD Family 10h and all revisions of the AMD Family 11h
-	  microarchitectures.
+	  the AMD Family 10h and all revisions of the AMD Family 11h,
+	  12h (Llano), and 14h (Brazos) microarchitectures.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called k10temp.
@@ -455,13 +455,14 @@ config SENSORS_JZ4740
 	  called jz4740-hwmon.
 
 config SENSORS_JC42
-	tristate "JEDEC JC42.4 compliant temperature sensors"
+	tristate "JEDEC JC42.4 compliant memory module temperature sensors"
 	depends on I2C
 	help
-	  If you say yes here you get support for Jedec JC42.4 compliant
-	  temperature sensors. Support will include, but not be limited to,
-	  ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
-	  MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3.
+	  If you say yes here, you get support for JEDEC JC42.4 compliant
+	  temperature sensors, which are used on many DDR3 memory modules for
+	  mobile devices and servers.  Support will include, but not be limited
+	  to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
+	  MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called jc42.
@@ -574,7 +575,7 @@ config SENSORS_LM85
 	help
 	  If you say yes here you get support for National Semiconductor LM85
 	  sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
-	  EMC6D101 and EMC6D102.
+	  EMC6D101, EMC6D102, and EMC6D103.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called lm85.
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822aa9bbf..d46c0c758ddf 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
 	{ "ad7414", 0 },
 	{}
 };
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
 
 static struct i2c_driver ad7414_driver = {
 	.driver = {
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843a2964..5cc3e3784b42 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
 	{ "adt7411", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
 
 static struct i2c_driver adt7411_driver = {
 	.driver		= {
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 5dea9faa1656..cd2a6e437aec 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -344,7 +344,7 @@ static int emc1403_remove(struct i2c_client *client)
 }
 
 static const unsigned short emc1403_address_list[] = {
-	0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
+	0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END
 };
 
 static const struct i2c_device_id emc1403_idtable[] = {
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 340fc78c8dde..934991237061 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = {
 
 /* Configuration register defines */
 #define JC42_CFG_CRIT_ONLY	(1 << 2)
+#define JC42_CFG_TCRIT_LOCK	(1 << 6)
+#define JC42_CFG_EVENT_LOCK	(1 << 7)
 #define JC42_CFG_SHUTDOWN	(1 << 8)
 #define JC42_CFG_HYST_SHIFT	9
 #define JC42_CFG_HYST_MASK	0x03
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct jc42_data *data = i2c_get_clientdata(client);
-	long val;
+	unsigned long val;
 	int diff, hyst;
 	int err;
 	int ret = count;
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev,
 
 static DEVICE_ATTR(temp1_input, S_IRUGO,
 		   show_temp_input, NULL);
-static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit, S_IRUGO,
 		   show_temp_crit, set_temp_crit);
-static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_min, S_IRUGO,
 		   show_temp_min, set_temp_min);
-static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_max, S_IRUGO,
 		   show_temp_max, set_temp_max);
 
-static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
 		   show_temp_crit_hyst, set_temp_crit_hyst);
 static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
 		   show_temp_max_hyst, NULL);
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = {
 	NULL
 };
 
+static mode_t jc42_attribute_mode(struct kobject *kobj,
+				  struct attribute *attr, int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct jc42_data *data = i2c_get_clientdata(client);
+	unsigned int config = data->config;
+	bool readonly;
+
+	if (attr == &dev_attr_temp1_crit.attr)
+		readonly = config & JC42_CFG_TCRIT_LOCK;
+	else if (attr == &dev_attr_temp1_min.attr ||
+		 attr == &dev_attr_temp1_max.attr)
+		readonly = config & JC42_CFG_EVENT_LOCK;
+	else if (attr == &dev_attr_temp1_crit_hyst.attr)
+		readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
+	else
+		readonly = true;
+
+	return S_IRUGO | (readonly ? 0 : S_IWUSR);
+}
+
 static const struct attribute_group jc42_group = {
 	.attrs = jc42_attributes,
+	.is_visible = jc42_attribute_mode,
 };
 
 /* Return 0 if detection is successful, -ENODEV otherwise */
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index da5a2404cd3e..82bf65aa2968 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
 /*
- * k10temp.c - AMD Family 10h/11h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
  *
  * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
  *
@@ -25,7 +25,7 @@
 #include <linux/pci.h>
 #include <asm/processor.h>
 
-MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
+MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_LICENSE("GPL");
 
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
 static const struct pci_device_id k10temp_id_table[] = {
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 	{}
 };
 MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 776aeb3019d2..508cb291f71b 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -98,6 +98,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
  * value, it uses signed 8-bit values with LSB = 1 degree Celsius.
  * For remote temperature, low and high limits, it uses signed 11-bit values
  * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
+ * For LM64, the actual remote diode temperature is 16 degrees Celsius higher
+ * than the register reading. Remote temperature setpoints have to be
+ * adapted accordingly.
  */
 
 #define FAN_FROM_REG(reg)	((reg) == 0xFFFC || (reg) == 0 ? 0 : \
@@ -165,6 +168,8 @@ struct lm63_data {
 	struct mutex update_lock;
 	char valid; /* zero until following fields are valid */
 	unsigned long last_updated; /* in jiffies */
+	int kind;
+	int temp2_offset;
 
 	/* registers values */
 	u8 config, config_fan;
@@ -247,16 +252,34 @@ static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dum
 	return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
 }
 
-static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
-			  char *buf)
+/*
+ * There are 8-bit registers for both the local (temp1) and the remote (temp2)
+ * sensor. For the remote sensor registers temp2_offset has to be taken into
+ * account, while for the local sensor it must not be.
+ * So we need separate 8-bit accessors for the local and the remote sensor.
+ */
+static ssize_t show_local_temp8(struct device *dev,
+				struct device_attribute *devattr,
+				char *buf)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 	struct lm63_data *data = lm63_update_device(dev);
 	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
 }
 
-static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy,
-			 const char *buf, size_t count)
+static ssize_t show_remote_temp8(struct device *dev,
+				 struct device_attribute *devattr,
+				 char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct lm63_data *data = lm63_update_device(dev);
+	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
+		       + data->temp2_offset);
+}
+
+static ssize_t set_local_temp8(struct device *dev,
+			       struct device_attribute *dummy,
+			       const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct lm63_data *data = i2c_get_clientdata(client);
@@ -274,7 +297,8 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 	struct lm63_data *data = lm63_update_device(dev);
-	return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]));
+	return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
+		       + data->temp2_offset);
 }
 
 static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
@@ -294,7 +318,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
 	int nr = attr->index;
 
 	mutex_lock(&data->update_lock);
-	data->temp11[nr] = TEMP11_TO_REG(val);
+	data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
 	i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
 				  data->temp11[nr] >> 8);
 	i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -310,6 +334,7 @@ static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute
 {
 	struct lm63_data *data = lm63_update_device(dev);
 	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
+		       + data->temp2_offset
 		       - TEMP8_FROM_REG(data->temp2_crit_hyst));
 }
 
@@ -324,7 +349,7 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *
 	long hyst;
 
 	mutex_lock(&data->update_lock);
-	hyst = TEMP8_FROM_REG(data->temp8[2]) - val;
+	hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
 	i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
 				  HYST_TO_REG(hyst));
 	mutex_unlock(&data->update_lock);
@@ -355,16 +380,21 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
 static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
 static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
 
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 1);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
+	set_local_temp8, 1);
 
 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
 	set_temp11, 1);
 static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
 	set_temp11, 2);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2);
+/*
+ * On the LM63, temp2_crit can be set only once, which should be the
+ * job of the bootloader.
+ */
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
+	NULL, 2);
 static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
 	set_temp2_crit_hyst);
 
@@ -479,7 +509,12 @@ static int lm63_probe(struct i2c_client *new_client,
 	data->valid = 0;
 	mutex_init(&data->update_lock);
 
-	/* Initialize the LM63 chip */
+	/* Set the device type */
+	data->kind = id->driver_data;
+	if (data->kind == lm64)
+		data->temp2_offset = 16000;
+
+	/* Initialize chip */
 	lm63_init_client(new_client);
 
 	/* Register sysfs hooks */
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 1e229847f37a..d2cc28660816 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
 enum chips {
 	any_chip, lm85b, lm85c,
 	adm1027, adt7463, adt7468,
-	emc6d100, emc6d102
+	emc6d100, emc6d102, emc6d103
 };
 
 /* The LM85 registers */
@@ -90,6 +90,9 @@ enum chips {
 #define	LM85_VERSTEP_EMC6D100_A0        0x60
 #define	LM85_VERSTEP_EMC6D100_A1        0x61
 #define	LM85_VERSTEP_EMC6D102		0x65
+#define	LM85_VERSTEP_EMC6D103_A0	0x68
+#define	LM85_VERSTEP_EMC6D103_A1	0x69
+#define	LM85_VERSTEP_EMC6D103S		0x6A	/* Also known as EMC6D103:A2 */
 
 #define	LM85_REG_CONFIG			0x40
 
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = {
 	{ "emc6d100", emc6d100 },
 	{ "emc6d101", emc6d100 },
 	{ "emc6d102", emc6d102 },
+	{ "emc6d103", emc6d103 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
 		case LM85_VERSTEP_EMC6D102:
 			type_name = "emc6d102";
 			break;
+		case LM85_VERSTEP_EMC6D103_A0:
+		case LM85_VERSTEP_EMC6D103_A1:
+			type_name = "emc6d103";
+			break;
+		/*
+		 * Registers apparently missing in EMC6D103S/EMC6D103:A2
+		 * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
+		 * (according to the data sheets), but used unconditionally
+		 * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
+		 * So skip EMC6D103S for now.
+		case LM85_VERSTEP_EMC6D103S:
+			type_name = "emc6d103s";
+			break;
+		 */
 		}
 	} else {
 		dev_dbg(&adapter->dev,
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client,
 	case adt7468:
 	case emc6d100:
 	case emc6d102:
+	case emc6d103:
 		data->freq_map = adm1027_freq_map;
 		break;
 	default:
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
 			/* More alarm bits */
 			data->alarms |= lm85_read_value(client,
 						EMC6D100_REG_ALARM3) << 16;
-		} else if (data->type == emc6d102) {
+		} else if (data->type == emc6d102 || data->type == emc6d103) {
 			/* Have to read LSB bits after the MSB ones because
 			   the reading of the MSB bits has frozen the
 			   LSBs (backward from the ADM1027).
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b605ff3a1fa0..829a2a1029f7 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -847,11 +847,15 @@ complete:
 			dev_err(dev->dev, "Arbitration lost\n");
 			err |= OMAP_I2C_STAT_AL;
 		}
+		/*
+		 * ProDB0017052: Clear ARDY bit twice
+		 */
 		if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
 					OMAP_I2C_STAT_AL)) {
 			omap_i2c_ack_stat(dev, stat &
 				(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
-				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
+				OMAP_I2C_STAT_ARDY));
 			omap_i2c_complete_cmd(dev, err);
 			return IRQ_HANDLED;
 		}
@@ -1137,12 +1141,41 @@ omap_i2c_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_SUSPEND
+static int omap_i2c_suspend(struct device *dev)
+{
+	if (!pm_runtime_suspended(dev))
+		if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+			dev->bus->pm->runtime_suspend(dev);
+
+	return 0;
+}
+
+static int omap_i2c_resume(struct device *dev)
+{
+	if (!pm_runtime_suspended(dev))
+		if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+			dev->bus->pm->runtime_resume(dev);
+
+	return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+	.suspend = omap_i2c_suspend,
+	.resume = omap_i2c_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
 static struct platform_driver omap_i2c_driver = {
 	.probe		= omap_i2c_probe,
 	.remove		= omap_i2c_remove,
 	.driver		= {
 		.name	= "omap_i2c",
 		.owner	= THIS_MODULE,
+		.pm	= OMAP_I2C_PM_OPS,
 	},
 };
 
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 495be451d326..266135ddf7fa 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
 	adap->owner = THIS_MODULE;
 	/* DDC class but actually often used for more generic I2C */
 	adap->class = I2C_CLASS_DDC;
-	strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+	strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
 		sizeof(adap->name));
 	adap->nr = bus_nr;
 	adap->algo = &stu300_algo;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8b606fd64022..08c194861af5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 					netif_carrier_on(nesvnic->netdev);
 
 					spin_lock(&nesvnic->port_ibevent_lock);
-					if (nesdev->iw_status == 0) {
-						nesdev->iw_status = 1;
-						nes_port_ibevent(nesvnic);
+					if (nesvnic->of_device_registered) {
+						if (nesdev->iw_status == 0) {
+							nesdev->iw_status = 1;
+							nes_port_ibevent(nesvnic);
+						}
 					}
 					spin_unlock(&nesvnic->port_ibevent_lock);
 				}
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 					netif_carrier_off(nesvnic->netdev);
 
 					spin_lock(&nesvnic->port_ibevent_lock);
-					if (nesdev->iw_status == 1) {
-						nesdev->iw_status = 0;
-						nes_port_ibevent(nesvnic);
+					if (nesvnic->of_device_registered) {
+						if (nesdev->iw_status == 1) {
+							nesdev->iw_status = 0;
+							nes_port_ibevent(nesvnic);
+						}
 					}
 					spin_unlock(&nesvnic->port_ibevent_lock);
 				}
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work)
 				netif_carrier_on(nesvnic->netdev);
 
 				spin_lock(&nesvnic->port_ibevent_lock);
-				if (nesdev->iw_status == 0) {
-					nesdev->iw_status = 1;
-					nes_port_ibevent(nesvnic);
+				if (nesvnic->of_device_registered) {
+					if (nesdev->iw_status == 0) {
+						nesdev->iw_status = 1;
+						nes_port_ibevent(nesvnic);
+					}
 				}
 				spin_unlock(&nesvnic->port_ibevent_lock);
 			}
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work)
 				netif_carrier_off(nesvnic->netdev);
 
 				spin_lock(&nesvnic->port_ibevent_lock);
-				if (nesdev->iw_status == 1) {
-					nesdev->iw_status = 0;
-					nes_port_ibevent(nesvnic);
+				if (nesvnic->of_device_registered) {
+					if (nesdev->iw_status == 1) {
+						nesdev->iw_status = 0;
+						nes_port_ibevent(nesvnic);
+					}
 				}
 				spin_unlock(&nesvnic->port_ibevent_lock);
 			}
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8245237b67ce..eca0c41f1226 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
 	 * there are still requests that haven't been acked.
 	 */
 	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
-	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+	    (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
 		start_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 	}
 
 	spin_lock_irqsave(&qp->s_lock, flags);
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+		goto ack_done;
 
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 23cf8fc933ec..5b8f59d6c3e8 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
 	event->owner = owner;
 
 	list_add_tail(&event->node, &gameport_event_list);
-	schedule_work(&gameport_event_work);
+	queue_work(system_long_wq, &gameport_event_work);
 
 out:
 	spin_unlock_irqrestore(&gameport_event_lock, flags);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7985114beac7..11905b6a3023 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -75,7 +75,6 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
  * dev->event_lock held and interrupts disabled.
  */
 static void input_pass_event(struct input_dev *dev,
-			     struct input_handler *src_handler,
 			     unsigned int type, unsigned int code, int value)
 {
 	struct input_handler *handler;
@@ -94,15 +93,6 @@ static void input_pass_event(struct input_dev *dev,
 				continue;
 
 			handler = handle->handler;
-
-			/*
-			 * If this is the handler that injected this
-			 * particular event we want to skip it to avoid
-			 * filters firing again and again.
-			 */
-			if (handler == src_handler)
-				continue;
-
 			if (!handler->filter) {
 				if (filtered)
 					break;
@@ -132,7 +122,7 @@ static void input_repeat_key(unsigned long data)
 	if (test_bit(dev->repeat_key, dev->key) &&
 	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
 
-		input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
+		input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
 
 		if (dev->sync) {
 			/*
@@ -141,7 +131,7 @@ static void input_repeat_key(unsigned long data)
 			 * Otherwise assume that the driver will send
 			 * SYN_REPORT once it's done.
 			 */
-			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 		}
 
 		if (dev->rep[REP_PERIOD])
@@ -174,7 +164,6 @@ static void input_stop_autorepeat(struct input_dev *dev)
 #define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
 
 static int input_handle_abs_event(struct input_dev *dev,
-				  struct input_handler *src_handler,
 				  unsigned int code, int *pval)
 {
 	bool is_mt_event;
@@ -218,15 +207,13 @@ static int input_handle_abs_event(struct input_dev *dev,
 	/* Flush pending "slot" event */
 	if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
 		input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
-		input_pass_event(dev, src_handler,
-				 EV_ABS, ABS_MT_SLOT, dev->slot);
+		input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
 	}
 
 	return INPUT_PASS_TO_HANDLERS;
 }
 
 static void input_handle_event(struct input_dev *dev,
-			       struct input_handler *src_handler,
 			       unsigned int type, unsigned int code, int value)
 {
 	int disposition = INPUT_IGNORE_EVENT;
@@ -279,8 +266,7 @@ static void input_handle_event(struct input_dev *dev,
 
 	case EV_ABS:
 		if (is_event_supported(code, dev->absbit, ABS_MAX))
-			disposition = input_handle_abs_event(dev, src_handler,
-							     code, &value);
+			disposition = input_handle_abs_event(dev, code, &value);
 
 		break;
 
@@ -338,7 +324,7 @@ static void input_handle_event(struct input_dev *dev,
 		dev->event(dev, type, code, value);
 
 	if (disposition & INPUT_PASS_TO_HANDLERS)
-		input_pass_event(dev, src_handler, type, code, value);
+		input_pass_event(dev, type, code, value);
 }
 
 /**
@@ -367,7 +353,7 @@ void input_event(struct input_dev *dev,
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		add_input_randomness(type, code, value);
-		input_handle_event(dev, NULL, type, code, value);
+		input_handle_event(dev, type, code, value);
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 }
@@ -397,8 +383,7 @@ void input_inject_event(struct input_handle *handle,
 		rcu_read_lock();
 		grab = rcu_dereference(dev->grab);
 		if (!grab || grab == handle)
-			input_handle_event(dev, handle->handler,
-					   type, code, value);
+			input_handle_event(dev, type, code, value);
 		rcu_read_unlock();
 
 		spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -611,10 +596,10 @@ static void input_dev_release_keys(struct input_dev *dev)
 		for (code = 0; code <= KEY_MAX; code++) {
 			if (is_event_supported(code, dev->keybit, KEY_MAX) &&
 			    __test_and_clear_bit(code, dev->key)) {
-				input_pass_event(dev, NULL, EV_KEY, code, 0);
+				input_pass_event(dev, EV_KEY, code, 0);
 			}
 		}
-		input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 }
 
@@ -889,9 +874,9 @@ int input_set_keycode(struct input_dev *dev,
 	    !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
 	    __test_and_clear_bit(old_keycode, dev->key)) {
 
-		input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
+		input_pass_event(dev, EV_KEY, old_keycode, 0);
 		if (dev->sync)
-			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 
  out:
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index ac471b77c18e..99ce9032d08c 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -71,8 +71,9 @@ struct tegra_kbc {
 	spinlock_t lock;
 	unsigned int repoll_dly;
 	unsigned long cp_dly_jiffies;
+	bool use_fn_map;
 	const struct tegra_kbc_platform_data *pdata;
-	unsigned short keycode[KBC_MAX_KEY];
+	unsigned short keycode[KBC_MAX_KEY * 2];
 	unsigned short current_keys[KBC_MAX_KPENT];
 	unsigned int num_pressed_keys;
 	struct timer_list timer;
@@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = {
 	KEY(15, 5, KEY_F2),
 	KEY(15, 6, KEY_CAPSLOCK),
 	KEY(15, 7, KEY_F6),
+
+	/* Software Handled Function Keys */
+	KEY(20, 0, KEY_KP7),
+
+	KEY(21, 0, KEY_KP9),
+	KEY(21, 1, KEY_KP8),
+	KEY(21, 2, KEY_KP4),
+	KEY(21, 4, KEY_KP1),
+
+	KEY(22, 1, KEY_KPSLASH),
+	KEY(22, 2, KEY_KP6),
+	KEY(22, 3, KEY_KP5),
+	KEY(22, 4, KEY_KP3),
+	KEY(22, 5, KEY_KP2),
+	KEY(22, 7, KEY_KP0),
+
+	KEY(27, 1, KEY_KPASTERISK),
+	KEY(27, 3, KEY_KPMINUS),
+	KEY(27, 4, KEY_KPPLUS),
+	KEY(27, 5, KEY_KPDOT),
+
+	KEY(28, 5, KEY_VOLUMEUP),
+
+	KEY(29, 3, KEY_HOME),
+	KEY(29, 4, KEY_END),
+	KEY(29, 5, KEY_BRIGHTNESSDOWN),
+	KEY(29, 6, KEY_VOLUMEDOWN),
+	KEY(29, 7, KEY_BRIGHTNESSUP),
+
+	KEY(30, 0, KEY_NUMLOCK),
+	KEY(30, 1, KEY_SCROLLLOCK),
+	KEY(30, 2, KEY_MUTE),
+
+	KEY(31, 4, KEY_HELP),
 };
 
 static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
@@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
 	unsigned int i;
 	unsigned int num_down = 0;
 	unsigned long flags;
+	bool fn_keypress = false;
 
 	spin_lock_irqsave(&kbc->lock, flags);
 	for (i = 0; i < KBC_MAX_KPENT; i++) {
@@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
 				MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
 
 			scancodes[num_down] = scancode;
-			keycodes[num_down++] = kbc->keycode[scancode];
+			keycodes[num_down] = kbc->keycode[scancode];
+			/* If driver uses Fn map, do not report the Fn key. */
+			if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
+				fn_keypress = true;
+			else
+				num_down++;
 		}
 
 		val >>= 8;
 	}
+
+	/*
+	 * If the platform uses Fn keymaps, translate keys on a Fn keypress.
+	 * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
+	 */
+	if (fn_keypress) {
+		for (i = 0; i < num_down; i++) {
+			scancodes[i] += KBC_MAX_KEY;
+			keycodes[i] = kbc->keycode[scancodes[i]];
+		}
+	}
+
 	spin_unlock_irqrestore(&kbc->lock, flags);
 
 	tegra_kbc_report_released_keys(kbc->idev,
@@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
 
 	input_dev->keycode = kbc->keycode;
 	input_dev->keycodesize = sizeof(kbc->keycode[0]);
-	input_dev->keycodemax = ARRAY_SIZE(kbc->keycode);
+	input_dev->keycodemax = KBC_MAX_KEY;
+	if (pdata->use_fn_map)
+		input_dev->keycodemax *= 2;
 
+	kbc->use_fn_map = pdata->use_fn_map;
 	keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
 	matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
 				   input_dev->keycode, input_dev->keybit);
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 1f8e0108962e..7e64d01da2be 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -176,7 +176,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
 
 	/* request the IRQs */
 	err = request_irq(encoder->irq_a, &rotary_encoder_irq,
-			  IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 			  DRV_NAME, encoder);
 	if (err) {
 		dev_err(&pdev->dev, "unable to request IRQ %d\n",
@@ -185,7 +185,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
 	}
 
 	err = request_irq(encoder->irq_b, &rotary_encoder_irq,
-			  IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 			  DRV_NAME, encoder);
 	if (err) {
 		dev_err(&pdev->dev, "unable to request IRQ %d\n",
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 25e5d042a72c..7453938bf5ef 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -51,6 +51,29 @@
 #define SYN_EXT_CAP_REQUESTS(c)		(((c) & 0x700000) >> 20)
 #define SYN_CAP_MULTI_BUTTON_NO(ec)	(((ec) & 0x00f000) >> 12)
 #define SYN_CAP_PRODUCT_ID(ec)		(((ec) & 0xff0000) >> 16)
+
+/*
+ * The following describes response for the 0x0c query.
+ *
+ * byte	mask	name			meaning
+ * ----	----	-------			------------
+ * 1	0x01	adjustable threshold	capacitive button sensitivity
+ *					can be adjusted
+ * 1	0x02	report max		query 0x0d gives max coord reported
+ * 1	0x04	clearpad		sensor is ClearPad product
+ * 1	0x08	advanced gesture	not particularly meaningful
+ * 1	0x10	clickpad bit 0		1-button ClickPad
+ * 1	0x60	multifinger mode	identifies firmware finger counting
+ *					(not reporting!) algorithm.
+ *					Not particularly meaningful
+ * 1	0x80    covered pad		W clipped to 14, 15 == pad mostly covered
+ * 2	0x01    clickpad bit 1		2-button ClickPad
+ * 2	0x02    deluxe LED controls	touchpad supports LED commands
+ *					a la multimedia control bar
+ * 2	0x04	reduced filtering	firmware does less filtering on
+ *					position data, driver should watch
+ *					for noise.
+ */
 #define SYN_CAP_CLICKPAD(ex0c)		((ex0c) & 0x100000) /* 1-button ClickPad */
 #define SYN_CAP_CLICKPAD2BTN(ex0c)	((ex0c) & 0x000100) /* 2-button ClickPad */
 #define SYN_CAP_MAX_DIMENSIONS(ex0c)	((ex0c) & 0x020000)
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index db5b0bca1a1a..ba70058e2be3 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -188,7 +188,8 @@ static void serio_free_event(struct serio_event *event)
 	kfree(event);
 }
 
-static void serio_remove_duplicate_events(struct serio_event *event)
+static void serio_remove_duplicate_events(void *object,
+					  enum serio_event_type type)
 {
 	struct serio_event *e, *next;
 	unsigned long flags;
@@ -196,13 +197,13 @@ static void serio_remove_duplicate_events(struct serio_event *event)
 	spin_lock_irqsave(&serio_event_lock, flags);
 
 	list_for_each_entry_safe(e, next, &serio_event_list, node) {
-		if (event->object == e->object) {
+		if (object == e->object) {
 			/*
 			 * If this event is of different type we should not
 			 * look further - we only suppress duplicate events
 			 * that were sent back-to-back.
 			 */
-			if (event->type != e->type)
+			if (type != e->type)
 				break;
 
 			list_del_init(&e->node);
@@ -245,7 +246,7 @@ static void serio_handle_event(struct work_struct *work)
 			break;
 		}
 
-		serio_remove_duplicate_events(event);
+		serio_remove_duplicate_events(event->object, event->type);
 		serio_free_event(event);
 	}
 
@@ -298,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner,
 	event->owner = owner;
 
 	list_add_tail(&event->node, &serio_event_list);
-	schedule_work(&serio_event_work);
+	queue_work(system_long_wq, &serio_event_work);
 
 out:
 	spin_unlock_irqrestore(&serio_event_lock, flags);
@@ -436,10 +437,12 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
 	} else if (!strncmp(buf, "rescan", count)) {
 		serio_disconnect_port(serio);
 		serio_find_driver(serio);
+		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
 	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
 		serio_disconnect_port(serio);
 		error = serio_bind_driver(serio, to_serio_driver(drv));
 		put_driver(drv);
+		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
 	} else {
 		error = -EINVAL;
 	}
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index fc381498b798..cf8fb9f5d4a8 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -519,7 +519,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
 	/* Retrieve the physical and logical size for OEM devices */
 	error = wacom_retrieve_hid_descriptor(intf, features);
 	if (error)
-		goto fail2;
+		goto fail3;
 
 	wacom_setup_device_quirks(features);
 
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 14ea54b78e46..4bf2316e3284 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -941,28 +941,29 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
 	struct ads7846_platform_data *pdata = spi->dev.platform_data;
 	int err;
 
-	/* REVISIT when the irq can be triggered active-low, or if for some
+	/*
+	 * REVISIT when the irq can be triggered active-low, or if for some
 	 * reason the touchscreen isn't hooked up, we don't need to access
 	 * the pendown state.
 	 */
-	if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) {
-		dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
-		return -EINVAL;
-	}
 
 	if (pdata->get_pendown_state) {
 		ts->get_pendown_state = pdata->get_pendown_state;
-		return 0;
-	}
+	} else if (gpio_is_valid(pdata->gpio_pendown)) {
 
-	err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
-	if (err) {
-		dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
-			pdata->gpio_pendown);
-		return err;
-	}
+		err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
+		if (err) {
+			dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
+				pdata->gpio_pendown);
+			return err;
+		}
 
-	ts->gpio_pendown = pdata->gpio_pendown;
+		ts->gpio_pendown = pdata->gpio_pendown;
+
+	} else {
+		dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1353,7 +1354,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
  err_put_regulator:
 	regulator_put(ts->reg);
  err_free_gpio:
-	if (ts->gpio_pendown != -1)
+	if (!ts->get_pendown_state)
 		gpio_free(ts->gpio_pendown);
  err_cleanup_filter:
 	if (ts->filter_cleanup)
@@ -1383,8 +1384,13 @@ static int __devexit ads7846_remove(struct spi_device *spi)
 	regulator_disable(ts->reg);
 	regulator_put(ts->reg);
 
-	if (ts->gpio_pendown != -1)
+	if (!ts->get_pendown_state) {
+		/*
+		 * If we are not using a specialized pendown method, we must
+		 * have been relying on the gpio we set up ourselves.
+		 */
 		gpio_free(ts->gpio_pendown);
+	}
 
 	if (ts->filter_cleanup)
 		ts->filter_cleanup(ts->filter_data);
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 5cb8449c909d..c14412ef4648 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -51,6 +51,10 @@ MODULE_LICENSE("GPL");
 #define W8001_PKTLEN_TPCCTL	11	/* control packet */
 #define W8001_PKTLEN_TOUCH2FG	13
 
+/* resolution in points/mm */
+#define W8001_PEN_RESOLUTION    100
+#define W8001_TOUCH_RESOLUTION  10
+
 struct w8001_coord {
 	u8 rdy;
 	u8 tsw;
@@ -198,7 +202,7 @@ static void parse_touchquery(u8 *data, struct w8001_touch_query *query)
 		query->y = 1024;
 		if (query->panel_res)
 			query->x = query->y = (1 << query->panel_res);
-		query->panel_res = 10;
+		query->panel_res = W8001_TOUCH_RESOLUTION;
 	}
 }
 
@@ -394,6 +398,8 @@ static int w8001_setup(struct w8001 *w8001)
 
 		input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
 		input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
+		input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION);
+		input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION);
 		input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
 		if (coord.tilt_x && coord.tilt_y) {
 			input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
@@ -418,14 +424,17 @@ static int w8001_setup(struct w8001 *w8001)
 		w8001->max_touch_x = touch.x;
 		w8001->max_touch_y = touch.y;
 
-		/* scale to pen maximum */
 		if (w8001->max_pen_x && w8001->max_pen_y) {
+			/* if pen is supported scale to pen maximum */
 			touch.x = w8001->max_pen_x;
 			touch.y = w8001->max_pen_y;
+			touch.panel_res = W8001_PEN_RESOLUTION;
 		}
 
 		input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
 		input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
+		input_abs_set_res(dev, ABS_X, touch.panel_res);
+		input_abs_set_res(dev, ABS_Y, touch.panel_res);
 
 		switch (touch.sensor_id) {
 		case 0:
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 0858791978d8..cfff0c41d298 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,10 +1247,10 @@ static void
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
 	struct PStack *st = fi->userdata;
-	struct sk_buff *skb, *oskb;
+	struct sk_buff *skb;
 	struct Layer2 *l2 = &st->l2;
 	u_char header[MAX_HEADER_LEN];
-	int i;
+	int i, hdr_space_needed;
 	int unsigned p1;
 	u_long flags;
 
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 	if (!skb)
 		return;
 
+	hdr_space_needed = l2headersize(l2, 0);
+	if (hdr_space_needed > skb_headroom(skb)) {
+		struct sk_buff *orig_skb = skb;
+
+		skb = skb_realloc_headroom(skb, hdr_space_needed);
+		if (!skb) {
+			dev_kfree_skb(orig_skb);
+			return;
+		}
+	}
 	spin_lock_irqsave(&l2->lock, flags);
 	if(test_bit(FLG_MOD128, &l2->flag))
 		p1 = (l2->vs - l2->va) % 128;
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 		l2->vs = (l2->vs + 1) % 8;
 	}
 	spin_unlock_irqrestore(&l2->lock, flags);
-	p1 = skb->data - skb->head;
-	if (p1 >= i)
-		memcpy(skb_push(skb, i), header, i);
-	else {
-		printk(KERN_WARNING
-		"isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
-		oskb = skb;
-		skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
-		memcpy(skb_put(skb, i), header, i);
-		skb_copy_from_linear_data(oskb,
-					  skb_put(skb, oskb->len), oskb->len);
-		dev_kfree_skb(oskb);
-	}
+	memcpy(skb_push(skb, i), header, i);
 	st->l2.l2l1(st, PH_PULL | INDICATION, skb);
 	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
 	if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
diff --git a/drivers/isdn/hysdn/hysdn_defs.h b/drivers/isdn/hysdn/hysdn_defs.h
index 729df4089385..18b801ad97a4 100644
--- a/drivers/isdn/hysdn/hysdn_defs.h
+++ b/drivers/isdn/hysdn/hysdn_defs.h
@@ -227,7 +227,6 @@ extern hysdn_card *card_root;	/* pointer to first card */
 /*************************/
 /* im/exported functions */
 /*************************/
-extern char *hysdn_getrev(const char *);
 
 /* hysdn_procconf.c */
 extern int hysdn_procconf_init(void);	/* init proc config filesys */
@@ -259,7 +258,6 @@ extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *,
 
 /* hysdn_net.c */
 extern unsigned int hynet_enable; 
-extern char *hysdn_net_revision;
 extern int hysdn_net_create(hysdn_card *);	/* create a new net device */
 extern int hysdn_net_release(hysdn_card *);	/* delete the device */
 extern char *hysdn_net_getname(hysdn_card *);	/* get name of net interface */
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c
index b7cc5c2f08c6..0ab42ace1692 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/isdn/hysdn/hysdn_init.c
@@ -36,7 +36,6 @@ MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards");
 MODULE_AUTHOR("Werner Cornelius");
 MODULE_LICENSE("GPL");
 
-static char *hysdn_init_revision = "$Revision: 1.6.6.6 $";
 static int cardmax;		/* number of found cards */
 hysdn_card *card_root = NULL;	/* pointer to first card */
 static hysdn_card *card_last = NULL;	/* pointer to first card */
@@ -49,25 +48,6 @@ static hysdn_card *card_last = NULL;	/* pointer to first card */
 /* Additionally newer versions may be activated without rebooting.          */
 /****************************************************************************/
 
-/******************************************************/
-/* extract revision number from string for log output */
-/******************************************************/
-char *
-hysdn_getrev(const char *revision)
-{
-	char *rev;
-	char *p;
-
-	if ((p = strchr(revision, ':'))) {
-		rev = p + 2;
-		p = strchr(rev, '$');
-		*--p = 0;
-	} else
-		rev = "???";
-	return rev;
-}
-
-
 /****************************************************************************/
 /* init_module is called once when the module is loaded to do all necessary */
 /* things like autodetect...                                                */
@@ -175,13 +155,9 @@ static int hysdn_have_procfs;
 static int __init
 hysdn_init(void)
 {
-	char tmp[50];
 	int rc;
 
-	strcpy(tmp, hysdn_init_revision);
-	printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp));
-	strcpy(tmp, hysdn_net_revision);
-	printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp));
+	printk(KERN_NOTICE "HYSDN: module loaded\n");
 
 	rc = pci_register_driver(&hysdn_pci_driver);
 	if (rc)
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index feec8d89d719..11f2cce26005 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -26,9 +26,6 @@
 unsigned int hynet_enable = 0xffffffff; 
 module_param(hynet_enable, uint, 0);
 
-/* store the actual version for log reporting */
-char *hysdn_net_revision = "$Revision: 1.8.6.4 $";
-
 #define MAX_SKB_BUFFERS 20	/* number of buffers for keeping TX-data */
 
 /****************************************************************************/
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 96b3e39c3356..5fe83bd42061 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -23,7 +23,6 @@
 #include "hysdn_defs.h"
 
 static DEFINE_MUTEX(hysdn_conf_mutex);
-static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $";
 
 #define INFO_OUT_LEN 80		/* length of info line including lf */
 
@@ -404,7 +403,7 @@ hysdn_procconf_init(void)
 		card = card->next;	/* next entry */
 	}
 
-	printk(KERN_NOTICE "HYSDN: procfs Rev. %s initialised\n", hysdn_getrev(hysdn_procconf_revision));
+	printk(KERN_NOTICE "HYSDN: procfs initialised\n");
 	return (0);
 }				/* hysdn_procconf_init */
 
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d8..0ed7f6bc2a7f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
 
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 	conf = linear_conf(mddev, mddev->raid_disks);
 
 	if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b76cfc89e1b5..818313e277e7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -287,6 +287,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	mddev_t *mddev = q->queuedata;
 	int rv;
 	int cpu;
+	unsigned int sectors;
 
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
@@ -311,12 +312,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 	atomic_inc(&mddev->active_io);
 	rcu_read_unlock();
 
+	/*
+	 * save the sectors now since our bio can
+	 * go away inside make_request
+	 */
+	sectors = bio_sectors(bio);
 	rv = mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
 	part_stat_unlock();
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
@@ -548,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
 {
 	mddev_t *mddev, *new = NULL;
 
+	if (unit && MAJOR(unit) != MD_MAJOR)
+		unit &= ~((1<<MdpMinorShift)-1);
+
  retry:
 	spin_lock(&all_mddevs_lock);
 
@@ -1947,8 +1955,6 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
 			__bdevname(dev, b));
 		return PTR_ERR(bdev);
 	}
-	if (!shared)
-		set_bit(AllReserved, &rdev->flags);
 	rdev->bdev = bdev;
 	return err;
 }
@@ -2465,6 +2471,9 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		if (rdev->raid_disk != -1)
 			return -EBUSY;
 
+		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
+			return -EBUSY;
+
 		if (rdev->mddev->pers->hot_add_disk == NULL)
 			return -EINVAL;
 
@@ -2610,12 +2619,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 
 			mddev_lock(mddev);
 			list_for_each_entry(rdev2, &mddev->disks, same_set)
-				if (test_bit(AllReserved, &rdev2->flags) ||
-				    (rdev->bdev == rdev2->bdev &&
-				     rdev != rdev2 &&
-				     overlaps(rdev->data_offset, rdev->sectors,
-					      rdev2->data_offset,
-					      rdev2->sectors))) {
+				if (rdev->bdev == rdev2->bdev &&
+				    rdev != rdev2 &&
+				    overlaps(rdev->data_offset, rdev->sectors,
+					     rdev2->data_offset,
+					     rdev2->sectors)) {
 					overlap = 1;
 					break;
 				}
@@ -4133,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
 	}
 
 	mddev->array_sectors = sectors;
-	set_capacity(mddev->gendisk, mddev->array_sectors);
-	if (mddev->pers)
+	if (mddev->pers) {
+		set_capacity(mddev->gendisk, mddev->array_sectors);
 		revalidate_disk(mddev->gendisk);
-
+	}
 	return len;
 }
 
@@ -4619,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
 	}
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	revalidate_disk(mddev->gendisk);
+	mddev->changed = 1;
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 out:
 	return err;
@@ -4707,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
 	mddev->sync_speed_min = mddev->sync_speed_max = 0;
 	mddev->recovery = 0;
 	mddev->in_sync = 0;
+	mddev->changed = 0;
 	mddev->degraded = 0;
 	mddev->safemode = 0;
 	mddev->bitmap_info.offset = 0;
@@ -4822,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 
 		set_capacity(disk, 0);
 		mutex_unlock(&mddev->open_mutex);
+		mddev->changed = 1;
 		revalidate_disk(disk);
 
 		if (mddev->ro)
@@ -5578,6 +5589,8 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
 	mddev->delta_disks = raid_disks - mddev->raid_disks;
 
 	rv = mddev->pers->check_reshape(mddev);
+	if (rv < 0)
+		mddev->delta_disks = 0;
 	return rv;
 }
 
@@ -6004,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 	atomic_inc(&mddev->openers);
 	mutex_unlock(&mddev->open_mutex);
 
-	check_disk_size_change(mddev->gendisk, bdev);
+	check_disk_change(bdev);
  out:
 	return err;
 }
@@ -6019,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
 
 	return 0;
 }
+
+static int md_media_changed(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+	mddev_t *mddev = disk->private_data;
+
+	mddev->changed = 0;
+	return 0;
+}
 static const struct block_device_operations md_fops =
 {
 	.owner		= THIS_MODULE,
@@ -6029,6 +6057,8 @@ static const struct block_device_operations md_fops =
 	.compat_ioctl	= md_compat_ioctl,
 #endif
 	.getgeo		= md_getgeo,
+	.media_changed  = md_media_changed,
+	.revalidate_disk= md_revalidate,
 };
 
 static int md_thread(void * arg)
@@ -6985,9 +7015,6 @@ void md_do_sync(mddev_t *mddev)
 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 		mddev->resync_min = mddev->curr_resync_completed;
 	mddev->curr_resync = 0;
-	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
-		mddev->curr_resync_completed = 0;
-	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	wake_up(&resync_wait);
 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
@@ -7028,7 +7055,7 @@ static int remove_and_add_spares(mddev_t *mddev)
 			}
 		}
 
-	if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
+	if (mddev->degraded && !mddev->recovery_disabled) {
 		list_for_each_entry(rdev, &mddev->disks, same_set) {
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(In_sync, &rdev->flags) &&
@@ -7151,7 +7178,20 @@ void md_check_recovery(mddev_t *mddev)
 			/* Only thing we do on a ro array is remove
 			 * failed devices.
 			 */
-			remove_and_add_spares(mddev);
+			mdk_rdev_t *rdev;
+			list_for_each_entry(rdev, &mddev->disks, same_set)
+				if (rdev->raid_disk >= 0 &&
+				    !test_bit(Blocked, &rdev->flags) &&
+				    test_bit(Faulty, &rdev->flags) &&
+				    atomic_read(&rdev->nr_pending)==0) {
+					if (mddev->pers->hot_remove_disk(
+						    mddev, rdev->raid_disk)==0) {
+						char nm[20];
+						sprintf(nm,"rd%d", rdev->raid_disk);
+						sysfs_remove_link(&mddev->kobj, nm);
+						rdev->raid_disk = -1;
+					}
+				}
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			goto unlock;
 		}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index eec517ced31a..12215d437fcc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -93,8 +93,6 @@ struct mdk_rdev_s
 #define	Faulty		1		/* device is known to have a fault */
 #define	In_sync		2		/* device is in_sync with rest of array */
 #define	WriteMostly	4		/* Avoid reading if at all possible */
-#define	AllReserved	6		/* If whole device is reserved for
-					 * one array */
 #define	AutoDetected	7		/* added by auto-detect */
 #define Blocked		8		/* An error occured on an externally
 					 * managed array, don't allow writes
@@ -276,6 +274,8 @@ struct mddev_s
 	atomic_t			active;		/* general refcount */
 	atomic_t			openers;	/* number of active opens */
 
+	int				changed;	/* True if we might need to
+							 * reread partition info */
 	int				degraded;	/* whether md should consider
 							 * adding a spare
 							 */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2e..3a62d440e27b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
 	 * bookkeeping area. [whatever we allocate in multipath_run(),
 	 * should be freed in multipath_stop()]
 	 */
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
 	mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a39f4c355e55..c0ac457f1218 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -179,6 +179,14 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 			rdev1->new_raid_disk = j;
 		}
 
+		if (mddev->level == 1) {
+			/* taking over a raid1 array -
+			 * we have only one active disk
+			 */
+			j = 0;
+			rdev1->new_raid_disk = j;
+		}
+
 		if (j < 0 || j >= mddev->raid_disks) {
 			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
 			       "aborting!\n", mdname(mddev), j);
@@ -353,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
@@ -644,12 +651,39 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
 	return priv_conf;
 }
 
+static void *raid0_takeover_raid1(mddev_t *mddev)
+{
+	raid0_conf_t *priv_conf;
+
+	/* Check layout:
+	 *  - (N - 1) mirror drives must be already faulty
+	 */
+	if ((mddev->raid_disks - 1) != mddev->degraded) {
+		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
+		       mdname(mddev));
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Set new parameters */
+	mddev->new_level = 0;
+	mddev->new_layout = 0;
+	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
+	mddev->delta_disks = 1 - mddev->raid_disks;
+	mddev->raid_disks = 1;
+	/* make sure it will not be marked as dirty */
+	mddev->recovery_cp = MaxSector;
+
+	create_strip_zones(mddev, &priv_conf);
+	return priv_conf;
+}
+
 static void *raid0_takeover(mddev_t *mddev)
 {
 	/* raid0 can take over:
 	 *  raid4 - if all data disks are active.
 	 *  raid5 - providing it is Raid4 layout and one disk is faulty
 	 *  raid10 - assuming we have all necessary active disks
+	 *  raid1 - with (N - 1) mirror drives faulty
 	 */
 	if (mddev->level == 4)
 		return raid0_takeover_raid45(mddev);
@@ -665,6 +699,12 @@ static void *raid0_takeover(mddev_t *mddev)
 	if (mddev->level == 10)
 		return raid0_takeover_raid10(mddev);
 
+	if (mddev->level == 1)
+		return raid0_takeover_raid1(mddev);
+
+	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+		mddev->level);
+
 	return ERR_PTR(-EINVAL);
 }
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba9..06cd712807d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
+		/* Only take the spinlock to quiet a warning */
+		spin_lock(conf->mddev->queue->queue_lock);
 		blk_remove_plug(conf->mddev->queue);
+		spin_unlock(conf->mddev->queue->queue_lock);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to
 		 * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r1_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
+		blk_plug_device_unlocked(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
 	if (IS_ERR(conf))
 		return PTR_ERR(conf);
 
-	mddev->queue->queue_lock = &conf->device_lock;
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 69b659544390..747d061d8e05 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
+		/* Spinlock only taken to quiet a warning */
+		spin_lock(conf->mddev->queue->queue_lock);
 		blk_remove_plug(conf->mddev->queue);
+		spin_unlock(conf->mddev->queue->queue_lock);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to disk
 		 * before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r10_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
+		blk_plug_device_unlocked(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
 	if (!conf)
 		goto out;
 
-	mddev->queue->queue_lock = &conf->device_lock;
-
 	mddev->thread = conf->thread;
 	conf->thread = NULL;
 
@@ -2463,11 +2464,13 @@ static void *raid10_takeover_raid0(mddev_t *mddev)
 	mddev->recovery_cp = MaxSector;
 
 	conf = setup_conf(mddev);
-	if (!IS_ERR(conf))
+	if (!IS_ERR(conf)) {
 		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0)
 				rdev->new_raid_disk = rdev->raid_disk * 2;
-		
+		conf->barrier = 1;
+	}
+
 	return conf;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5044babfcda0..78536fdbd87f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
 
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-		mddev->queue->queue_lock = &conf->device_lock;
 		mddev->queue->unplug_fn = raid5_unplug_queue;
 
 		chunk_size = mddev->chunk_sectors << 9;
@@ -5517,7 +5516,6 @@ static int raid5_start_reshape(mddev_t *mddev)
 	raid5_conf_t *conf = mddev->private;
 	mdk_rdev_t *rdev;
 	int spares = 0;
-	int added_devices = 0;
 	unsigned long flags;
 
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -5527,8 +5525,8 @@ static int raid5_start_reshape(mddev_t *mddev)
 		return -ENOSPC;
 
 	list_for_each_entry(rdev, &mddev->disks, same_set)
-		if ((rdev->raid_disk < 0 || rdev->raid_disk >= conf->raid_disks)
-		     && !test_bit(Faulty, &rdev->flags))
+		if (!test_bit(In_sync, &rdev->flags)
+		    && !test_bit(Faulty, &rdev->flags))
 			spares++;
 
 	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
@@ -5571,34 +5569,35 @@ static int raid5_start_reshape(mddev_t *mddev)
 	 * to correctly record the "partially reconstructed" state of
 	 * such devices during the reshape and confusion could result.
 	 */
-	if (mddev->delta_disks >= 0)
-	    list_for_each_entry(rdev, &mddev->disks, same_set)
-		if (rdev->raid_disk < 0 &&
-		    !test_bit(Faulty, &rdev->flags)) {
-			if (raid5_add_disk(mddev, rdev) == 0) {
-				char nm[20];
-				if (rdev->raid_disk >= conf->previous_raid_disks) {
-					set_bit(In_sync, &rdev->flags);
-					added_devices++;
-				} else
-					rdev->recovery_offset = 0;
-				sprintf(nm, "rd%d", rdev->raid_disk);
-				if (sysfs_create_link(&mddev->kobj,
-						      &rdev->kobj, nm))
-					/* Failure here is OK */;
-			} else
-				break;
-		} else if (rdev->raid_disk >= conf->previous_raid_disks
-			   && !test_bit(Faulty, &rdev->flags)) {
-			/* This is a spare that was manually added */
-			set_bit(In_sync, &rdev->flags);
-			added_devices++;
-		}
+	if (mddev->delta_disks >= 0) {
+		int added_devices = 0;
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			if (rdev->raid_disk < 0 &&
+			    !test_bit(Faulty, &rdev->flags)) {
+				if (raid5_add_disk(mddev, rdev) == 0) {
+					char nm[20];
+					if (rdev->raid_disk
+					    >= conf->previous_raid_disks) {
+						set_bit(In_sync, &rdev->flags);
+						added_devices++;
+					} else
+						rdev->recovery_offset = 0;
+					sprintf(nm, "rd%d", rdev->raid_disk);
+					if (sysfs_create_link(&mddev->kobj,
+							      &rdev->kobj, nm))
+						/* Failure here is OK */;
+				}
+			} else if (rdev->raid_disk >= conf->previous_raid_disks
+				   && !test_bit(Faulty, &rdev->flags)) {
+				/* This is a spare that was manually added */
+				set_bit(In_sync, &rdev->flags);
+				added_devices++;
+			}
 
-	/* When a reshape changes the number of devices, ->degraded
-	 * is measured against the larger of the pre and post number of
-	 * devices.*/
-	if (mddev->delta_disks > 0) {
+		/* When a reshape changes the number of devices,
+		 * ->degraded is measured against the larger of the
+		 * pre and post number of devices.
+		 */
 		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
 			- added_devices;
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index e9a3eab7b0cf..8c1d85e27be4 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -621,7 +621,7 @@ static int __init memstick_init(void)
 {
 	int rc;
 
-	workqueue = create_freezeable_workqueue("kmemstick");
+	workqueue = create_freezable_workqueue("kmemstick");
 	if (!workqueue)
 		return -ENOMEM;
 
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f71f22948477..1735c84ff757 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.17"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.17"
+#define MPT_LINUX_VERSION_COMMON	"3.04.18"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.18"
 #define WHAT_MAGIC_STRING		"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a3856ed90aef..e8deb8ed0499 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 }
 
 static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+	fasync_helper(-1, filep, 0, &async_queue);
+	return 0;
+}
+
+static int
 mptctl_fasync(int fd, struct file *filep, int mode)
 {
 	MPT_ADAPTER	*ioc;
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = {
 	.llseek =	no_llseek,
 	.fasync = 	mptctl_fasync,
 	.unlocked_ioctl = mptctl_ioctl,
+	.release =	mptctl_release,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = compat_mpctl_ioctl,
 #endif
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 59b8f53d1ece..0d9b82a44540 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 	}
 
  out:
-	printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
-	    ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
+	printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+	    ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+	    SCpnt, SCpnt->serial_number);
 
 	return retval;
 }
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
 
 	vdevice = SCpnt->device->hostdata;
 	if (!vdevice || !vdevice->vtarget) {
-		retval = SUCCESS;
+		retval = 0;
 		goto out;
 	}
 
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 5f6852dff40b..44d4475a09dd 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -329,7 +329,7 @@ static int __init tifm_init(void)
 {
 	int rc;
 
-	workqueue = create_freezeable_workqueue("tifm");
+	workqueue = create_freezable_workqueue("tifm");
 	if (!workqueue)
 		return -ENOMEM;
 
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 4d2ea8e80140..6df5a55da110 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void)
 	if (x86_hyper != &x86_hyper_vmware)
 		return -ENODEV;
 
-	vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+	vmballoon_wq = create_freezable_workqueue("vmmemctl");
 	if (!vmballoon_wq) {
 		pr_err("failed to create workqueue\n");
 		return -ENOMEM;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efbc77cc..6322d1fb5d62 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@ int  r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 
 	init_completion(&dev->dma_done);
 
-	dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
 
 	if (!dev->card_workqueue)
 		goto error9;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 67822cf6c025..ac0d6a8613b5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
 static __init int sm_module_init(void)
 {
 	int error = 0;
-	cache_flush_workqueue = create_freezeable_workqueue("smflush");
+	cache_flush_workqueue = create_freezable_workqueue("smflush");
 
 	if (IS_ERR(cache_flush_workqueue))
 		return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0c7811faf72c..a179cc6d79f2 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1786,6 +1786,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = nonemb_cmd->va;
 	sge = nonembedded_sgl(wrb);
 
@@ -1801,6 +1805,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 
 	status = be_mcc_notify_wait(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index f40740e68ea5..d584d32c747d 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -4276,9 +4276,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
 				BNX2X_ACCEPT_MULTICAST;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id,
+						  BNX2X_ACCEPT_UNICAST |
+						  BNX2X_ACCEPT_MULTICAST);
+		}
 #endif
 		break;
 
@@ -4286,18 +4289,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
 				BNX2X_ACCEPT_ALL_MULTICAST;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		/*
+		 *  Prevent duplication of multicast packets by configuring the
+		 *  FCoE L2 Client to receive only matched unicast frames.
+		 */
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id,
+						  BNX2X_ACCEPT_UNICAST);
+		}
 #endif
 		break;
 
 	case BNX2X_RX_MODE_PROMISC:
 		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		/*
+		 *  Prevent packet duplication by configuring DROP_ALL for the
+		 *  FCoE L2 Client.
+		 */
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+		}
 #endif
 		/* pass management unicast packets as well */
 		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534aee452..7513c4523ac4 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
 		goto open_unlock;
 	}
 
-	priv->wq = create_freezeable_workqueue("mcp251x_wq");
+	priv->wq = create_freezable_workqueue("mcp251x_wq");
 	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
 	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index 27d1d398e25e..d38706958af6 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
 config CAN_MSCAN
-	depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
+	depends on CAN_DEV && (PPC || M68K)
 	tristate "Support for Freescale MSCAN based chips"
 	---help---
 	  The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index c42e97268248..e54712b22c27 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -185,7 +185,7 @@ struct pch_can_priv {
 
 static struct can_bittiming_const pch_can_bittiming_const = {
 	.name = KBUILD_MODNAME,
-	.tseg1_min = 1,
+	.tseg1_min = 2,
 	.tseg1_max = 16,
 	.tseg2_min = 1,
 	.tseg2_max = 8,
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
 	struct pch_can_priv *priv = netdev_priv(ndev);
 
 	unregister_candev(priv->ndev);
-	pci_iounmap(pdev, priv->regs);
 	if (priv->use_msi)
 		pci_disable_msi(priv->dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 	pch_can_reset(priv);
+	pci_iounmap(pdev, priv->regs);
 	free_candev(priv->ndev);
 }
 
@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
 		priv->use_msi = 0;
 	} else {
 		netdev_err(ndev, "PCH CAN opened with MSI\n");
+		pci_set_master(pdev);
 		priv->use_msi = 1;
 	}
 
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 8ba81b3ddd90..5de46a9a77bb 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -18,7 +18,7 @@ config CAN_SOFTING
 config CAN_SOFTING_CS
 	tristate "Softing Gmbh CAN pcmcia cards"
 	depends on PCMCIA
-	select CAN_SOFTING
+	depends on CAN_SOFTING
 	---help---
 	  Support for PCMCIA cards from Softing Gmbh & some cards
 	  from Vector Gmbh.
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index 300fe75dd1a7..c11bb4de8630 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -19,6 +19,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 
 #include <pcmcia/cistpl.h>
 #include <pcmcia/ds.h>
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 56166ae2059f..6aad64df4dcb 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
 {
 	int i;
 
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Debugfs support is best effort.
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
  */
 static void cleanup_debugfs(struct adapter *adapter)
 {
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	struct net_device *netdev;
 
 	/*
-	 * Vet our module parameters.
-	 */
-	if (msi != MSI_MSIX && msi != MSI_MSI) {
-		dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
-			" (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
-			MSI_MSI);
-		err = -EINVAL;
-		goto err_out;
-	}
-
-	/*
 	 * Print our driver banner the first time we're called to initialize a
 	 * device.
 	 */
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	/*
 	 * Set up our debugfs entries.
 	 */
-	if (cxgb4vf_debugfs_root) {
+	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
 		adapter->debugfs_root =
 			debugfs_create_dir(pci_name(pdev),
 					   cxgb4vf_debugfs_root);
-		if (adapter->debugfs_root == NULL)
+		if (IS_ERR_OR_NULL(adapter->debugfs_root))
 			dev_warn(&pdev->dev, "could not create debugfs"
 				 " directory");
 		else
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	 */
 
 err_free_debugfs:
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 		cleanup_debugfs(adapter);
 		debugfs_remove_recursive(adapter->debugfs_root);
 	}
@@ -2802,7 +2791,6 @@ err_release_regions:
 err_disable_device:
 	pci_disable_device(pdev);
 
-err_out:
 	return err;
 }
 
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
 		/*
 		 * Tear down our debugfs entries.
 		 */
-		if (adapter->debugfs_root) {
+		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 			cleanup_debugfs(adapter);
 			debugfs_remove_recursive(adapter->debugfs_root);
 		}
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
 }
 
 /*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+	struct adapter *adapter;
+	int pidx;
+
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return;
+
+	/*
+	 * Disable all Virtual Interfaces.  This will shut down the
+	 * delivery of all ingress packets into the chip for these
+	 * Virtual Interfaces.
+	 */
+	for_each_port(adapter, pidx) {
+		struct net_device *netdev;
+		struct port_info *pi;
+
+		if (!test_bit(pidx, &adapter->registered_device_map))
+			continue;
+
+		netdev = adapter->port[pidx];
+		if (!netdev)
+			continue;
+
+		pi = netdev_priv(netdev);
+		t4vf_enable_vi(adapter, pi->viid, false, false);
+	}
+
+	/*
+	 * Free up all Queues, which will prevent further DMA and
+	 * Interrupts, allowing various internal pathways to drain.
+	 */
+	t4vf_free_sge_resources(adapter);
+}
+
+/*
  * PCI Device registration data structures.
  */
 #define CH_DEVICE(devid, idx) \
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
 	.id_table	= cxgb4vf_pci_tbl,
 	.probe		= cxgb4vf_pci_probe,
 	.remove		= __devexit_p(cxgb4vf_pci_remove),
+	.shutdown	= __devexit_p(cxgb4vf_pci_shutdown),
 };
 
 /*
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
 {
 	int ret;
 
+	/*
+	 * Vet our module parameters.
+	 */
+	if (msi != MSI_MSIX && msi != MSI_MSI) {
+		printk(KERN_WARNING KBUILD_MODNAME
+		       ": bad module parameter msi=%d; must be %d"
+		       " (MSI-X or MSI) or %d (MSI)\n",
+		       msi, MSI_MSIX, MSI_MSI);
+		return -EINVAL;
+	}
+
 	/* Debugfs support is optional, just warn if this fails */
 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	if (!cxgb4vf_debugfs_root)
+	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
 		       " debugfs entry, continuing\n");
 
 	ret = pci_register_driver(&cxgb4vf_driver);
-	if (ret < 0)
+	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		debugfs_remove(cxgb4vf_debugfs_root);
 	return ret;
 }
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 0f51c80475ce..192db226ec7f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
 	delay_idx = 0;
 	ms = delay[0];
 
-	for (i = 0; i < 500; i += ms) {
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
 		if (sleep_ok) {
 			ms = delay[delay_idx];
 			if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 2d4c4fc1d900..461dd6f905f7 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
 	/* Checksum mode */
 	dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
 
-	/* GPIO0 on pre-activate PHY */
-	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
 	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
-	iow(db, DM9000_GPR, 0);	/* Enable PHY */
 
 	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
 
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
 	unsigned long flags;
 
 	/* Save previous register address */
-	reg_save = readb(db->io_addr);
 	spin_lock_irqsave(&db->lock, flags);
+	reg_save = readb(db->io_addr);
 
 	netif_stop_queue(dev);
 	dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
 	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
 		return -EAGAIN;
 
+	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
+	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
+	mdelay(1); /* delay needed by DM9000B */
+
 	/* Initialize DM9000 board */
 	dm9000_reset(db);
 	dm9000_init_dm9000(dev);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index aed223b1b897..7501d977d992 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
 	case M88E1000_I_PHY_ID:
 	case M88E1011_I_PHY_ID:
 	case M88E1111_I_PHY_ID:
+	case M88E1118_E_PHY_ID:
 		hw->phy_type = e1000_phy_m88;
 		break;
 	case IGP01E1000_I_PHY_ID:
@@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
 		break;
 	case e1000_ce4100:
 		if ((hw->phy_id == RTL8211B_PHY_ID) ||
-		    (hw->phy_id == RTL8201N_PHY_ID))
+		    (hw->phy_id == RTL8201N_PHY_ID) ||
+		    (hw->phy_id == M88E1118_E_PHY_ID))
 			match = true;
 		break;
 	case e1000_82541:
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 196eeda2dd6c..c70b23d52284 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -2917,6 +2917,7 @@ struct e1000_host_command_info {
 #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
 #define M88E1011_I_REV_4   0x04
 #define M88E1111_I_PHY_ID  0x01410CC0
+#define M88E1118_E_PHY_ID  0x01410E40
 #define L1LXT971A_PHY_ID   0x001378E0
 
 #define RTL8211B_PHY_ID    0x001CC910
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 1c18f26b0812..3fa110ddb041 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
 	u16 phy_status, phy_1000t_status, phy_ext_status;
 	u16 pci_status;
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1e_rphy(hw, PHY_STATUS, &phy_status);
 	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
 	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, downshift_task);
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
 }
 
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter)
 	return 0;
 }
 
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (!(adapter->flags2 & FLAG2_DMA_BURST))
+		return;
+
+	/* flush pending descriptor writebacks to memory */
+	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+	/* execute the writes immediately */
+	e1e_flush();
+}
+
 void e1000e_down(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter)
 
 	if (!pci_channel_offline(adapter->pdev))
 		e1000e_reset(adapter);
+
+	e1000e_flush_descriptors(adapter);
+
 	e1000_clean_tx_ring(adapter);
 	e1000_clean_rx_ring(adapter);
 
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
 {
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, update_phy_task);
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1000_get_phy_info(&adapter->hw);
 }
 
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
 static void e1000_update_phy_info(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	schedule_work(&adapter->update_phy_task);
 }
 
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work)
 	u32 link, tctl;
 	int tx_pending = 0;
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	link = e1000e_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link) {
 		/* Cancel scheduled suspend requests. */
@@ -4309,7 +4344,6 @@ link_up:
 			 * to get done, so reset controller to flush Tx.
 			 * (Do the reset outside of interrupt context).
 			 */
-			adapter->tx_timeout_count++;
 			schedule_work(&adapter->reset_task);
 			/* return immediately since reset is imminent */
 			return;
@@ -4338,19 +4372,12 @@ link_up:
 	else
 		ew32(ICS, E1000_ICS_RXDMT0);
 
+	/* flush pending descriptors to memory before detecting Tx hang */
+	e1000e_flush_descriptors(adapter);
+
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = 1;
 
-	/* flush partial descriptors to memory before detecting Tx hang */
-	if (adapter->flags2 & FLAG2_DMA_BURST) {
-		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
-		ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
-		/*
-		 * no need to flush the writes because the timeout code does
-		 * an er32 first thing
-		 */
-	}
-
 	/*
 	 * With 82571 controllers, LAA may be overwritten due to controller
 	 * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4888,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work)
 	struct e1000_adapter *adapter;
 	adapter = container_of(work, struct e1000_adapter, reset_task);
 
+	/* don't run the task if already down */
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
 	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
 		e1000e_dump(adapter);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index af09296ef0dd..9c0b1bac6af6 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		goto out_error;
 	}
 
+	netif_carrier_off(dev);
+
 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index d5ede2df3e42..ebbda7d15254 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
 		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
 
 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+		/*  clear VMDq pool/queue selection for RAR 0 */
+		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
 	}
 	hw->addr_ctrl.overflow_promisc = 0;
 
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6342d4859790..c54a88274d51 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,13 +159,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	struct scatterlist *sg;
 	unsigned int i, j, dmacount;
 	unsigned int len;
-	static const unsigned int bufflen = 4096;
+	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
 	unsigned int firstoff = 0;
 	unsigned int lastsize;
 	unsigned int thisoff = 0;
 	unsigned int thislen = 0;
 	u32 fcbuff, fcdmarw, fcfltrw;
-	dma_addr_t addr;
+	dma_addr_t addr = 0;
 
 	if (!netdev || !sgl)
 		return 0;
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	/* only the last buffer may have non-full bufflen */
 	lastsize = thisoff + thislen;
 
+	/*
+	 * lastsize can not be buffer len.
+	 * If it is, add another buffer with lastsize = 1.
+	 */
+	if (lastsize == bufflen) {
+		if (j >= IXGBE_BUFFCNT_MAX) {
+			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+				"not enough user buffers. We need an extra "
+				"buffer because lastsize is bufflen.\n",
+				xid, i, j, dmacount, (u64)addr);
+			goto out_noddp_free;
+		}
+
+		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+		j++;
+		lastsize = 1;
+	}
+
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
 	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
 	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			e_err(drv, "failed to allocated FCoE DDP pool\n");
 
 		spin_lock_init(&fcoe->lock);
+
+		/* Extra buffer to be shared by all DDPs for HW work around */
+		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+		if (fcoe->extra_ddp_buffer == NULL) {
+			e_err(drv, "failed to allocated extra DDP buffer\n");
+			goto out_extra_ddp_buffer_alloc;
+		}
+
+		fcoe->extra_ddp_buffer_dma =
+			dma_map_single(&adapter->pdev->dev,
+				       fcoe->extra_ddp_buffer,
+				       IXGBE_FCBUFF_MIN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev,
+				      fcoe->extra_ddp_buffer_dma)) {
+			e_err(drv, "failed to map extra DDP buffer\n");
+			goto out_extra_ddp_buffer_dma;
+		}
 	}
 
 	/* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		}
 	}
 #endif
+
+	return;
+
+out_extra_ddp_buffer_dma:
+	kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+	pci_pool_destroy(fcoe->pool);
+	fcoe->pool = NULL;
 }
 
 /**
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
 	if (fcoe->pool) {
 		for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 			ixgbe_fcoe_ddp_put(adapter->netdev, i);
+		dma_unmap_single(&adapter->pdev->dev,
+				 fcoe->extra_ddp_buffer_dma,
+				 IXGBE_FCBUFF_MIN,
+				 DMA_FROM_DEVICE);
+		kfree(fcoe->extra_ddp_buffer);
 		pci_pool_destroy(fcoe->pool);
 		fcoe->pool = NULL;
 	}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c551c8db..65cc8fb14fe7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@ struct ixgbe_fcoe {
 	spinlock_t lock;
 	struct pci_pool *pool;
 	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+	unsigned char *extra_ddp_buffer;
+	dma_addr_t extra_ddp_buffer_dma;
 };
 
 #endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 602078b84892..30f9ccfb4f87 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
 			      "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "3.0.12-k2"
+#define DRV_VERSION "3.2.9-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 
@@ -3176,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 	u32 mhadd, hlreg0;
 
 	/* Decide whether to use packet split mode or not */
+	/* On by default */
+	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+
 	/* Do not use packet split if we're in SR-IOV Mode */
-	if (!adapter->num_vfs)
-		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+	if (adapter->num_vfs)
+		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+	/* Disable packet split due to 82599 erratum #45 */
+	if (hw->mac.type == ixgbe_mac_82599EB)
+		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 
 	/* Set the RX buffer length according to the mode */
 	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -3721,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
 			 * We need to try and force an autonegotiation
 			 * session, then bring up link.
 			 */
-			hw->mac.ops.setup_sfp(hw);
+			if (hw->mac.ops.setup_sfp)
+				hw->mac.ops.setup_sfp(hw);
 			if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 				schedule_work(&adapter->multispeed_fiber_task);
 		} else {
@@ -4863,16 +4871,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 {
 	int q_idx, num_q_vectors;
 	struct ixgbe_q_vector *q_vector;
-	int napi_vectors;
 	int (*poll)(struct napi_struct *, int);
 
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-		napi_vectors = adapter->num_rx_queues;
 		poll = &ixgbe_clean_rxtx_many;
 	} else {
 		num_q_vectors = 1;
-		napi_vectors = 1;
 		poll = &ixgbe_poll;
 	}
 
@@ -5964,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
 		unregister_netdev(adapter->netdev);
 		return;
 	}
-	hw->mac.ops.setup_sfp(hw);
+	if (hw->mac.ops.setup_sfp)
+		hw->mac.ops.setup_sfp(hw);
 
 	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 		/* This will also work for DA Twinax connections */
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 47b15738b009..187b3a16ec1f 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
 	return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
-
 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 {
 	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 	vmolr |= (IXGBE_VMOLR_ROMPE |
-		  IXGBE_VMOLR_ROPE |
 		  IXGBE_VMOLR_BAM);
 	if (aupe)
 		vmolr |= IXGBE_VMOLR_AUPE;
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 3a8923993ce3..f2518b01067d 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
 	}
 
 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Poll for reset bit to self-clear indicating reset is complete */
 	for (i = 0; i < 10; i++) {
 		udelay(1);
 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-		if (!(ctrl & IXGBE_CTRL_RST))
+		if (!(ctrl & reset_bit))
 			break;
 	}
-	if (ctrl & IXGBE_CTRL_RST) {
+	if (ctrl & reset_bit) {
 		status = IXGBE_ERR_RESET_FAILED;
 		hw_dbg(hw, "Reset polling failed to complete.\n");
 	}
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a99520f..e1e33c80fb25 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
 	struct pch_gbe_regs_mac_adr mac_adr[16];
 	u32 ADDR_MASK;
 	u32 MIIM;
-	u32 reserve2;
+	u32 MAC_ADDR_LOAD;
 	u32 RGMII_ST;
 	u32 RGMII_CTRL;
 	u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 1bf12339441b..b99e90aca37d 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_SHORT_PKT		64
 #define DSC_INIT16			0xC000
 #define PCH_GBE_DMA_ALIGN		0
+#define PCH_GBE_DMA_PADDING		2
 #define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT	256
 #define PCH_GBE_PCI_BAR			1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 			       int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
 /**
  * pch_gbe_mac_read_mac_addr - Read MAC address
  * @hw:	            Pointer to the HW structure
@@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work)
 	struct pch_gbe_adapter *adapter;
 	adapter = container_of(work, struct pch_gbe_adapter, reset_task);
 
+	rtnl_lock();
 	pch_gbe_reinit_locked(adapter);
+	rtnl_unlock();
 }
 
 /**
@@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work)
  */
 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
-	rtnl_lock();
-	if (netif_running(netdev)) {
-		pch_gbe_down(adapter);
-		pch_gbe_up(adapter);
-	}
-	rtnl_unlock();
+	pch_gbe_down(adapter);
+	pch_gbe_up(adapter);
 }
 
 /**
@@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 	struct pch_gbe_buffer *buffer_info;
 	struct pch_gbe_rx_desc *rx_desc;
 	u32 length;
-	unsigned char tmp_packet[ETH_HLEN];
 	unsigned int i;
 	unsigned int cleaned_count = 0;
 	bool cleaned = false;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *new_skb;
 	u8 dma_status;
 	u16 gbec_status;
 	u32 tcp_ip_status;
-	u8 skb_copy_flag = 0;
-	u8 skb_padding_flag = 0;
 
 	i = rx_ring->next_to_clean;
 
@@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 			pr_err("Receive CRC Error\n");
 		} else {
 			/* get receive length */
-			/* length convert[-3], padding[-2] */
-			length = (rx_desc->rx_words_eob) - 3 - 2;
+			/* length convert[-3] */
+			length = (rx_desc->rx_words_eob) - 3;
 
 			/* Decide the data conversion method */
 			if (!adapter->rx_csum) {
 				/* [Header:14][payload] */
-				skb_padding_flag = 0;
-				skb_copy_flag = 1;
+				if (NET_IP_ALIGN) {
+					/* Because alignment differs,
+					 * the new_skb is newly allocated,
+					 * and data is copied to new_skb.*/
+					new_skb = netdev_alloc_skb(netdev,
+							 length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
+					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
+					memcpy(new_skb->data, skb->data,
+					       length);
+					skb = new_skb;
+				} else {
+					/* DMA buffer is used as SKB as it is.*/
+					buffer_info->skb = NULL;
+				}
 			} else {
 				/* [Header:14][padding:2][payload] */
-				skb_padding_flag = 1;
-				if (length < copybreak)
-					skb_copy_flag = 1;
-				else
-					skb_copy_flag = 0;
-			}
-
-			/* Data conversion */
-			if (skb_copy_flag) {	/* recycle  skb */
-				struct sk_buff *new_skb;
-				new_skb =
-				    netdev_alloc_skb(netdev,
-						     length + NET_IP_ALIGN);
-				if (new_skb) {
-					if (!skb_padding_flag) {
-						skb_reserve(new_skb,
-								NET_IP_ALIGN);
+				/* The length includes padding length */
+				length = length - PCH_GBE_DMA_PADDING;
+				if ((length < copybreak) ||
+				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+					/* Because alignment differs,
+					 * the new_skb is newly allocated,
+					 * and data is copied to new_skb.
+					 * Padding data is deleted
+					 * at the time of a copy.*/
+					new_skb = netdev_alloc_skb(netdev,
+							 length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
 					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
 					memcpy(new_skb->data, skb->data,
-						length);
-					/* save the skb
-					 * in buffer_info as good */
+					       ETH_HLEN);
+					memcpy(&new_skb->data[ETH_HLEN],
+					       &skb->data[ETH_HLEN +
+					       PCH_GBE_DMA_PADDING],
+					       length - ETH_HLEN);
 					skb = new_skb;
-				} else if (!skb_padding_flag) {
-					/* dorrop error */
-					pr_err("New skb allocation Error\n");
-					goto dorrop;
+				} else {
+					/* Padding data is deleted
+					 * by moving header data.*/
+					memmove(&skb->data[PCH_GBE_DMA_PADDING],
+						&skb->data[0], ETH_HLEN);
+					skb_reserve(skb, NET_IP_ALIGN);
+					buffer_info->skb = NULL;
 				}
-			} else {
-				buffer_info->skb = NULL;
 			}
-			if (skb_padding_flag) {
-				memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
-				memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
-					ETH_HLEN);
-				skb_reserve(skb, NET_IP_ALIGN);
-
-			}
-
+			/* The length includes FCS length */
+			length = length - ETH_FCS_LEN;
 			/* update status of driver */
 			adapter->stats.rx_bytes += length;
 			adapter->stats.rx_packets++;
@@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
 	netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
 	pch_gbe_set_ethtool_ops(netdev);
 
+	pch_gbe_mac_load_mac_addr(&adapter->hw);
 	pch_gbe_mac_reset_hw(&adapter->hw);
 
 	/* setup the private structure */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 59ccf0c5c610..ef2133b16f8c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -617,8 +617,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
 	}
 }
 
-static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	int i;
 
 	RTL_W8(ERIDR, cmd);
@@ -630,7 +631,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
 			break;
 	}
 
-	ocp_write(ioaddr, 0x1, 0x30, 0x00000001);
+	ocp_write(tp, 0x1, 0x30, 0x00000001);
 }
 
 #define OOB_CMD_RESET		0x00
@@ -2868,8 +2869,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 
-	if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+	if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+	     (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+	    (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
 		return;
+	}
 
 	if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
 	     (tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2891,6 +2895,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_25:
 	case RTL_GIGA_MAC_VER_26:
+	case RTL_GIGA_MAC_VER_27:
+	case RTL_GIGA_MAC_VER_28:
 		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
 		break;
 	}
@@ -2900,12 +2906,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 
-	if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+	if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+	     (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+	    (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
 		return;
+	}
 
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_25:
 	case RTL_GIGA_MAC_VER_26:
+	case RTL_GIGA_MAC_VER_27:
+	case RTL_GIGA_MAC_VER_28:
 		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
 		break;
 	}
@@ -3042,7 +3053,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_mwi_2;
 	}
 
-	tp->cp_cmd = PCIMulRW | RxChkSum;
+	tp->cp_cmd = RxChkSum;
 
 	if ((sizeof(dma_addr_t) > 4) &&
 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3190,6 +3201,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (pci_dev_run_wake(pdev))
 		pm_runtime_put_noidle(&pdev->dev);
 
+	netif_carrier_off(dev);
+
 out:
 	return rc;
 
@@ -3316,7 +3329,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
 	/* Disable interrupts */
 	rtl8169_irq_mask_and_ack(ioaddr);
 
-	if (tp->mac_version == RTL_GIGA_MAC_VER_28) {
+	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_28) {
 		while (RTL_R8(TxPoll) & NPQ)
 			udelay(20);
 
@@ -3845,8 +3859,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
 	Cxpl_dbg_sel | \
 	ASF | \
 	PktCntrDisable | \
-	PCIDAC | \
-	PCIMulRW)
+	Mac_dbgo_sel)
 
 static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 {
@@ -3876,8 +3889,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
 		RTL_W8(Config1, cfg1 & ~LEDS0);
 
-	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-
 	rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
 }
 
@@ -3889,8 +3900,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
 
 	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-
-	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
 }
 
 static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3916,6 +3925,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
 		}
 	}
 
+	RTL_W8(Cfg9346, Cfg9346_Unlock);
+
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_07:
 		rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3930,14 +3941,13 @@ static void rtl_hw_start_8101(struct net_device *dev)
 		break;
 	}
 
-	RTL_W8(Cfg9346, Cfg9346_Unlock);
+	RTL_W8(Cfg9346, Cfg9346_Lock);
 
 	RTL_W8(MaxTxPacketSize, TxPacketMax);
 
 	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 
-	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
-
+	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
 	RTL_W16(CPlusCmd, tp->cp_cmd);
 
 	RTL_W16(IntrMitigate, 0x0000);
@@ -3947,14 +3957,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
 	rtl_set_rx_tx_config_registers(tp);
 
-	RTL_W8(Cfg9346, Cfg9346_Lock);
-
 	RTL_R8(IntrMask);
 
 	rtl_set_rx_mode(dev);
 
-	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-
 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
 
 	RTL_W16(IntrMask, tp->intr_event);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19ed60d..ca886d98bdc7 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
 				  struct ethtool_test *test, u64 *data)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_self_tests efx_tests;
+	struct efx_self_tests *efx_tests;
 	int already_up;
-	int rc;
+	int rc = -ENOMEM;
+
+	efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+	if (!efx_tests)
+		goto fail;
+
 
 	ASSERT_RTNL();
 	if (efx->state != STATE_RUNNING) {
@@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
 		if (rc) {
 			netif_err(efx, drv, efx->net_dev,
 				  "failed opening device.\n");
-			goto fail2;
+			goto fail1;
 		}
 	}
 
-	memset(&efx_tests, 0, sizeof(efx_tests));
-
-	rc = efx_selftest(efx, &efx_tests, test->flags);
+	rc = efx_selftest(efx, efx_tests, test->flags);
 
 	if (!already_up)
 		dev_close(efx->net_dev);
@@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
 		   rc == 0 ? "passed" : "failed",
 		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
 
- fail2:
- fail1:
+fail1:
 	/* Fill ethtool results structures */
-	efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+	efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+	kfree(efx_tests);
+fail:
 	if (rc)
 		test->flags |= ETH_TEST_FL_FAILED;
 }
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 5976d1d51df1..640e368ebeee 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
 					      "cur_rx:%4.4d, dirty_rx:%4.4d\n",
 					      net_dev->name, sis_priv->cur_rx,
 					      sis_priv->dirty_rx);
+				dev_kfree_skb(skb);
 				break;
 			}
 
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3837f9..0e5f03135b50 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
 
 	priv->hw = device;
 
-	if (device_can_wakeup(priv->device))
+	if (device_can_wakeup(priv->device)) {
 		priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+		enable_irq_wake(dev->irq);
+	}
 
 	return 0;
 }
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 93b32d366611..06c0e5033656 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+		     !netif_running(dev)))
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+		     !netif_running(dev)))
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 04e8ce14a1d0..7113168473cf 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1,7 +1,7 @@
 /*
  * cdc_ncm.c
  *
- * Copyright (C) ST-Ericsson 2010
+ * Copyright (C) ST-Ericsson 2010-2011
  * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
  * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
  *
@@ -54,7 +54,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define	DRIVER_VERSION				"17-Jan-2011"
+#define	DRIVER_VERSION				"7-Feb-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10
@@ -77,6 +77,9 @@
  */
 #define	CDC_NCM_DPT_DATAGRAMS_MAX		32
 
+/* Maximum amount of IN datagrams in NTB */
+#define	CDC_NCM_DPT_DATAGRAMS_IN_MAX		0 /* unlimited */
+
 /* Restart the timer, if amount of datagrams is less than given value */
 #define	CDC_NCM_RESTART_TIMER_DATAGRAM_CNT	3
 
@@ -85,11 +88,6 @@
 	(sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
 	(CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
 
-struct connection_speed_change {
-	__le32	USBitRate; /* holds 3GPP downlink value, bits per second */
-	__le32	DSBitRate; /* holds 3GPP uplink value, bits per second */
-} __attribute__ ((packed));
-
 struct cdc_ncm_data {
 	struct usb_cdc_ncm_nth16 nth16;
 	struct usb_cdc_ncm_ndp16 ndp16;
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
 {
 	struct usb_cdc_notification req;
 	u32 val;
-	__le16 max_datagram_size;
 	u8 flags;
 	u8 iface_no;
 	int err;
+	u16 ntb_fmt_supported;
 
 	iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
 
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
 	ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
 	ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
 	ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+	/* devices prior to NCM Errata shall set this field to zero */
+	ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+	ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
 
 	if (ctx->func_desc != NULL)
 		flags = ctx->func_desc->bmNetworkCapabilities;
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
 
 	pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
 		 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
-		 "wNdpOutAlignment=%u flags=0x%x\n",
+		 "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
 		 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
-		 ctx->tx_ndp_modulus, flags);
+		 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
 
-	/* max count of tx datagrams without terminating NULL entry */
-	ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
+	/* max count of tx datagrams */
+	if ((ctx->tx_max_datagrams == 0) ||
+			(ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
+		ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
 
 	/* verify maximum size of received NTB in bytes */
-	if ((ctx->rx_max <
-	    (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
-	    (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
+	if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
+		pr_debug("Using min receive length=%d\n",
+						USB_CDC_NCM_NTB_MIN_IN_SIZE);
+		ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
+	}
+
+	if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
 		pr_debug("Using default maximum receive length=%d\n",
 						CDC_NCM_NTB_MAX_SIZE_RX);
 		ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
 	}
 
+	/* inform device about NTB input size changes */
+	if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
+		req.wValue = 0;
+		req.wIndex = cpu_to_le16(iface_no);
+
+		if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
+			struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
+
+			req.wLength = 8;
+			ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+			ndp_in_sz.wNtbInMaxDatagrams =
+					cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
+			ndp_in_sz.wReserved = 0;
+			err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
+									1000);
+		} else {
+			__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+
+			req.wLength = 4;
+			err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
+								NULL, 1000);
+		}
+
+		if (err)
+			pr_debug("Setting NTB Input Size failed\n");
+	}
+
 	/* verify maximum size of transmitted NTB in bytes */
 	if ((ctx->tx_max <
 	    (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
 	/* additional configuration */
 
 	/* set CRC Mode */
-	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
-	req.bNotificationType = USB_CDC_SET_CRC_MODE;
-	req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
-	req.wIndex = cpu_to_le16(iface_no);
-	req.wLength = 0;
-
-	err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
-	if (err)
-		pr_debug("Setting CRC mode off failed\n");
+	if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_SET_CRC_MODE;
+		req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
+		req.wIndex = cpu_to_le16(iface_no);
+		req.wLength = 0;
+
+		err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+		if (err)
+			pr_debug("Setting CRC mode off failed\n");
+	}
 
-	/* set NTB format */
-	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
-	req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
-	req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
-	req.wIndex = cpu_to_le16(iface_no);
-	req.wLength = 0;
+	/* set NTB format, if both formats are supported */
+	if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
+		req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
+		req.wIndex = cpu_to_le16(iface_no);
+		req.wLength = 0;
+
+		err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+		if (err)
+			pr_debug("Setting NTB format to 16-bit failed\n");
+	}
 
-	err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
-	if (err)
-		pr_debug("Setting NTB format to 16-bit failed\n");
+	ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
 
 	/* set Max Datagram Size (MTU) */
-	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
-	req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
-	req.wValue = 0;
-	req.wIndex = cpu_to_le16(iface_no);
-	req.wLength = cpu_to_le16(2);
+	if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
+		__le16 max_datagram_size;
+		u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
+		req.wValue = 0;
+		req.wIndex = cpu_to_le16(iface_no);
+		req.wLength = cpu_to_le16(2);
+
+		err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
+									1000);
+		if (err) {
+			pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
+						CDC_NCM_MIN_DATAGRAM_SIZE);
+		} else {
+			ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+			/* Check Eth descriptor value */
+			if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
+				if (ctx->max_datagram_size > eth_max_sz)
+					ctx->max_datagram_size = eth_max_sz;
+			} else {
+				if (ctx->max_datagram_size >
+						CDC_NCM_MAX_DATAGRAM_SIZE)
+					ctx->max_datagram_size =
+						CDC_NCM_MAX_DATAGRAM_SIZE;
+			}
 
-	err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
-	if (err) {
-		pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
-			 CDC_NCM_MIN_DATAGRAM_SIZE);
-		/* use default */
-		ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-	} else {
-		ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+			if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+				ctx->max_datagram_size =
+					CDC_NCM_MIN_DATAGRAM_SIZE;
+
+			/* if value changed, update device */
+			req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+			req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
+			req.wValue = 0;
+			req.wIndex = cpu_to_le16(iface_no);
+			req.wLength = 2;
+			max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+
+			err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
+								0, NULL, 1000);
+			if (err)
+				pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
+		}
 
-		if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
-			ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-		else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
-			ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
 	}
 
 	if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
 
 			ctx->ether_desc =
 					(const struct usb_cdc_ether_desc *)buf;
-
 			dev->hard_mtu =
 				le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
 
-			if (dev->hard_mtu <
-			    (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
-				dev->hard_mtu =
-					CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
-
-			else if (dev->hard_mtu >
-				 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
-				dev->hard_mtu =
-					CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
+			if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
+				dev->hard_mtu =	CDC_NCM_MIN_DATAGRAM_SIZE;
+			else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
+				dev->hard_mtu =	CDC_NCM_MAX_DATAGRAM_SIZE;
 			break;
 
 		case USB_CDC_NCM_TYPE:
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 	u32 offset;
 	u32 last_offset;
 	u16 n = 0;
-	u8 timeout = 0;
+	u8 ready2send = 0;
 
 	/* if there is a remaining skb, it gets priority */
 	if (skb != NULL)
 		swap(skb, ctx->tx_rem_skb);
 	else
-		timeout = 1;
+		ready2send = 1;
 
 	/*
 	 * +----------------+
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 
 	for (; n < ctx->tx_max_datagrams; n++) {
 		/* check if end of transmit buffer is reached */
-		if (offset >= ctx->tx_max)
+		if (offset >= ctx->tx_max) {
+			ready2send = 1;
 			break;
-
+		}
 		/* compute maximum buffer size */
 		rem = ctx->tx_max - offset;
 
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 				}
 				ctx->tx_rem_skb = skb;
 				skb = NULL;
-
-				/* loop one more time */
-				timeout = 1;
+				ready2send = 1;
 			}
 			break;
 		}
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 		ctx->tx_curr_last_offset = last_offset;
 		goto exit_no_skb;
 
-	} else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
+	} else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
 		/* wait for more frames */
 		/* push variables */
 		ctx->tx_curr_skb = skb_out;
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 					cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
 	ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
 	ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
-	ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+	ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
 							ctx->tx_ndp_modulus);
 
 	memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 	rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
 					sizeof(struct usb_cdc_ncm_dpe16));
 	ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
-	ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
+	ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
 
-	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex,
+	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
 						&(ctx->tx_ncm.ndp16),
 						sizeof(ctx->tx_ncm.ndp16));
 
-	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex +
+	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
 					sizeof(ctx->tx_ncm.ndp16),
 					&(ctx->tx_ncm.dpe16),
 					(ctx->tx_curr_frame_num + 1) *
@@ -961,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
 		goto error;
 	}
 
-	temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
+	temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
 	if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
 		pr_debug("invalid DPT16 index\n");
 		goto error;
@@ -1048,10 +1115,10 @@ error:
 
 static void
 cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
-		     struct connection_speed_change *data)
+		     struct usb_cdc_speed_change *data)
 {
-	uint32_t rx_speed = le32_to_cpu(data->USBitRate);
-	uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
+	uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
+	uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
 
 	/*
 	 * Currently the USB-NET API does not support reporting the actual
@@ -1092,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
 	/* test for split data in 8-byte chunks */
 	if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
 		cdc_ncm_speed_change(ctx,
-		      (struct connection_speed_change *)urb->transfer_buffer);
+		      (struct usb_cdc_speed_change *)urb->transfer_buffer);
 		return;
 	}
 
@@ -1120,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
 		break;
 
 	case USB_CDC_NOTIFY_SPEED_CHANGE:
-		if (urb->actual_length <
-		    (sizeof(*event) + sizeof(struct connection_speed_change)))
+		if (urb->actual_length < (sizeof(*event) +
+					sizeof(struct usb_cdc_speed_change)))
 			set_bit(EVENT_STS_SPLIT, &dev->flags);
 		else
 			cdc_ncm_speed_change(ctx,
-				(struct connection_speed_change *) &event[1]);
+				(struct usb_cdc_speed_change *) &event[1]);
 		break;
 
 	default:
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 02b622e3b9fb..5002f5be47be 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = {
 	.driver_info = (unsigned long)&dm9601_info,
 	 },
 	{
+	 USB_DEVICE(0x0fe6, 0x9700),	/* DM9601 USB to Fast Ethernet Adapter */
+	 .driver_info = (unsigned long)&dm9601_info,
+	 },
+	{
 	 USB_DEVICE(0x0a46, 0x9000),	/* DM9000E */
 	 .driver_info = (unsigned long)&dm9601_info,
 	 },
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fcedff49..6d83812603b6 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@ exit:
 
 static void hso_free_tiomget(struct hso_serial *serial)
 {
-	struct hso_tiocmget *tiocmget = serial->tiocmget;
+	struct hso_tiocmget *tiocmget;
+	if (!serial)
+		return;
+	tiocmget = serial->tiocmget;
 	if (tiocmget) {
-		if (tiocmget->urb) {
-			usb_free_urb(tiocmget->urb);
-			tiocmget->urb = NULL;
-		}
+		usb_free_urb(tiocmget->urb);
+		tiocmget->urb = NULL;
 		serial->tiocmget = NULL;
 		kfree(tiocmget);
-
 	}
 }
 
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a41643ff4..95c41d56631c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@ fail_halt:
 		if (urb != NULL) {
 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
 			status = usb_autopm_get_interface(dev->intf);
-			if (status < 0)
+			if (status < 0) {
+				usb_free_urb(urb);
 				goto fail_lowmem;
+			}
 			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
 				resched = 0;
 			usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 90a23e410d1b..82dba5aaf423 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
 	}
 }
 
+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+	napi_enable(&vi->napi);
+
+	/* If all buffers were filled by other side before we napi_enabled, we
+	 * won't get another interrupt, so process any outstanding packets
+	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
+	 * We synchronize against interrupts via NAPI_STATE_SCHED */
+	if (napi_schedule_prep(&vi->napi)) {
+		virtqueue_disable_cb(vi->rvq);
+		__napi_schedule(&vi->napi);
+	}
+}
+
 static void refill_work(struct work_struct *work)
 {
 	struct virtnet_info *vi;
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
 	vi = container_of(work, struct virtnet_info, refill.work);
 	napi_disable(&vi->napi);
 	still_empty = !try_fill_recv(vi, GFP_KERNEL);
-	napi_enable(&vi->napi);
+	virtnet_napi_enable(vi);
 
 	/* In theory, this can happen: if we don't get any buffers in
 	 * we will *never* try to fill again. */
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 
-	napi_enable(&vi->napi);
-
-	/* If all buffers were filled by other side before we napi_enabled, we
-	 * won't get another interrupt, so process any outstanding packets
-	 * now.  virtnet_poll wants re-enable the queue, so we disable here.
-	 * We synchronize against interrupts via NAPI_STATE_SCHED */
-	if (napi_schedule_prep(&vi->napi)) {
-		virtqueue_disable_cb(vi->rvq);
-		__napi_schedule(&vi->napi);
-	}
+	virtnet_napi_enable(vi);
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 78c26fdccad1..62ce2f4e8605 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
 	return 0;
 }
 
+/*
+ * Wait for synth to settle
+ */
+static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+			struct ieee80211_channel *channel)
+{
+	/*
+	 * On 5211+ read activation -> rx delay
+	 * and use it (100ns steps).
+	 */
+	if (ah->ah_version != AR5K_AR5210) {
+		u32 delay;
+		delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+			AR5K_PHY_RX_DELAY_M;
+		delay = (channel->hw_value & CHANNEL_CCK) ?
+			((delay << 2) / 22) : (delay / 10);
+		if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+			delay = delay << 1;
+		if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+			delay = delay << 2;
+		/* XXX: /2 on turbo ? Let's be safe
+		 * for now */
+		udelay(100 + delay);
+	} else {
+		mdelay(1);
+	}
+}
+
 
 /**********************\
 * RF Gain optimization *
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
 	case AR5K_RF5111:
 		ret = ath5k_hw_rf5111_channel(ah, channel);
 		break;
+	case AR5K_RF2317:
 	case AR5K_RF2425:
 		ret = ath5k_hw_rf2425_channel(ah, channel);
 		break;
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 		/* Failed */
 		if (i >= 100)
 			return -EIO;
+
+		/* Set channel and wait for synth */
+		ret = ath5k_hw_channel(ah, channel);
+		if (ret)
+			return ret;
+
+		ath5k_hw_wait_for_synth(ah, channel);
 	}
 
 	/*
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 	if (ret)
 		return ret;
 
+	/* Write OFDM timings on 5212*/
+	if (ah->ah_version == AR5K_AR5212 &&
+		channel->hw_value & CHANNEL_OFDM) {
+
+		ret = ath5k_hw_write_ofdm_timings(ah, channel);
+		if (ret)
+			return ret;
+
+		/* Spur info is available only from EEPROM versions
+		 * greater than 5.3, but the EEPROM routines will use
+		 * static values for older versions */
+		if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+			ath5k_hw_set_spur_mitigation_filter(ah,
+							    channel);
+	}
+
+	/* If we used fast channel switching
+	 * we are done, release RF bus and
+	 * fire up NF calibration.
+	 *
+	 * Note: Only NF calibration due to
+	 * channel change, not AGC calibration
+	 * since AGC is still running !
+	 */
+	if (fast) {
+		/*
+		 * Release RF Bus grant
+		 */
+		AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+				    AR5K_PHY_RFBUS_REQ_REQUEST);
+
+		/*
+		 * Start NF calibration
+		 */
+		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+					AR5K_PHY_AGCCTL_NF);
+
+		return ret;
+	}
+
 	/*
 	 * For 5210 we do all initialization using
 	 * initvals, so we don't have to modify
 	 * any settings (5210 also only supports
 	 * a/aturbo modes)
 	 */
-	if ((ah->ah_version != AR5K_AR5210) && !fast) {
+	if (ah->ah_version != AR5K_AR5210) {
 
 		/*
 		 * Write initial RF gain settings
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 		if (ret)
 			return ret;
 
-		/* Write OFDM timings on 5212*/
-		if (ah->ah_version == AR5K_AR5212 &&
-			channel->hw_value & CHANNEL_OFDM) {
-
-			ret = ath5k_hw_write_ofdm_timings(ah, channel);
-			if (ret)
-				return ret;
-
-			/* Spur info is available only from EEPROM versions
-			 * greater than 5.3, but the EEPROM routines will use
-			 * static values for older versions */
-			if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
-				ath5k_hw_set_spur_mitigation_filter(ah,
-								    channel);
-		}
-
 		/*Enable/disable 802.11b mode on 5111
 		(enable 2111 frequency converter + CCK)*/
 		if (ah->ah_radio == AR5K_RF5111) {
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 	 */
 	ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
 
+	ath5k_hw_wait_for_synth(ah, channel);
+
 	/*
-	 * On 5211+ read activation -> rx delay
-	 * and use it.
+	 * Perform ADC test to see if baseband is ready
+	 * Set tx hold and check adc test register
 	 */
-	if (ah->ah_version != AR5K_AR5210) {
-		u32 delay;
-		delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
-			AR5K_PHY_RX_DELAY_M;
-		delay = (channel->hw_value & CHANNEL_CCK) ?
-			((delay << 2) / 22) : (delay / 10);
-		if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
-			delay = delay << 1;
-		if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
-			delay = delay << 2;
-		/* XXX: /2 on turbo ? Let's be safe
-		 * for now */
-		udelay(100 + delay);
-	} else {
-		mdelay(1);
-	}
-
-	if (fast)
-		/*
-		 * Release RF Bus grant
-		 */
-		AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
-				    AR5K_PHY_RFBUS_REQ_REQUEST);
-	else {
-		/*
-		 * Perform ADC test to see if baseband is ready
-		 * Set tx hold and check adc test register
-		 */
-		phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
-		ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
-		for (i = 0; i <= 20; i++) {
-			if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
-				break;
-			udelay(200);
-		}
-		ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+	phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+	ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+	for (i = 0; i <= 20; i++) {
+		if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+			break;
+		udelay(200);
 	}
+	ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
 
 	/*
 	 * Start automatic gain control calibration
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3681caf54282..1a7fa6ea4cf5 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,7 +21,6 @@
 #include <linux/device.h>
 #include <linux/leds.h>
 #include <linux/completion.h>
-#include <linux/pm_qos_params.h>
 
 #include "debug.h"
 #include "common.h"
@@ -57,8 +56,6 @@ struct ath_node;
 
 #define A_MAX(a, b) ((a) > (b) ? (a) : (b))
 
-#define ATH9K_PM_QOS_DEFAULT_VALUE	55
-
 #define TSF_TO_TU(_h,_l) \
 	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
 
@@ -218,6 +215,7 @@ struct ath_frame_info {
 struct ath_buf_state {
 	u8 bf_type;
 	u8 bfs_paprd;
+	unsigned long bfs_paprd_timestamp;
 	enum ath9k_internal_frame_type bfs_ftype;
 };
 
@@ -593,7 +591,6 @@ struct ath_softc {
 	struct work_struct paprd_work;
 	struct work_struct hw_check_work;
 	struct completion paprd_complete;
-	bool paprd_pending;
 
 	u32 intrstatus;
 	u32 sc_flags; /* SC_OP_* */
@@ -633,8 +630,6 @@ struct ath_softc {
 	struct ath_descdma txsdma;
 
 	struct ath_ant_comb ant_comb;
-
-	struct pm_qos_request_list pm_qos_req;
 };
 
 struct ath_wiphy {
@@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
 extern struct ieee80211_ops ath9k_ops;
 extern int ath9k_modparam_nohwcrypt;
 extern int led_blink;
-extern int ath9k_pm_qos_value;
 extern bool is_ath9k_unloaded;
 
 irqreturn_t ath_isr(int irq, void *dev);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 087a6a95edd5..a033d01bf8a0 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable;
 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
 
-int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
-module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
-
 bool is_ath9k_unloaded;
 /* We use the hw_value as an index into our private channel structure */
 
@@ -762,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
 	ath_init_leds(sc);
 	ath_start_rfkill_poll(sc);
 
-	pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
-			   PM_QOS_DEFAULT_VALUE);
-
 	return 0;
 
 error_world:
@@ -831,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc)
 	}
 
 	ieee80211_unregister_hw(hw);
-	pm_qos_remove_request(&sc->pm_qos_req);
 	ath_rx_cleanup(sc);
 	ath_tx_cleanup(sc);
 	ath9k_deinit_softc(sc);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9040c2ff1909..a09d15f7aa6e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -342,7 +342,6 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
 	tx_info->control.rates[1].idx = -1;
 
 	init_completion(&sc->paprd_complete);
-	sc->paprd_pending = true;
 	txctl.paprd = BIT(chain);
 
 	if (ath_tx_start(hw, skb, &txctl) != 0) {
@@ -353,7 +352,6 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
 
 	time_left = wait_for_completion_timeout(&sc->paprd_complete,
 			msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
-	sc->paprd_pending = false;
 
 	if (!time_left)
 		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
@@ -1175,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
 			ath9k_btcoex_timer_resume(sc);
 	}
 
-	/* User has the option to provide pm-qos value as a module
-	 * parameter rather than using the default value of
-	 * 'ATH9K_PM_QOS_DEFAULT_VALUE'.
-	 */
-	pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value);
-
 	if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
 		common->bus_ops->extn_synch_en(common);
 
@@ -1347,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
 	sc->sc_flags |= SC_OP_INVALID;
 
-	pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
-
 	mutex_unlock(&sc->mutex);
 
 	ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 33a37edbaf79..07b7804aec5b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1725,6 +1725,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
 			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
 						   bf->bf_state.bfs_paprd);
 
+		if (txctl->paprd)
+			bf->bf_state.bfs_paprd_timestamp = jiffies;
+
 		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
 	}
 
@@ -1886,7 +1889,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 	bf->bf_buf_addr = 0;
 
 	if (bf->bf_state.bfs_paprd) {
-		if (!sc->paprd_pending)
+		if (time_after(jiffies,
+				bf->bf_state.bfs_paprd_timestamp +
+				msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
 			dev_kfree_skb_any(skb);
 		else
 			complete(&sc->paprd_complete);
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 939a0e96ed1f..84866a4b8350 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
 	cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
 
 	/* 2. Maybe the AP wants to send multicast/broadcast data? */
-	cam = !!(tim_ie->bitmap_ctrl & 0x01);
+	cam |= !!(tim_ie->bitmap_ctrl & 0x01);
 
 	if (!cam) {
 		/* back to low-power land. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a9b852be4509..39b6f16c87fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
 }
 #endif
 
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
-				struct iwl_rx_packet *pkt)
-{
-	bool rc = true;
-	struct iwl3945_notif_statistics current_stat;
-	int combined_plcp_delta;
-	unsigned int plcp_msec;
-	unsigned long plcp_received_jiffies;
-
-	if (priv->cfg->base_params->plcp_delta_threshold ==
-	    IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
-		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
-		return rc;
-	}
-	memcpy(&current_stat, pkt->u.raw, sizeof(struct
-			iwl3945_notif_statistics));
-	/*
-	 * check for plcp_err and trigger radio reset if it exceeds
-	 * the plcp error threshold plcp_delta.
-	 */
-	plcp_received_jiffies = jiffies;
-	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
-					(long) priv->plcp_jiffies);
-	priv->plcp_jiffies = plcp_received_jiffies;
-	/*
-	 * check to make sure plcp_msec is not 0 to prevent division
-	 * by zero.
-	 */
-	if (plcp_msec) {
-		combined_plcp_delta =
-			(le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
-			le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
-		if ((combined_plcp_delta > 0) &&
-			((combined_plcp_delta * 100) / plcp_msec) >
-			priv->cfg->base_params->plcp_delta_threshold) {
-			/*
-			 * if plcp_err exceed the threshold, the following
-			 * data is printed in csv format:
-			 *    Text: plcp_err exceeded %d,
-			 *    Received ofdm.plcp_err,
-			 *    Current ofdm.plcp_err,
-			 *    combined_plcp_delta,
-			 *    plcp_msec
-			 */
-			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
-				"%u, %d, %u mSecs\n",
-				priv->cfg->base_params->plcp_delta_threshold,
-				le32_to_cpu(current_stat.rx.ofdm.plcp_err),
-				combined_plcp_delta, plcp_msec);
-			/*
-			 * Reset the RF radio due to the high plcp
-			 * error rate
-			 */
-			rc = false;
-		}
-	}
-	return rc;
-}
-
 void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
 		struct iwl_rx_mem_buffer *rxb)
 {
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = {
 	.isr_ops = {
 		.isr = iwl_isr_legacy,
 	},
-	.check_plcp_health = iwl3945_good_plcp_health,
 
 	.debugfs_ops = {
 		.rx_stats_read = iwl3945_ucode_rx_stats_read,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index af505bcd7ae0..ef36aff1bb43 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -681,6 +681,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
 	.fw_name_pre = IWL6050_FW_PRE,				\
 	.ucode_api_max = IWL6050_UCODE_API_MAX,			\
 	.ucode_api_min = IWL6050_UCODE_API_MIN,			\
+	.valid_tx_ant = ANT_AB,		/* .cfg overwrite */	\
+	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */	\
 	.ops = &iwl6050_ops,					\
 	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,	\
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 36335b1b54d4..c1cfd9952e52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1157,6 +1157,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
 	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
+	/* Re-enable RF_KILL if it occurred */
+	else if (handled & CSR_INT_BIT_RF_KILL)
+		iwl_enable_rfkill_int(priv);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
@@ -1371,6 +1374,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
 	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
+	/* Re-enable RF_KILL if it occurred */
+	else if (handled & CSR_INT_BIT_RF_KILL)
+		iwl_enable_rfkill_int(priv);
 }
 
 /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1eacba4daa5b..0494d7b102d4 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
 	while (i != idx) {
 		u16 len;
 		struct sk_buff *skb;
+		dma_addr_t dma_addr;
 		desc = &ring[i];
 		len = le16_to_cpu(desc->len);
 		skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
 
 			len = priv->common.rx_mtu;
 		}
+		dma_addr = le32_to_cpu(desc->host_addr);
+		pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+			priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
 		skb_put(skb, len);
 
 		if (p54_rx(dev, skb)) {
-			pci_unmap_single(priv->pdev,
-					 le32_to_cpu(desc->host_addr),
-					 priv->common.rx_mtu + 32,
-					 PCI_DMA_FROMDEVICE);
+			pci_unmap_single(priv->pdev, dma_addr,
+				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
 			rx_buf[i] = NULL;
-			desc->host_addr = 0;
+			desc->host_addr = cpu_to_le32(0);
 		} else {
 			skb_trim(skb, 0);
+			pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
 			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
 		}
 
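
The p54pci hunk above adds the missing ownership hand-offs around a streaming RX mapping: the buffer is synced for the CPU before the driver reads it, then either unmapped (frame handed up the stack) or synced back for the device before being reused. A sketch of that pattern using the same pci_* DMA wrappers as the driver; frame_is_consumed() and the surrounding names are made up for illustration:

    #include <linux/pci.h>

    static bool frame_is_consumed(const void *buf, size_t len)
    {
    	return len > 0;		/* placeholder policy for the sketch */
    }

    /* buf was mapped earlier with pci_map_single(..., PCI_DMA_FROMDEVICE) */
    static void demo_rx_one(struct pci_dev *pdev, dma_addr_t dma_addr,
    			void *buf, size_t len)
    {
    	/* give the CPU a coherent view before touching the data */
    	pci_dma_sync_single_for_cpu(pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

    	if (frame_is_consumed(buf, len)) {
    		/* buffer leaves the RX ring: tear down the mapping */
    		pci_unmap_single(pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
    	} else {
    		/* buffer stays on the ring: hand it back to the device */
    		pci_dma_sync_single_for_device(pdev, dma_addr, len,
    					       PCI_DMA_FROMDEVICE);
    	}
    }
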
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index aa97971a38af..3b3f1e45ab3e 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
 		 */
 		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
+		/*
+		 * The hardware has already checked the Michael Mic and has
+		 * stripped it from the frame. Signal this to mac80211.
+		 */
+		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
 		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
 			rxdesc->flags |= RX_FLAG_DECRYPTED;
 		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
 	{ PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
 #endif
 #ifdef CONFIG_RT2800PCI_RT35XX
+	{ PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) },
 	{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
 	{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
 	{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index b97a4a54ff4c..197a36c05fda 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
 		 */
 		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
 
+		/*
+		 * The hardware has already checked the Michael Mic and has
+		 * stripped it from the frame. Signal this to mac80211.
+		 */
+		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
 		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
 			rxdesc->flags |= RX_FLAG_DECRYPTED;
 		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 012e1a4016fe..40372bac9482 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -1039,6 +1039,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 
 	if (changed & BSS_CHANGED_BEACON) {
 		beacon = ieee80211_beacon_get(hw, vif);
+		if (!beacon)
+			goto out_sleep;
+
 		ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
 					      beacon->len);
 
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ffedfd492754..ea1580085347 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -3,7 +3,7 @@
 #
 
 menuconfig NFC_DEVICES
-	bool "NFC devices"
+	bool "Near Field Communication (NFC) devices"
 	default n
 	---help---
 	  You'll have to say Y if your computer contains an NFC device that
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index bae647264dd6..724f65d8f9e4 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -60,7 +60,7 @@ enum pn544_irq {
 struct pn544_info {
 	struct miscdevice miscdev;
 	struct i2c_client *i2c_dev;
-	struct regulator_bulk_data regs[2];
+	struct regulator_bulk_data regs[3];
 
 	enum pn544_state state;
 	wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@ struct pn544_info {
 
 static const char reg_vdd_io[]	= "Vdd_IO";
 static const char reg_vbat[]	= "VBat";
+static const char reg_vsim[]	= "VSim";
 
 /* sysfs interface */
 static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
 
 	info->regs[0].supply = reg_vdd_io;
 	info->regs[1].supply = reg_vbat;
+	info->regs[2].supply = reg_vsim;
 	r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
 				 info->regs);
 	if (r < 0)
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 8ecaac983923..ea25e5bfcf23 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/capability.h>
+#include <linux/security.h>
 #include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include "pci.h"
@@ -368,7 +369,7 @@ pci_read_config(struct file *filp, struct kobject *kobj,
 	u8 *data = (u8*) buf;
 
 	/* Several chips lock up trying to read undefined config space */
-	if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
+	if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) {
 		size = dev->cfg_size;
 	} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
 		size = 128;
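
The pci-sysfs change above swaps a direct test of the effective capability bits for security_capable(), so that any loaded LSM gets a say as well; in this kernel the call returns 0 when access is permitted, hence the '== 0' comparison. A tiny sketch of the check as used at this call site (the two-argument form matches this patch; later kernels changed the signature):

    #include <linux/capability.h>
    #include <linux/cred.h>
    #include <linux/security.h>

    static bool may_read_full_config(const struct cred *cred)
    {
    	/* 0 means permitted; an LSM denial comes back as a negative errno */
    	return security_capable(cred, CAP_SYS_ADMIN) == 0;
    }
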
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 0bdda5b3ed55..42fbf1a75576 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
 		flags |= CONF_ENABLE_IOCARD;
 	if (flags & CONF_ENABLE_IOCARD)
 		s->socket.flags |= SS_IOCARD;
+	if (flags & CONF_ENABLE_ZVCARD)
+		s->socket.flags |= SS_ZVCARD | SS_IOCARD;
 	if (flags & CONF_ENABLE_SPKR) {
 		s->socket.flags |= SS_SPKR_ENA;
 		status = CCSR_AUDIO_ENA;
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3755e7c8c715..2c540542b5af 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
 }
 #endif
 
-static void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev)
 {
 	struct pcmcia_low_level *ops = dev->platform_data;
 	/*
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index bb62ea87b8f9..b609b45469ed 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,3 +1,4 @@
 int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
 void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
+void pxa2xx_configure_sockets(struct device *dev);
 
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index b9f8c8fb42bd..25afe637c657 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
 		lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
 
 		pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+		pxa2xx_configure_sockets(&sadev->dev);
 		ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
 				pxa2xx_drv_pcmcia_add_one);
 	}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d163bc2e2b9e..a59af5b24f0a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -227,7 +227,7 @@ config SONYPI_COMPAT
 config IDEAPAD_LAPTOP
 	tristate "Lenovo IdeaPad Laptop Extras"
 	depends on ACPI
-	depends on RFKILL
+	depends on RFKILL && INPUT
 	select INPUT_SPARSEKMAP
 	help
 	  This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c5c4b8c32eb8..38b34a73866a 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -84,7 +84,7 @@ MODULE_LICENSE("GPL");
  */
 #define AMW0_GUID1		"67C3371D-95A3-4C37-BB61-DD47B491DAAB"
 #define AMW0_GUID2		"431F16ED-0C2B-444C-B267-27DEB140CF9C"
-#define WMID_GUID1		"6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
+#define WMID_GUID1		"6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
 #define WMID_GUID2		"95764E09-FB56-4e83-B31A-37761F60994A"
 #define WMID_GUID3		"61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
 
@@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev,
 			return -EINVAL;
 	return count;
 }
-static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg,
+static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
 	set_bool_threeg);
 
 static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index 4633fd8532cc..fe495939c307 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
 	struct proc_dir_entry *proc;
 	mode_t mode;
 
-	/*
-	 * If parameter uid or gid is not changed, keep the default setting for
-	 * our proc entries (-rw-rw-rw-) else, it means we care about security,
-	 * and then set to -rw-rw----
-	 */
-
 	if ((asus_uid == 0) && (asus_gid == 0)) {
-		mode = S_IFREG | S_IRUGO | S_IWUGO;
+		mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
 	} else {
 		mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
 		printk(KERN_WARNING "  asus_uid and asus_gid parameters are "
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 34657f96b5a5..ad24ef36f9f7 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked)
 	dell_send_request(buffer, 17, 11);
 
 	/* If the hardware switch controls this radio, and the hardware
-	   switch is disabled, don't allow changing the software state */
+	   switch is disabled, don't allow changing the software state.
+	   If the hardware switch is reported as not supported, always
+	   fire the SMI to toggle the killswitch. */
 	if ((hwswitch_state & BIT(hwswitch_bit)) &&
-	    !(buffer->output[1] & BIT(16))) {
+	    !(buffer->output[1] & BIT(16)) &&
+	    (buffer->output[1] & BIT(0))) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = {
 
 static void dell_update_rfkill(struct work_struct *ignored)
 {
+	int status;
+
+	get_buffer();
+	dell_send_request(buffer, 17, 11);
+	status = buffer->output[1];
+	release_buffer();
+
+	/* if hardware rfkill is not supported, set it explicitly */
+	if (!(status & BIT(0))) {
+		if (wifi_rfkill)
+			dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
+		if (bluetooth_rfkill)
+			dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
+		if (wwan_rfkill)
+			dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
+	}
+
 	if (wifi_rfkill)
 		dell_rfkill_query(wifi_rfkill, (void *)1);
 	if (bluetooth_rfkill)
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 930e62762365..61433d492862 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -60,69 +60,20 @@ enum pmic_gpio_register {
 #define GPOSW_DOU 0x08
 #define GPOSW_RDRV 0x30
 
+#define GPIO_UPDATE_TYPE	0x80000000
 
 #define NUM_GPIO 24
 
-struct pmic_gpio_irq {
-	spinlock_t lock;
-	u32 trigger[NUM_GPIO];
-	u32 dirty;
-	struct work_struct work;
-};
-
-
 struct pmic_gpio {
+	struct mutex		buslock;
 	struct gpio_chip	chip;
-	struct pmic_gpio_irq	irqtypes;
 	void			*gpiointr;
 	int			irq;
 	unsigned		irq_base;
+	unsigned int		update_type;
+	u32			trigger_type;
 };
 
-static void pmic_program_irqtype(int gpio, int type)
-{
-	if (type & IRQ_TYPE_EDGE_RISING)
-		intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
-	else
-		intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
-
-	if (type & IRQ_TYPE_EDGE_FALLING)
-		intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
-	else
-		intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
-};
-
-static void pmic_irqtype_work(struct work_struct *work)
-{
-	struct pmic_gpio_irq *t =
-		container_of(work, struct pmic_gpio_irq, work);
-	unsigned long flags;
-	int i;
-	u16 type;
-
-	spin_lock_irqsave(&t->lock, flags);
-	/* As we drop the lock, we may need multiple scans if we race the
-	   pmic_irq_type function */
-	while (t->dirty) {
-		/*
-		 *	For each pin that has the dirty bit set send an IPC
-		 *	message to configure the hardware via the PMIC
-		 */
-		for (i = 0; i < NUM_GPIO; i++) {
-			if (!(t->dirty & (1 << i)))
-				continue;
-			t->dirty &= ~(1 << i);
-			/* We can't trust the array entry or dirty
-			   once the lock is dropped */
-			type = t->trigger[i];
-			spin_unlock_irqrestore(&t->lock, flags);
-			pmic_program_irqtype(i, type);
-			spin_lock_irqsave(&t->lock, flags);
-		}
-	}
-	spin_unlock_irqrestore(&t->lock, flags);
-}
-
 static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
 	if (offset > 8) {
@@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 			1 << (offset - 16));
 }
 
-static int pmic_irq_type(unsigned irq, unsigned type)
+/*
+ * This is called from genirq with pg->buslock locked and
+ * irq_desc->lock held. We can not access the scu bus here, so we
+ * store the change and update in the bus_sync_unlock() function below
+ */
+static int pmic_irq_type(struct irq_data *data, unsigned type)
 {
-	struct pmic_gpio *pg = get_irq_chip_data(irq);
-	u32 gpio = irq - pg->irq_base;
-	unsigned long flags;
+	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+	u32 gpio = data->irq - pg->irq_base;
 
 	if (gpio >= pg->chip.ngpio)
 		return -EINVAL;
 
-	spin_lock_irqsave(&pg->irqtypes.lock, flags);
-	pg->irqtypes.trigger[gpio] = type;
-	pg->irqtypes.dirty |=  (1 << gpio);
-	spin_unlock_irqrestore(&pg->irqtypes.lock, flags);
-	schedule_work(&pg->irqtypes.work);
+	pg->trigger_type = type;
+	pg->update_type = gpio | GPIO_UPDATE_TYPE;
 	return 0;
 }
 
-
-
 static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
 	struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
@@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 }
 
 /* the gpiointr register is read-clear, so just do nothing. */
-static void pmic_irq_unmask(unsigned irq)
-{
-};
+static void pmic_irq_unmask(struct irq_data *data) { }
 
-static void pmic_irq_mask(unsigned irq)
-{
-};
+static void pmic_irq_mask(struct irq_data *data) { }
 
 static struct irq_chip pmic_irqchip = {
 	.name		= "PMIC-GPIO",
-	.mask		= pmic_irq_mask,
-	.unmask		= pmic_irq_unmask,
-	.set_type	= pmic_irq_type,
+	.irq_mask	= pmic_irq_mask,
+	.irq_unmask	= pmic_irq_unmask,
+	.irq_set_type	= pmic_irq_type,
 };
 
-static void pmic_irq_handler(unsigned irq, struct irq_desc *desc)
+static irqreturn_t pmic_irq_handler(int irq, void *data)
 {
-	struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq);
+	struct pmic_gpio *pg = data;
 	u8 intsts = *((u8 *)pg->gpiointr + 4);
 	int gpio;
+	irqreturn_t ret = IRQ_NONE;
 
 	for (gpio = 0; gpio < 8; gpio++) {
 		if (intsts & (1 << gpio)) {
 			pr_debug("pmic pin %d triggered\n", gpio);
 			generic_handle_irq(pg->irq_base + gpio);
+			ret = IRQ_HANDLED;
 		}
 	}
-
-	if (desc->chip->irq_eoi)
-		desc->chip->irq_eoi(irq_get_irq_data(irq));
-	else
-		dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq);
+	return ret;
 }
 
 static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
@@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
 	pg->chip.can_sleep = 1;
 	pg->chip.dev = dev;
 
-	INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work);
-	spin_lock_init(&pg->irqtypes.lock);
+	mutex_init(&pg->buslock);
 
 	pg->chip.dev = dev;
 	retval = gpiochip_add(&pg->chip);
@@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
 		printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
 		goto err;
 	}
-	set_irq_data(pg->irq, pg);
-	set_irq_chained_handler(pg->irq, pmic_irq_handler);
+
+	retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
+	if (retval) {
+		printk(KERN_WARNING "pmic: Interrupt request failed\n");
+		goto err;
+	}
+
 	for (i = 0; i < 8; i++) {
 		set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
 					handle_simple_irq, "demux");
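
The intel_pmic_gpio rework above is a genirq conversion: the irq_chip callbacks now take a struct irq_data * (chip data is recovered with irq_data_get_irq_chip_data()), and because the PMIC sits behind a sleeping bus, pmic_irq_type() only records the requested trigger for a later, non-atomic update instead of programming the hardware immediately. A bare skeleton of that callback style, with hypothetical my_* names:

    #include <linux/irq.h>

    struct my_chip {
    	unsigned int irq_base;
    	unsigned int pending_gpio;
    	unsigned int pending_type;
    };

    static void my_irq_mask(struct irq_data *data) { }
    static void my_irq_unmask(struct irq_data *data) { }

    static int my_irq_set_type(struct irq_data *data, unsigned int type)
    {
    	struct my_chip *chip = irq_data_get_irq_chip_data(data);
    	unsigned int gpio = data->irq - chip->irq_base;

    	/* record only; the sleeping bus write happens outside atomic context */
    	chip->pending_type = type;
    	chip->pending_gpio = gpio;
    	return 0;
    }

    static struct irq_chip my_irqchip = {
    	.name		= "my-pmic-gpio",
    	.irq_mask	= my_irq_mask,
    	.irq_unmask	= my_irq_unmask,
    	.irq_set_type	= my_irq_set_type,
    };
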
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 1fe0f1feff71..865ef78d6f1a 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \
 			return -EINVAL; \
 	return count; \
 } \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
 	show_bool_##value, set_bool_##value);
 
 show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index dd599585c6a9..eb9922385ef8 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode)
 	if (keycode != KEY_RESERVED) {
 		mutex_lock(&tpacpi_inputdev_send_mutex);
 
+		input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
 		input_report_key(tpacpi_inputdev, keycode, 1);
-		if (keycode == KEY_UNKNOWN)
-			input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
-				    scancode);
 		input_sync(tpacpi_inputdev);
 
+		input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
 		input_report_key(tpacpi_inputdev, keycode, 0);
-		if (keycode == KEY_UNKNOWN)
-			input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
-				    scancode);
 		input_sync(tpacpi_inputdev);
 
 		mutex_unlock(&tpacpi_inputdev_send_mutex);
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cba1b43f7519..a4e8eb9fece6 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
 {
 	unsigned long flags;
 	int captured = 0;
-	struct pps_ktime ts_real;
+	struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
 
 	/* check event type */
 	BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 76b41853a877..1269fbd2deca 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
 
 	/* Several chips lock up trying to read undefined config space */
 	if (capable(CAP_SYS_ADMIN))
-		size = 0x200000;
+		size = RIO_MAINT_SPACE_SZ;
 
-	if (off > size)
+	if (off >= size)
 		return 0;
 	if (off + count > size) {
 		size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
 	loff_t init_off = off;
 	u8 *data = (u8 *) buf;
 
-	if (off > 0x200000)
+	if (off >= RIO_MAINT_SPACE_SZ)
 		return 0;
-	if (off + count > 0x200000) {
-		size = 0x200000 - off;
+	if (off + count > RIO_MAINT_SPACE_SZ) {
+		size = RIO_MAINT_SPACE_SZ - off;
 		count = size;
 	}
 
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
 		 .name = "config",
 		 .mode = S_IRUGO | S_IWUSR,
 		 },
-	.size = 0x200000,
+	.size = RIO_MAINT_SPACE_SZ,
 	.read = rio_read_config,
 	.write = rio_write_config,
 };
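
The rio-sysfs hunks replace the bare 0x200000 with the RIO_MAINT_SPACE_SZ constant and tighten the bound from 'off > size' to 'off >= size', so a read starting exactly at the end of the maintenance space returns 0 (EOF) instead of slipping through. The clamping shape for a sysfs binary attribute, sketched with a hypothetical DEMO_SPACE_SZ and a reduced argument list:

    #define DEMO_SPACE_SZ	0x200000	/* stands in for RIO_MAINT_SPACE_SZ */

    static ssize_t demo_bin_read(char *buf, loff_t off, size_t count)
    {
    	if (off >= DEMO_SPACE_SZ)		/* at or beyond the end: EOF */
    		return 0;
    	if (off + count > DEMO_SPACE_SZ)	/* clamp to what is left */
    		count = DEMO_SPACE_SZ - off;

    	/* ... copy 'count' bytes starting at 'off' into buf ... */
    	return count;
    }
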
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index f53d31b950d4..2bb5de1f2421 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
 
 	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
 
-	BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages);
+	BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
 
 	return mc13xxx_regulators[id].voltages[val];
 }
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 8b0d2c4bde91..06df898842c0 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
 		return REGULATOR_MODE_IDLE;
 	default:
 		BUG();
+		return -EINVAL;
 	}
 }
 
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index cdd97192dc69..4941cade319f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -97,6 +97,18 @@ config RTC_INTF_DEV
 
 	  If unsure, say Y.
 
+config RTC_INTF_DEV_UIE_EMUL
+	bool "RTC UIE emulation on dev interface"
+	depends on RTC_INTF_DEV
+	help
+	  Provides an emulation for RTC_UIE if the underlying rtc chip
+	  driver does not expose RTC_UIE ioctls. Those requests generate
+	  once-per-second update interrupts, used for synchronization.
+
+	  The emulation code will read the time from the hardware
+	  clock several times per second; please enable this option
+	  only if you know that you really need it.
+
 config RTC_DRV_TEST
 	tristate "Test driver/device"
 	help
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c01967244d..cb2f0728fd70 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -209,9 +209,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 	}
 
 	if (err)
-		return err;
-
-	if (!rtc->ops)
+		/* nothing */;
+	else if (!rtc->ops)
 		err = -ENODEV;
 	else if (!rtc->ops->alarm_irq_enable)
 		err = -EINVAL;
@@ -229,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 	if (err)
 		return err;
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+	if (enabled == 0 && rtc->uie_irq_active) {
+		mutex_unlock(&rtc->ops_lock);
+		return rtc_dev_update_irq_enable_emul(rtc, 0);
+	}
+#endif
 	/* make sure we're changing state */
 	if (rtc->uie_rtctimer.enabled == enabled)
 		goto out;
@@ -248,6 +253,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 
 out:
 	mutex_unlock(&rtc->ops_lock);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+	/*
+	 * Enable emulation if the driver did not provide
+	 * the update_irq_enable function pointer, or if it returned
+	 * -EINVAL to signal that it was configured without update
+	 * interrupts or that they are not available at the moment.
+	 */
+	if (err == -EINVAL)
+		err = rtc_dev_update_irq_enable_emul(rtc, enabled);
+#endif
 	return err;
 
 }
@@ -263,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
  *
  * Triggers the registered irq_task function callback.
  */
-static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
 {
 	unsigned long flags;
 
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index b2752b6e7a2f..e725d51e773d 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -134,36 +134,29 @@ static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 	return ret;
 }
 
-static int at32_rtc_ioctl(struct device *dev, unsigned int cmd,
-			unsigned long arg)
+static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
 	int ret = 0;
 
 	spin_lock_irq(&rtc->lock);
 
-	switch (cmd) {
-	case RTC_AIE_ON:
+	if(enabled) {
 		if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
 			ret = -EINVAL;
-			break;
+			goto out;
 		}
 		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
 				| RTC_BIT(CTRL_TOPEN));
 		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
 		rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
-		break;
-	case RTC_AIE_OFF:
+	} else {
 		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
 				& ~RTC_BIT(CTRL_TOPEN));
 		rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
 		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
-		break;
-	default:
-		ret = -ENOIOCTLCMD;
-		break;
 	}
-
+out:
 	spin_unlock_irq(&rtc->lock);
 
 	return ret;
@@ -195,11 +188,11 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
 }
 
 static struct rtc_class_ops at32_rtc_ops = {
-	.ioctl		= at32_rtc_ioctl,
 	.read_time	= at32_rtc_readtime,
 	.set_time	= at32_rtc_settime,
 	.read_alarm	= at32_rtc_readalarm,
 	.set_alarm	= at32_rtc_setalarm,
+	.alarm_irq_enable = at32_rtc_alarm_irq_enable,
 };
 
 static int __init at32_rtc_probe(struct platform_device *pdev)
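
This rtc-at32ap700x change is the first of a series in this diff that all follow the same conversion: the RTC_AIE_ON/RTC_AIE_OFF cases are dropped from the driver's .ioctl handler and re-expressed as a dedicated .alarm_irq_enable operation that the RTC core calls directly. A generic skeleton of the resulting callback, with hypothetical foo_* helpers standing in for each driver's register accessors:

    #include <linux/device.h>
    #include <linux/rtc.h>

    struct foo_rtc;					/* driver-private state */
    void foo_enable_alarm_irq(struct foo_rtc *rtc);	/* driver-specific register write */
    void foo_disable_alarm_irq(struct foo_rtc *rtc);

    static int foo_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
    {
    	struct foo_rtc *rtc = dev_get_drvdata(dev);

    	if (enabled)
    		foo_enable_alarm_irq(rtc);	/* former RTC_AIE_ON branch */
    	else
    		foo_disable_alarm_irq(rtc);	/* former RTC_AIE_OFF branch */

    	return 0;
    }

    static const struct rtc_class_ops foo_rtc_ops = {
    	/* .read_time, .set_time, ... as before */
    	.alarm_irq_enable = foo_rtc_alarm_irq_enable,
    };

The same shape repeats below for at91rm9200, at91sam9, bfin, ds1286, ds1305, ds1307, ds1374, m41t80, m48t59, mrst and mv.
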
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index bc8bbca9a2e2..26d1cf5d19ae 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -195,13 +195,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
 
 	/* important:  scrub old status before enabling IRQs */
 	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm off */
-		at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
-		break;
-	case RTC_AIE_ON:	/* alarm on */
-		at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
-		at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
-		break;
 	case RTC_UIE_OFF:	/* update off */
 		at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
 		break;
@@ -217,6 +210,18 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
 	return ret;
 }
 
+static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	pr_debug("%s(): enabled=%08x\n", __func__, enabled);
+
+	if (enabled) {
+		at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+		at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+	} else
+		at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+
+	return 0;
+}
 /*
  * Provide additional RTC information in /proc/driver/rtc
  */
@@ -270,6 +275,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
 	.read_alarm	= at91_rtc_readalarm,
 	.set_alarm	= at91_rtc_setalarm,
 	.proc		= at91_rtc_proc,
+	.alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index f677e0710ca1..5469c52cba3d 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -229,12 +229,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
 	dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
 
 	switch (cmd) {
-	case RTC_AIE_OFF:		/* alarm off */
-		rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
-		break;
-	case RTC_AIE_ON:		/* alarm on */
-		rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
-		break;
 	case RTC_UIE_OFF:		/* update off */
 		rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
 		break;
@@ -249,6 +243,19 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
 	return ret;
 }
 
+static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct sam9_rtc *rtc = dev_get_drvdata(dev);
+	u32 mr = rtt_readl(rtc, MR);
+
+	dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr);
+	if (enabled)
+		rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
+	else
+		rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
+	return 0;
+}
+
 /*
  * Provide additional RTC information in /proc/driver/rtc
  */
@@ -302,6 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
 	.read_alarm	= at91_rtc_readalarm,
 	.set_alarm	= at91_rtc_setalarm,
 	.proc		= at91_rtc_proc,
+	.alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index b4b6087f2234..17971d93354d 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -259,15 +259,6 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
 		bfin_rtc_int_clear(~RTC_ISTAT_SEC);
 		break;
 
-	case RTC_AIE_ON:
-		dev_dbg_stamp(dev);
-		bfin_rtc_int_set_alarm(rtc);
-		break;
-	case RTC_AIE_OFF:
-		dev_dbg_stamp(dev);
-		bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
-		break;
-
 	default:
 		dev_dbg_stamp(dev);
 		ret = -ENOIOCTLCMD;
@@ -276,6 +267,18 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
 	return ret;
 }
 
+static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct bfin_rtc *rtc = dev_get_drvdata(dev);
+
+	dev_dbg_stamp(dev);
+	if (enabled)
+		bfin_rtc_int_set_alarm(rtc);
+	else
+		bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+	return 0;
+}
+
 static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct bfin_rtc *rtc = dev_get_drvdata(dev);
@@ -362,6 +364,7 @@ static struct rtc_class_ops bfin_rtc_ops = {
 	.read_alarm    = bfin_rtc_read_alarm,
 	.set_alarm     = bfin_rtc_set_alarm,
 	.proc          = bfin_rtc_proc,
+	.alarm_irq_enable = bfin_rtc_alarm_irq_enable,
 };
 
 static int __devinit bfin_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 212b16edafc0..d0e06edb14c5 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
 	return err;
 }
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+/*
+ * Poll the RTC seconds field for a change as often as possible;
+ * after the first RTC_UIE tick, use a timer to reduce the polling rate.
+ */
+static void rtc_uie_task(struct work_struct *work)
+{
+	struct rtc_device *rtc =
+		container_of(work, struct rtc_device, uie_task);
+	struct rtc_time tm;
+	int num = 0;
+	int err;
+
+	err = rtc_read_time(rtc, &tm);
+
+	spin_lock_irq(&rtc->irq_lock);
+	if (rtc->stop_uie_polling || err) {
+		rtc->uie_task_active = 0;
+	} else if (rtc->oldsecs != tm.tm_sec) {
+		num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
+		rtc->oldsecs = tm.tm_sec;
+		rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
+		rtc->uie_timer_active = 1;
+		rtc->uie_task_active = 0;
+		add_timer(&rtc->uie_timer);
+	} else if (schedule_work(&rtc->uie_task) == 0) {
+		rtc->uie_task_active = 0;
+	}
+	spin_unlock_irq(&rtc->irq_lock);
+	if (num)
+		rtc_handle_legacy_irq(rtc, num, RTC_UF);
+}
+static void rtc_uie_timer(unsigned long data)
+{
+	struct rtc_device *rtc = (struct rtc_device *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc->irq_lock, flags);
+	rtc->uie_timer_active = 0;
+	rtc->uie_task_active = 1;
+	if ((schedule_work(&rtc->uie_task) == 0))
+		rtc->uie_task_active = 0;
+	spin_unlock_irqrestore(&rtc->irq_lock, flags);
+}
+
+static int clear_uie(struct rtc_device *rtc)
+{
+	spin_lock_irq(&rtc->irq_lock);
+	if (rtc->uie_irq_active) {
+		rtc->stop_uie_polling = 1;
+		if (rtc->uie_timer_active) {
+			spin_unlock_irq(&rtc->irq_lock);
+			del_timer_sync(&rtc->uie_timer);
+			spin_lock_irq(&rtc->irq_lock);
+			rtc->uie_timer_active = 0;
+		}
+		if (rtc->uie_task_active) {
+			spin_unlock_irq(&rtc->irq_lock);
+			flush_scheduled_work();
+			spin_lock_irq(&rtc->irq_lock);
+		}
+		rtc->uie_irq_active = 0;
+	}
+	spin_unlock_irq(&rtc->irq_lock);
+	return 0;
+}
+
+static int set_uie(struct rtc_device *rtc)
+{
+	struct rtc_time tm;
+	int err;
+
+	err = rtc_read_time(rtc, &tm);
+	if (err)
+		return err;
+	spin_lock_irq(&rtc->irq_lock);
+	if (!rtc->uie_irq_active) {
+		rtc->uie_irq_active = 1;
+		rtc->stop_uie_polling = 0;
+		rtc->oldsecs = tm.tm_sec;
+		rtc->uie_task_active = 1;
+		if (schedule_work(&rtc->uie_task) == 0)
+			rtc->uie_task_active = 0;
+	}
+	rtc->irq_data = 0;
+	spin_unlock_irq(&rtc->irq_lock);
+	return 0;
+}
+
+int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
+{
+	if (enabled)
+		return set_uie(rtc);
+	else
+		return clear_uie(rtc);
+}
+EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
+
+#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
 
 static ssize_t
 rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -154,19 +253,7 @@ static long rtc_dev_ioctl(struct file *file,
 	if (err)
 		goto done;
 
-	/* try the driver's ioctl interface */
-	if (ops->ioctl) {
-		err = ops->ioctl(rtc->dev.parent, cmd, arg);
-		if (err != -ENOIOCTLCMD) {
-			mutex_unlock(&rtc->ops_lock);
-			return err;
-		}
-	}
-
-	/* if the driver does not provide the ioctl interface
-	 * or if that particular ioctl was not implemented
-	 * (-ENOIOCTLCMD), we will try to emulate here.
-	 *
+	/*
 	 * Drivers *SHOULD NOT* provide ioctl implementations
 	 * for these requests.  Instead, provide methods to
 	 * support the following code, so that the RTC's main
@@ -329,7 +416,12 @@ static long rtc_dev_ioctl(struct file *file,
 		return err;
 
 	default:
-		err = -ENOTTY;
+		/* Finally try the driver's ioctl interface */
+		if (ops->ioctl) {
+			err = ops->ioctl(rtc->dev.parent, cmd, arg);
+			if (err == -ENOIOCTLCMD)
+				err = -ENOTTY;
+		}
 		break;
 	}
 
@@ -394,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc)
 
 	rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+	INIT_WORK(&rtc->uie_task, rtc_uie_task);
+	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+#endif
+
 	cdev_init(&rtc->char_dev, &rtc_dev_fops);
 	rtc->char_dev.owner = rtc->owner;
 }
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index bf430f9091ed..60ce69600828 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -40,6 +40,26 @@ static inline void ds1286_rtc_write(struct ds1286_priv *priv, u8 data, int reg)
 	__raw_writel(data, &priv->rtcregs[reg]);
 }
 
+
+static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct ds1286_priv *priv = dev_get_drvdata(dev);
+	unsigned long flags;
+	unsigned char val;
+
+	/* Allow or mask alarm interrupts */
+	spin_lock_irqsave(&priv->lock, flags);
+	val = ds1286_rtc_read(priv, RTC_CMD);
+	if (enabled)
+		val &=  ~RTC_TDM;
+	else
+		val |=  RTC_TDM;
+	ds1286_rtc_write(priv, val, RTC_CMD);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return 0;
+}
+
 #ifdef CONFIG_RTC_INTF_DEV
 
 static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
@@ -49,22 +69,6 @@ static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 	unsigned char val;
 
 	switch (cmd) {
-	case RTC_AIE_OFF:
-		/* Mask alarm int. enab. bit	*/
-		spin_lock_irqsave(&priv->lock, flags);
-		val = ds1286_rtc_read(priv, RTC_CMD);
-		val |=  RTC_TDM;
-		ds1286_rtc_write(priv, val, RTC_CMD);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		break;
-	case RTC_AIE_ON:
-		/* Allow alarm interrupts.	*/
-		spin_lock_irqsave(&priv->lock, flags);
-		val = ds1286_rtc_read(priv, RTC_CMD);
-		val &=  ~RTC_TDM;
-		ds1286_rtc_write(priv, val, RTC_CMD);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		break;
 	case RTC_WIE_OFF:
 		/* Mask watchdog int. enab. bit	*/
 		spin_lock_irqsave(&priv->lock, flags);
@@ -316,12 +320,13 @@ static int ds1286_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
 }
 
 static const struct rtc_class_ops ds1286_ops = {
-	.ioctl   	= ds1286_ioctl,
-	.proc   	= ds1286_proc,
+	.ioctl		= ds1286_ioctl,
+	.proc		= ds1286_proc,
 	.read_time	= ds1286_read_time,
 	.set_time	= ds1286_set_time,
 	.read_alarm	= ds1286_read_alarm,
 	.set_alarm	= ds1286_set_alarm,
+	.alarm_irq_enable = ds1286_alarm_irq_enable,
 };
 
 static int __devinit ds1286_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 077af1d7b9e4..57fbcc149ba7 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -139,49 +139,32 @@ static u8 hour2bcd(bool hr12, int hour)
  * Interface to RTC framework
  */
 
-#ifdef CONFIG_RTC_INTF_DEV
-
-/*
- * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
- */
-static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
+static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct ds1305	*ds1305 = dev_get_drvdata(dev);
 	u8		buf[2];
-	int		status = -ENOIOCTLCMD;
+	long		err = -EINVAL;
 
 	buf[0] = DS1305_WRITE | DS1305_CONTROL;
 	buf[1] = ds1305->ctrl[0];
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		status = 0;
-		if (!(buf[1] & DS1305_AEI0))
-			goto done;
-		buf[1] &= ~DS1305_AEI0;
-		break;
-
-	case RTC_AIE_ON:
-		status = 0;
+	if (enabled) {
 		if (ds1305->ctrl[0] & DS1305_AEI0)
 			goto done;
 		buf[1] |= DS1305_AEI0;
-		break;
-	}
-	if (status == 0) {
-		status = spi_write_then_read(ds1305->spi, buf, sizeof buf,
-				NULL, 0);
-		if (status >= 0)
-			ds1305->ctrl[0] = buf[1];
+	} else {
+		if (!(buf[1] & DS1305_AEI0))
+			goto done;
+		buf[1] &= ~DS1305_AEI0;
 	}
-
+	err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0);
+	if (err >= 0)
+		ds1305->ctrl[0] = buf[1];
 done:
-	return status;
+	return err;
+
 }
 
-#else
-#define ds1305_ioctl	NULL
-#endif
 
 /*
  * Get/set of date and time is pretty normal.
@@ -460,12 +443,12 @@ done:
 #endif
 
 static const struct rtc_class_ops ds1305_ops = {
-	.ioctl		= ds1305_ioctl,
 	.read_time	= ds1305_get_time,
 	.set_time	= ds1305_set_time,
 	.read_alarm	= ds1305_get_alarm,
 	.set_alarm	= ds1305_set_alarm,
 	.proc		= ds1305_proc,
+	.alarm_irq_enable = ds1305_alarm_irq_enable,
 };
 
 static void ds1305_work(struct work_struct *work)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 0d559b6416dd..4724ba3acf1a 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -495,50 +495,27 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 	return 0;
 }
 
-static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct i2c_client	*client = to_i2c_client(dev);
 	struct ds1307		*ds1307 = i2c_get_clientdata(client);
 	int			ret;
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		if (!test_bit(HAS_ALARM, &ds1307->flags))
-			return -ENOTTY;
-
-		ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
-		if (ret < 0)
-			return ret;
-
-		ret &= ~DS1337_BIT_A1IE;
-
-		ret = i2c_smbus_write_byte_data(client,
-						DS1337_REG_CONTROL, ret);
-		if (ret < 0)
-			return ret;
-
-		break;
-
-	case RTC_AIE_ON:
-		if (!test_bit(HAS_ALARM, &ds1307->flags))
-			return -ENOTTY;
+	if (!test_bit(HAS_ALARM, &ds1307->flags))
+		return -ENOTTY;
 
-		ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
-		if (ret < 0)
-			return ret;
+	ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+	if (ret < 0)
+		return ret;
 
+	if (enabled)
 		ret |= DS1337_BIT_A1IE;
+	else
+		ret &= ~DS1337_BIT_A1IE;
 
-		ret = i2c_smbus_write_byte_data(client,
-						DS1337_REG_CONTROL, ret);
-		if (ret < 0)
-			return ret;
-
-		break;
-
-	default:
-		return -ENOIOCTLCMD;
-	}
+	ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret);
+	if (ret < 0)
+		return ret;
 
 	return 0;
 }
@@ -548,7 +525,7 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
 	.set_time	= ds1307_set_time,
 	.read_alarm	= ds1337_read_alarm,
 	.set_alarm	= ds1337_set_alarm,
-	.ioctl		= ds1307_ioctl,
+	.alarm_irq_enable = ds1307_alarm_irq_enable,
 };
 
 /*----------------------------------------------------------------------*/
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 47fb6357c346..d834a63ec4b0 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -307,42 +307,25 @@ unlock:
 	mutex_unlock(&ds1374->mutex);
 }
 
-static int ds1374_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct ds1374 *ds1374 = i2c_get_clientdata(client);
-	int ret = -ENOIOCTLCMD;
+	int ret;
 
 	mutex_lock(&ds1374->mutex);
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
-		if (ret < 0)
-			goto out;
-
-		ret &= ~DS1374_REG_CR_WACE;
-
-		ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
-		if (ret < 0)
-			goto out;
-
-		break;
-
-	case RTC_AIE_ON:
-		ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
-		if (ret < 0)
-			goto out;
+	ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
+	if (ret < 0)
+		goto out;
 
+	if (enabled) {
 		ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE;
 		ret &= ~DS1374_REG_CR_WDALM;
-
-		ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
-		if (ret < 0)
-			goto out;
-
-		break;
+	} else {
+		ret &= ~DS1374_REG_CR_WACE;
 	}
+	ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
 
 out:
 	mutex_unlock(&ds1374->mutex);
@@ -354,7 +337,7 @@ static const struct rtc_class_ops ds1374_rtc_ops = {
 	.set_time = ds1374_set_time,
 	.read_alarm = ds1374_read_alarm,
 	.set_alarm = ds1374_set_alarm,
-	.ioctl = ds1374_ioctl,
+	.alarm_irq_enable = ds1374_alarm_irq_enable,
 };
 
 static int ds1374_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee19764c..950735415a7c 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
 /*
  * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
  *
- * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Copyright (C) 2009-2011 Freescale Semiconductor.
  * Author: Jack Lan <jack.lan@freescale.com>
  *
  * This program is free software; you can redistribute  it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
 		time->tm_hour = bcd2bin(hour);
 	}
 
-	time->tm_wday = bcd2bin(week);
+	/* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
+	time->tm_wday = bcd2bin(week) - 1;
 	time->tm_mday = bcd2bin(day);
-	time->tm_mon = bcd2bin(month & 0x7F);
+	/* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
+	time->tm_mon = bcd2bin(month & 0x7F) - 1;
 	if (century)
 		add_century = 100;
 
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
 	buf[0] = bin2bcd(time->tm_sec);
 	buf[1] = bin2bcd(time->tm_min);
 	buf[2] = bin2bcd(time->tm_hour);
-	buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+	/* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
+	buf[3] = bin2bcd(time->tm_wday + 1);
 	buf[4] = bin2bcd(time->tm_mday); /* Date */
-	buf[5] = bin2bcd(time->tm_mon);
+	/* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
+	buf[5] = bin2bcd(time->tm_mon + 1);
 	if (time->tm_year >= 100) {
 		buf[5] |= 0x80;
 		buf[6] = bin2bcd(time->tm_year - 100);
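
The ds3232 hunk corrects the off-by-one between struct rtc_time (tm_wday 0-6, tm_mon 0-11) and the chip's 1-based BCD weekday/month registers. The conversions in isolation, assuming only bcd2bin()/bin2bcd() from <linux/bcd.h>; the demo_* helper names are illustrative:

    #include <linux/bcd.h>
    #include <linux/types.h>

    /* chip register -> struct rtc_time */
    static inline int demo_chip_wday_to_tm(u8 reg)  { return bcd2bin(reg) - 1; }        /* 1..7  -> 0..6  */
    static inline int demo_chip_mon_to_tm(u8 reg)   { return bcd2bin(reg & 0x7f) - 1; } /* 1..12 -> 0..11 */

    /* struct rtc_time -> chip register */
    static inline u8 demo_tm_wday_to_chip(int wday) { return bin2bcd(wday + 1); }
    static inline u8 demo_tm_mon_to_chip(int mon)   { return bin2bcd(mon + 1); }
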
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 5a8daa358066..69fe664a2228 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -213,41 +213,27 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	return m41t80_set_datetime(to_i2c_client(dev), tm);
 }
 
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-static int
-m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int m41t80_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	int rc;
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		break;
-	default:
-		return -ENOIOCTLCMD;
-	}
-
 	rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
 	if (rc < 0)
 		goto err;
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		rc &= ~M41T80_ALMON_AFE;
-		break;
-	case RTC_AIE_ON:
+
+	if (enabled)
 		rc |= M41T80_ALMON_AFE;
-		break;
-	}
+	else
+		rc &= ~M41T80_ALMON_AFE;
+
 	if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
 		goto err;
+
 	return 0;
 err:
 	return -EIO;
 }
-#else
-#define	m41t80_rtc_ioctl NULL
-#endif
 
 static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 {
@@ -374,7 +360,7 @@ static struct rtc_class_ops m41t80_rtc_ops = {
 	.read_alarm = m41t80_rtc_read_alarm,
 	.set_alarm = m41t80_rtc_set_alarm,
 	.proc = m41t80_rtc_proc,
-	.ioctl = m41t80_rtc_ioctl,
+	.alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index a99a0b554eb8..3978f4caf724 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -263,30 +263,21 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 /*
  * Handle commands from user-space
  */
-static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd,
-			unsigned long arg)
+static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	unsigned long flags;
-	int ret = 0;
 
 	spin_lock_irqsave(&m48t59->lock, flags);
-	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm interrupt off */
-		M48T59_WRITE(0x00, M48T59_INTR);
-		break;
-	case RTC_AIE_ON:	/* alarm interrupt on */
+	if (enabled)
 		M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
-		break;
-	default:
-		ret = -ENOIOCTLCMD;
-		break;
-	}
+	else
+		M48T59_WRITE(0x00, M48T59_INTR);
 	spin_unlock_irqrestore(&m48t59->lock, flags);
 
-	return ret;
+	return 0;
 }
 
 static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
@@ -330,12 +321,12 @@ static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
 }
 
 static const struct rtc_class_ops m48t59_rtc_ops = {
-	.ioctl		= m48t59_rtc_ioctl,
 	.read_time	= m48t59_rtc_read_time,
 	.set_time	= m48t59_rtc_set_time,
 	.read_alarm	= m48t59_rtc_readalarm,
 	.set_alarm	= m48t59_rtc_setalarm,
 	.proc		= m48t59_rtc_proc,
+	.alarm_irq_enable = m48t59_rtc_alarm_irq_enable,
 };
 
 static const struct rtc_class_ops m48t02_rtc_ops = {
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index bcd0cf63eb16..1db62db8469d 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -255,42 +255,21 @@ static int mrst_irq_set_state(struct device *dev, int enabled)
 	return 0;
 }
 
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-
 /* Currently, the vRTC doesn't support UIE ON/OFF */
-static int
-mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct mrst_rtc	*mrst = dev_get_drvdata(dev);
 	unsigned long	flags;
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		if (!mrst->irq)
-			return -EINVAL;
-		break;
-	default:
-		/* PIE ON/OFF is handled by mrst_irq_set_state() */
-		return -ENOIOCTLCMD;
-	}
-
 	spin_lock_irqsave(&rtc_lock, flags);
-	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm off */
-		mrst_irq_disable(mrst, RTC_AIE);
-		break;
-	case RTC_AIE_ON:	/* alarm on */
+	if (enabled)
 		mrst_irq_enable(mrst, RTC_AIE);
-		break;
-	}
+	else
+		mrst_irq_disable(mrst, RTC_AIE);
 	spin_unlock_irqrestore(&rtc_lock, flags);
 	return 0;
 }
 
-#else
-#define	mrst_rtc_ioctl	NULL
-#endif
 
 #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
 
@@ -317,13 +296,13 @@ static int mrst_procfs(struct device *dev, struct seq_file *seq)
 #endif
 
 static const struct rtc_class_ops mrst_rtc_ops = {
-	.ioctl		= mrst_rtc_ioctl,
 	.read_time	= mrst_read_time,
 	.set_time	= mrst_set_time,
 	.read_alarm	= mrst_read_alarm,
 	.set_alarm	= mrst_set_alarm,
 	.proc		= mrst_procfs,
 	.irq_set_state	= mrst_irq_set_state,
+	.alarm_irq_enable = mrst_rtc_alarm_irq_enable,
 };
 
 static struct mrst_rtc	mrst_rtc;
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index b2fff0ca49f8..67820626e18f 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -82,7 +82,7 @@ static inline unsigned int msm6242_read(struct msm6242_priv *priv,
 static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
 				unsigned int reg)
 {
-	return __raw_writel(val, &priv->regs[reg]);
+	__raw_writel(val, &priv->regs[reg]);
 }
 
 static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index bcca47298554..60627a764514 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -169,25 +169,19 @@ static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
 	return 0;
 }
 
-static int mv_rtc_ioctl(struct device *dev, unsigned int cmd,
-			unsigned long arg)
+static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
 	void __iomem *ioaddr = pdata->ioaddr;
 
 	if (pdata->irq < 0)
-		return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
-		break;
-	case RTC_AIE_ON:
+		return -EINVAL; /* fall back into rtc-dev's emulation */
+
+	if (enabled)
 		writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
-		break;
-	default:
-		return -ENOIOCTLCMD;
-	}
+	else
+		writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
 	return 0;
 }
 
@@ -216,7 +210,7 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = {
 	.set_time	= mv_rtc_set_time,
 	.read_alarm	= mv_rtc_read_alarm,
 	.set_alarm	= mv_rtc_set_alarm,
-	.ioctl		= mv_rtc_ioctl,
+	.alarm_irq_enable = mv_rtc_alarm_irq_enable,
 };
 
 static int __devinit mv_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index e72b523c79a5..b4dbf3a319b3 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -143,8 +143,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 	u8 reg;
 
 	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
 	case RTC_UIE_OFF:
 	case RTC_UIE_ON:
 		break;
@@ -156,13 +154,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 	rtc_wait_not_busy();
 	reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
 	switch (cmd) {
-	/* AIE = Alarm Interrupt Enable */
-	case RTC_AIE_OFF:
-		reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
-		break;
-	case RTC_AIE_ON:
-		reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
-		break;
 	/* UIE = Update Interrupt Enable (1/second) */
 	case RTC_UIE_OFF:
 		reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
@@ -182,6 +173,24 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 #define	omap_rtc_ioctl	NULL
 #endif
 
+static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	u8 reg;
+
+	local_irq_disable();
+	rtc_wait_not_busy();
+	reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+	if (enabled)
+		reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+	else
+		reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+	rtc_wait_not_busy();
+	rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+	local_irq_enable();
+
+	return 0;
+}
+
 /* this hardware doesn't support "don't care" alarm fields */
 static int tm2bcd(struct rtc_time *tm)
 {
@@ -309,6 +318,7 @@ static struct rtc_class_ops omap_rtc_ops = {
 	.set_time	= omap_rtc_set_time,
 	.read_alarm	= omap_rtc_read_alarm,
 	.set_alarm	= omap_rtc_set_alarm,
+	.alarm_irq_enable = omap_rtc_alarm_irq_enable,
 };
 
 static int omap_rtc_alarm;
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index c086fc30a84c..242bbf86c74a 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -81,12 +81,16 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
 
 static int rtc_proc_open(struct inode *inode, struct file *file)
 {
+	int ret;
 	struct rtc_device *rtc = PDE(inode)->data;
 
 	if (!try_module_get(THIS_MODULE))
 		return -ENODEV;
 
-	return single_open(file, rtc_proc_show, rtc);
+	ret = single_open(file, rtc_proc_show, rtc);
+	if (ret)
+		module_put(THIS_MODULE);
+	return ret;
 }
 
 static int rtc_proc_release(struct inode *inode, struct file *file)
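The rtc-proc change above closes a module reference leak: when single_open() fails after try_module_get() has succeeded, the reference must be dropped before returning. The same pattern in isolation, as a hedged sketch with a hypothetical show callback:

	static int example_proc_open(struct inode *inode, struct file *file)
	{
		int ret;

		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		ret = single_open(file, example_proc_show, PDE(inode)->data);
		if (ret)
			module_put(THIS_MODULE);	/* undo the get on failure */
		return ret;
	}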
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 36eb66184461..694da39b6dd2 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -76,7 +76,7 @@ static inline unsigned int rp5c01_read(struct rp5c01_priv *priv,
 static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val,
 				unsigned int reg)
 {
-	return __raw_writel(val, &priv->regs[reg]);
+	__raw_writel(val, &priv->regs[reg]);
 }
 
 static void rp5c01_lock(struct rp5c01_priv *priv)
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index dd14e202c2c8..6aaa1550e3b1 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -299,14 +299,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 		if (rs5c->type == rtc_rs5c372a
 				&& (buf & RS5C372A_CTRL1_SL1))
 			return -ENOIOCTLCMD;
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		/* these irq management calls only make sense for chips
-		 * which are wired up to an IRQ.
-		 */
-		if (!rs5c->has_irq)
-			return -ENOIOCTLCMD;
-		break;
 	default:
 		return -ENOIOCTLCMD;
 	}
@@ -317,12 +309,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 
 	addr = RS5C_ADDR(RS5C_REG_CTRL1);
 	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm off */
-		buf &= ~RS5C_CTRL1_AALE;
-		break;
-	case RTC_AIE_ON:	/* alarm on */
-		buf |= RS5C_CTRL1_AALE;
-		break;
 	case RTC_UIE_OFF:	/* update off */
 		buf &= ~RS5C_CTRL1_CT_MASK;
 		break;
@@ -347,6 +333,39 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 #endif
 
 
+static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct i2c_client	*client = to_i2c_client(dev);
+	struct rs5c372		*rs5c = i2c_get_clientdata(client);
+	unsigned char		buf;
+	int			status, addr;
+
+	buf = rs5c->regs[RS5C_REG_CTRL1];
+
+	if (!rs5c->has_irq)
+		return -EINVAL;
+
+	status = rs5c_get_regs(rs5c);
+	if (status < 0)
+		return status;
+
+	addr = RS5C_ADDR(RS5C_REG_CTRL1);
+	if (enabled)
+		buf |= RS5C_CTRL1_AALE;
+	else
+		buf &= ~RS5C_CTRL1_AALE;
+
+	if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
+		printk(KERN_WARNING "%s: can't update alarm\n",
+			rs5c->rtc->name);
+		status = -EIO;
+	} else
+		rs5c->regs[RS5C_REG_CTRL1] = buf;
+
+	return status;
+}
+
+
 /* NOTE:  Since RTC_WKALM_{RD,SET} were originally defined for EFI,
  * which only exposes a polled programming interface; and since
  * these calls map directly to those EFI requests; we don't demand
@@ -466,6 +485,7 @@ static const struct rtc_class_ops rs5c372_rtc_ops = {
 	.set_time	= rs5c372_rtc_set_time,
 	.read_alarm	= rs5c_read_alarm,
 	.set_alarm	= rs5c_set_alarm,
+	.alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 88ea52b8647a..5dfe5ffcb0d3 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -314,16 +314,6 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
 		unsigned long arg)
 {
 	switch (cmd) {
-	case RTC_AIE_OFF:
-		spin_lock_irq(&sa1100_rtc_lock);
-		RTSR &= ~RTSR_ALE;
-		spin_unlock_irq(&sa1100_rtc_lock);
-		return 0;
-	case RTC_AIE_ON:
-		spin_lock_irq(&sa1100_rtc_lock);
-		RTSR |= RTSR_ALE;
-		spin_unlock_irq(&sa1100_rtc_lock);
-		return 0;
 	case RTC_UIE_OFF:
 		spin_lock_irq(&sa1100_rtc_lock);
 		RTSR &= ~RTSR_HZE;
@@ -338,6 +328,17 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
 	return -ENOIOCTLCMD;
 }
 
+static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	spin_lock_irq(&sa1100_rtc_lock);
+	if (enabled)
+		RTSR |= RTSR_ALE;
+	else
+		RTSR &= ~RTSR_ALE;
+	spin_unlock_irq(&sa1100_rtc_lock);
+	return 0;
+}
+
 static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	rtc_time_to_tm(RCNR, tm);
@@ -410,6 +411,7 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
 	.proc = sa1100_rtc_proc,
 	.irq_set_freq = sa1100_irq_set_freq,
 	.irq_set_state = sa1100_irq_set_state,
+	.alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
 };
 
 static int sa1100_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 06e41ed93230..93314a9e7fa9 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -350,10 +350,6 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 	unsigned int ret = 0;
 
 	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		sh_rtc_setaie(dev, cmd == RTC_AIE_ON);
-		break;
 	case RTC_UIE_OFF:
 		rtc->periodic_freq &= ~PF_OXS;
 		sh_rtc_setcie(dev, 0);
@@ -369,6 +365,12 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 	return ret;
 }
 
+static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	sh_rtc_setaie(dev, enabled);
+	return 0;
+}
+
 static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -604,6 +606,7 @@ static struct rtc_class_ops sh_rtc_ops = {
 	.irq_set_state	= sh_rtc_irq_set_state,
 	.irq_set_freq	= sh_rtc_irq_set_freq,
 	.proc		= sh_rtc_proc,
+	.alarm_irq_enable = sh_rtc_alarm_irq_enable,
 };
 
 static int __init sh_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 51725f7755b0..a82d6fe97076 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -50,24 +50,9 @@ static int test_rtc_proc(struct device *dev, struct seq_file *seq)
 	return 0;
 }
 
-static int test_rtc_ioctl(struct device *dev, unsigned int cmd,
-	unsigned long arg)
+static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
 {
-	/* We do support interrupts, they're generated
-	 * using the sysfs interface.
-	 */
-	switch (cmd) {
-	case RTC_PIE_ON:
-	case RTC_PIE_OFF:
-	case RTC_UIE_ON:
-	case RTC_UIE_OFF:
-	case RTC_AIE_ON:
-	case RTC_AIE_OFF:
-		return 0;
-
-	default:
-		return -ENOIOCTLCMD;
-	}
+	return 0;
 }
 
 static const struct rtc_class_ops test_rtc_ops = {
@@ -76,7 +61,7 @@ static const struct rtc_class_ops test_rtc_ops = {
 	.read_alarm = test_rtc_read_alarm,
 	.set_alarm = test_rtc_set_alarm,
 	.set_mmss = test_rtc_set_mmss,
-	.ioctl = test_rtc_ioctl,
+	.alarm_irq_enable = test_rtc_alarm_irq_enable,
 };
 
 static ssize_t test_irq_show(struct device *dev,
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c3244244e8cf..769190ac6d11 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -240,26 +240,6 @@ static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled)
 static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
-	case RTC_AIE_ON:
-		spin_lock_irq(&rtc_lock);
-
-		if (!alarm_enabled) {
-			enable_irq(aie_irq);
-			alarm_enabled = 1;
-		}
-
-		spin_unlock_irq(&rtc_lock);
-		break;
-	case RTC_AIE_OFF:
-		spin_lock_irq(&rtc_lock);
-
-		if (alarm_enabled) {
-			disable_irq(aie_irq);
-			alarm_enabled = 0;
-		}
-
-		spin_unlock_irq(&rtc_lock);
-		break;
 	case RTC_EPOCH_READ:
 		return put_user(epoch, (unsigned long __user *)arg);
 	case RTC_EPOCH_SET:
@@ -275,6 +255,24 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
 	return 0;
 }
 
+static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	spin_lock_irq(&rtc_lock);
+	if (enabled) {
+		if (!alarm_enabled) {
+			enable_irq(aie_irq);
+			alarm_enabled = 1;
+		}
+	} else {
+		if (alarm_enabled) {
+			disable_irq(aie_irq);
+			alarm_enabled = 0;
+		}
+	}
+	spin_unlock_irq(&rtc_lock);
+	return 0;
+}
+
 static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
 {
 	struct platform_device *pdev = (struct platform_device *)dev_id;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 318672d05563..a9fe23d5bd0f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline;
 static struct ccw_device_id dasd_eckd_ids[] = {
 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
-	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
+	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44578b56ad0a..d3e58d763b43 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
 {
 	struct Scsi_Host *host = rport_to_shost(rport);
 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+	unsigned long flags;
 
 	if (!fcport)
 		return;
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
 	 * Transport has effectively 'deleted' the rport, clear
 	 * all local references.
 	 */
-	spin_lock_irq(host->host_lock);
+	spin_lock_irqsave(host->host_lock, flags);
 	fcport->rport = fcport->drport = NULL;
 	*((fc_port_t **)rport->dd_data) = NULL;
-	spin_unlock_irq(host->host_lock);
+	spin_unlock_irqrestore(host->host_lock, flags);
 
 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
 		return;
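The qla2xxx hunks replace spin_lock_irq()/spin_unlock_irq() with the irqsave variants because these paths can be entered with interrupts already disabled; spin_unlock_irq() would unconditionally re-enable them. A minimal sketch of the safe pattern, with a hypothetical lock and shared field:

	static void update_under_lock(spinlock_t *lock, int *shared, int value)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);		/* remembers the caller's IRQ state */
		*shared = value;
		spin_unlock_irqrestore(lock, flags);	/* restores it instead of forcing IRQs on */
	}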
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f948e1a73aec..d9479c3fe5f8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data)
 {
 	fc_port_t *fcport = data;
 	struct fc_rport *rport;
+	unsigned long flags;
 
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	rport = fcport->drport ? fcport->drport: fcport->rport;
 	fcport->drport = NULL;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 	if (rport)
 		fc_remote_port_delete(rport);
 }
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 	struct fc_rport_identifiers rport_ids;
 	struct fc_rport *rport;
 	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
 
 	qla2x00_rport_del(fcport);
 
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 		    "Unable to allocate fc remote port!\n");
 		return;
 	}
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	*((fc_port_t **)rport->dd_data) = fcport;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 
 	rport->supported_classes = fcport->supported_classes;
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c194c23ca1fb..f27724d76cf6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
 	}
 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-			atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
 			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc24_fail_command;
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 {
 	struct fc_rport *rport;
 	scsi_qla_host_t *base_vha;
+	unsigned long flags;
 
 	if (!fcport->rport)
 		return;
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 	rport = fcport->rport;
 	if (defer) {
 		base_vha = pci_get_drvdata(vha->hw->pdev);
-		spin_lock_irq(vha->host->host_lock);
+		spin_lock_irqsave(vha->host->host_lock, flags);
 		fcport->drport = rport;
-		spin_unlock_irq(vha->host->host_lock);
+		spin_unlock_irqrestore(vha->host->host_lock, flags);
 		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 		qla2xxx_wake_dpc(base_vha);
 	} else
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data)
 
 	set_user_nice(current, -20);
 
+	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
 
-		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 		__set_current_state(TASK_RUNNING);
 
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data)
 		qla2x00_do_dpc_all_vps(base_vha);
 
 		ha->dpc_active = 0;
+		set_current_state(TASK_INTERRUPTIBLE);
 	} /* End of while(1) */
+	__set_current_state(TASK_RUNNING);
 
 	DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
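The DPC-thread change above fixes a lost-wakeup window: the task state must be set to TASK_INTERRUPTIBLE before checking kthread_should_stop() and the work flags, so a wakeup that arrives in between still cancels the following schedule(). The idiom in isolation, as a sketch (thread creation via kthread_run() omitted):

	static int example_kthread(void *data)
	{
		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop()) {
			schedule();			/* sleep until woken or told to stop */
			__set_current_state(TASK_RUNNING);

			/* ... handle the queued work here ... */

			set_current_state(TASK_INTERRUPTIBLE);	/* re-arm before the next check */
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}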
 
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b310934efed..a6b2d72022fc 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
 			    unsigned long long lba, unsigned int num, int write)
 {
 	int ret;
-	unsigned int block, rest = 0;
+	unsigned long long block, rest = 0;
 	int (*func)(struct scsi_cmnd *, unsigned char *, int);
 
 	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index 351d8a375b57..19752b09e155 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -7,10 +7,9 @@
 #include <linux/of_device.h>
 #include <linux/spi/pxa2xx_spi.h>
 
-struct awesome_struct {
+struct ce4100_info {
 	struct ssp_device ssp;
-	struct platform_device spi_pdev;
-	struct pxa2xx_spi_master spi_pdata;
+	struct platform_device *spi_pdev;
 };
 
 static DEFINE_MUTEX(ssp_lock);
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp)
 }
 EXPORT_SYMBOL_GPL(pxa_ssp_free);
 
-static void plat_dev_release(struct device *dev)
-{
-	struct awesome_struct *as = container_of(dev,
-			struct awesome_struct, spi_pdev.dev);
-
-	of_device_node_put(&as->spi_pdev.dev);
-}
-
 static int __devinit ce4100_spi_probe(struct pci_dev *dev,
 		const struct pci_device_id *ent)
 {
 	int ret;
 	resource_size_t phys_beg;
 	resource_size_t phys_len;
-	struct awesome_struct *spi_info;
+	struct ce4100_info *spi_info;
 	struct platform_device *pdev;
-	struct pxa2xx_spi_master *spi_pdata;
+	struct pxa2xx_spi_master spi_pdata;
 	struct ssp_device *ssp;
 
 	ret = pci_enable_device(dev);
@@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
 		return ret;
 	}
 
+	pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
 	spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
-	if (!spi_info) {
+	if (!pdev || !spi_info) {
 		ret = -ENOMEM;
-		goto err_kz;
+		goto err_nomem;
 	}
-	ssp = &spi_info->ssp;
-	pdev = &spi_info->spi_pdev;
-	spi_pdata =  &spi_info->spi_pdata;
+	memset(&spi_pdata, 0, sizeof(spi_pdata));
+	spi_pdata.num_chipselect = dev->devfn;
 
-	pdev->name = "pxa2xx-spi";
-	pdev->id = dev->devfn;
-	pdev->dev.parent = &dev->dev;
-	pdev->dev.platform_data = &spi_info->spi_pdata;
+	ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
+	if (ret)
+		goto err_nomem;
 
+	pdev->dev.parent = &dev->dev;
 #ifdef CONFIG_OF
 	pdev->dev.of_node = dev->dev.of_node;
 #endif
-	pdev->dev.release = plat_dev_release;
-
-	spi_pdata->num_chipselect = dev->devfn;
-
+	ssp = &spi_info->ssp;
 	ssp->phys_base = pci_resource_start(dev, 0);
 	ssp->mmio_base = ioremap(phys_beg, phys_len);
 	if (!ssp->mmio_base) {
 		dev_err(&pdev->dev, "failed to ioremap() registers\n");
 		ret = -EIO;
-		goto err_remap;
+		goto err_nomem;
 	}
 	ssp->irq = dev->irq;
 	ssp->port_id = pdev->id;
@@ -122,7 +110,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
 
 	pci_set_drvdata(dev, spi_info);
 
-	ret = platform_device_register(pdev);
+	ret = platform_device_add(pdev);
 	if (ret)
 		goto err_dev_add;
 
@@ -135,27 +123,21 @@ err_dev_add:
 	mutex_unlock(&ssp_lock);
 	iounmap(ssp->mmio_base);
 
-err_remap:
-	kfree(spi_info);
-
-err_kz:
+err_nomem:
 	release_mem_region(phys_beg, phys_len);
-
+	platform_device_put(pdev);
+	kfree(spi_info);
 	return ret;
 }
 
 static void __devexit ce4100_spi_remove(struct pci_dev *dev)
 {
-	struct awesome_struct *spi_info;
-	struct platform_device *pdev;
+	struct ce4100_info *spi_info;
 	struct ssp_device *ssp;
 
 	spi_info = pci_get_drvdata(dev);
-
 	ssp = &spi_info->ssp;
-	pdev = &spi_info->spi_pdev;
-
-	platform_device_unregister(pdev);
+	platform_device_unregister(spi_info->spi_pdev);
 
 	iounmap(ssp->mmio_base);
 	release_mem_region(pci_resource_start(dev, 0),
@@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev)
 }
 
 static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
-
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
 	{ },
 };
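The pxa2xx_spi_pci rework above stops embedding struct platform_device inside a kzalloc'd container, whose lifetime the driver core cannot manage, and switches to platform_device_alloc()/platform_device_add_data()/platform_device_add(). A minimal sketch of that allocation flow, with hypothetical names:

	struct example_pdata { int num_chipselect; };	/* hypothetical platform data */

	static struct platform_device *example_register(struct device *parent, int id)
	{
		struct example_pdata pdata = { .num_chipselect = id };
		struct platform_device *pdev;
		int ret;

		pdev = platform_device_alloc("example-dev", id);	/* refcounted allocation */
		if (!pdev)
			return ERR_PTR(-ENOMEM);

		ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));	/* data is copied */
		if (ret)
			goto err;

		pdev->dev.parent = parent;
		ret = platform_device_add(pdev);
		if (ret)
			goto err;
		return pdev;
	err:
		platform_device_put(pdev);	/* drops the reference taken by _alloc() */
		return ERR_PTR(ret);
	}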
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index 56f60c8ea0ab..2c665fceaac7 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -509,9 +509,11 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
 	bytes_done = 0;
 
 	while (bytes_done < t->len) {
+		void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL;
+		const void *tx_buf = t->tx_buf ? t->tx_buf + bytes_done : NULL;
 		n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo,
-					   t->tx_buf + bytes_done,
-					   t->rx_buf + bytes_done,
+					   tx_buf,
+					   rx_buf,
 					   words, bits);
 		if (n < 0)
 			break;
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index c7345dbf43fa..f8533795ee7f 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -733,7 +733,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
 
 	/* Fetch the vendor specific tuples. */
 	res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
-				ssb_pcmcia_do_get_invariants, sprom);
+				ssb_pcmcia_do_get_invariants, iv);
 	if ((res == 0) || (res == -ENOSPC))
 		return 0;
 
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
index f1235884cc5d..cd8392badff0 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
@@ -263,9 +263,7 @@ ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
 	switch (type) {
 	case NL80211_CHAN_HT20:
 	case NL80211_CHAN_NO_HT:
-		WL_LOCK(wl);
 		err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value);
-		WL_UNLOCK(wl);
 		break;
 	case NL80211_CHAN_HT40MINUS:
 	case NL80211_CHAN_HT40PLUS:
@@ -285,6 +283,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
 	int err = 0;
 	int new_int;
 
+	WL_LOCK(wl);
 	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
 		WL_NONE("%s: Setting listen interval to %d\n",
 			__func__, conf->listen_interval);
@@ -341,6 +340,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
 	}
 
  config_out:
+	WL_UNLOCK(wl);
 	return err;
 }
 
@@ -459,13 +459,21 @@ wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
 
 static void wl_ops_sw_scan_start(struct ieee80211_hw *hw)
 {
+	struct wl_info *wl = hw->priv;
 	WL_NONE("Scan Start\n");
+	WL_LOCK(wl);
+	wlc_scan_start(wl->wlc);
+	WL_UNLOCK(wl);
 	return;
 }
 
 static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw)
 {
+	struct wl_info *wl = hw->priv;
 	WL_NONE("Scan Complete\n");
+	WL_LOCK(wl);
+	wlc_scan_stop(wl->wlc);
+	WL_UNLOCK(wl);
 	return;
 }
 
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c
index a1303863686c..e37e8058e2b8 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c
@@ -8461,3 +8461,16 @@ static void wlc_txq_free(struct wlc_info *wlc, struct osl_info *osh,
 
 	kfree(qi);
 }
+
+/*
+ * Flag 'scan in progress' to withhold dynamic phy calibration
+ */
+void wlc_scan_start(struct wlc_info *wlc)
+{
+	wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
+}
+
+void wlc_scan_stop(struct wlc_info *wlc)
+{
+	wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
+}
diff --git a/drivers/staging/brcm80211/sys/wlc_pub.h b/drivers/staging/brcm80211/sys/wlc_pub.h
index 146a6904a39b..aff413001b70 100644
--- a/drivers/staging/brcm80211/sys/wlc_pub.h
+++ b/drivers/staging/brcm80211/sys/wlc_pub.h
@@ -570,6 +570,8 @@ extern void wlc_enable_mac(struct wlc_info *wlc);
 extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate);
 extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg);
 extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg);
+extern void wlc_scan_start(struct wlc_info *wlc);
+extern void wlc_scan_stop(struct wlc_info *wlc);
 
 static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name,
 				    uint *arg)
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index aad47326d6dc..1502d80f6f78 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -439,6 +439,7 @@ config COMEDI_NI_AT_AO
 config COMEDI_NI_ATMIO
 	tristate "NI AT-MIO E series ISA-PNP card support"
 	depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON
+	select COMEDI_8255
 	default N
 	---help---
 	  Enable support for National Instruments AT-MIO E series cards
@@ -1040,6 +1041,8 @@ config COMEDI_NI_PCIDIO
 config COMEDI_NI_PCIMIO
 	tristate "NI PCI-MIO-E series and M series support"
 	depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+	select COMEDI_8255
+	select COMEDI_FC
 	default N
 	---help---
 	  Enable support for National Instruments PCI-MIO-E series and M series
@@ -1164,6 +1167,7 @@ config COMEDI_NI_LABPC_CS
 config COMEDI_NI_MIO_CS
 	tristate "NI DAQCard E series PCMCIA support"
 	depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+	select COMEDI_8255
 	select COMEDI_FC
 	default N
 	---help---
@@ -1268,7 +1272,6 @@ config COMEDI_MITE
 config COMEDI_NI_TIO
 	tristate "NI general purpose counter support"
 	depends on COMEDI_MITE
-	select COMEDI_8255
 	default N
 	---help---
 	  Enable support for National Instruments general purpose counters.
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index cd25b241cc1f..fd274e9c7b78 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -61,8 +61,6 @@
 #define PCI_DAQ_SIZE		4096
 #define PCI_DAQ_SIZE_660X       8192
 
-MODULE_LICENSE("GPL");
-
 struct mite_struct *mite_devices;
 EXPORT_SYMBOL(mite_devices);
 
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 14e716e99a5c..54741c9e1af5 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -527,3 +527,7 @@ static void __exit driver_ni6527_cleanup_module(void)
 
 module_init(driver_ni6527_init_module);
 module_exit(driver_ni6527_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 8b8e2aaf77fb..403fc0997d37 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -871,3 +871,7 @@ static void __exit driver_ni_65xx_cleanup_module(void)
 
 module_init(driver_ni_65xx_init_module);
 module_exit(driver_ni_65xx_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 6612b085c4ef..ca2aeaa9449c 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1421,3 +1421,7 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
 	};
 	return 0;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index e9f034efdc6f..d8d91f90060e 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -384,3 +384,7 @@ static int ni_670x_find_device(struct comedi_device *dev, int bus, int slot)
 	mite_list_devices();
 	return -EIO;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 84a15c34e484..005d2fe86ee4 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -1354,3 +1354,7 @@ static void __exit driver_pcidio_cleanup_module(void)
 
 module_init(driver_pcidio_init_module);
 module_exit(driver_pcidio_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 23a381247285..9148abdad074 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1853,3 +1853,7 @@ static int pcimio_dio_change(struct comedi_device *dev,
 
 	return 0;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 54706a16dc0a..b41c9640b72d 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -236,6 +236,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
 	if (status == 1) {
 		netif_carrier_on(net);
 		netif_wake_queue(net);
+		netif_notify_peers(net);
 	} else {
 		netif_carrier_off(net);
 		netif_stop_queue(net);
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
index e38e89df6e84..e2f6d6a3c850 100644
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ b/drivers/staging/intel_sst/intelmid_v2_control.c
@@ -874,7 +874,10 @@ static int nc_set_selected_input_dev(u8 value)
 		sc_access[3].reg_addr = 0x109;
 		sc_access[3].mask = MASK6;
 		sc_access[3].value = 0x00;
-		num_val = 4;
+		sc_access[4].reg_addr = 0x104;
+		sc_access[4].value = 0x3C;
+		sc_access[4].mask = 0xff;
+		num_val = 5;
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 5415712f01f8..4bd8cbdaee76 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -227,6 +227,7 @@ static int zram_read(struct zram *zram, struct bio *bio)
 
 		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
 			handle_zero_page(page);
+			index++;
 			continue;
 		}
 
@@ -235,12 +236,14 @@ static int zram_read(struct zram *zram, struct bio *bio)
 			pr_debug("Read before write: sector=%lu, size=%u",
 				(ulong)(bio->bi_sector), bio->bi_size);
 			/* Do nothing */
+			index++;
 			continue;
 		}
 
 		/* Page is stored uncompressed since it's incompressible */
 		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 			handle_uncompressed_page(zram, page, index);
+			index++;
 			continue;
 		}
 
@@ -320,6 +323,7 @@ static int zram_write(struct zram *zram, struct bio *bio)
 			mutex_unlock(&zram->lock);
 			zram_stat_inc(&zram->stats.pages_zero);
 			zram_set_flag(zram, index, ZRAM_ZERO);
+			index++;
 			continue;
 		}
 
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 5cfd70819f08..973bb190ef57 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,8 +13,7 @@ target_core_mod-y		:= target_core_configfs.o \
 				   target_core_transport.o \
 				   target_core_cdb.o \
 				   target_core_ua.o \
-				   target_core_rd.o \
-				   target_core_mib.o
+				   target_core_rd.o
 
 obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2764510798b0..caf8dc18ee0a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,7 +37,6 @@
 #include <linux/parser.h>
 #include <linux/syscalls.h>
 #include <linux/configfs.h>
-#include <linux/proc_fs.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
@@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item)
 {
 	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
 				struct se_subsystem_dev, se_dev_group);
-	struct config_group *dev_cg;
-
-	if (!(se_dev))
-		return;
+	struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+	struct se_subsystem_api *t = hba->transport;
+	struct config_group *dev_cg = &se_dev->se_dev_group;
 
-	dev_cg = &se_dev->se_dev_group;
 	kfree(dev_cg->default_groups);
+	/*
+	 * This pointer will be set when the storage is enabled with:
+	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+	 */
+	if (se_dev->se_dev_ptr) {
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+			"virtual_device() for se_dev_ptr: %p\n",
+			se_dev->se_dev_ptr);
+
+		se_free_virtual_device(se_dev->se_dev_ptr, hba);
+	} else {
+		/*
+		 * Release struct se_subsystem_dev->se_dev_su_ptr..
+		 */
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+			"device() for se_dev_su_ptr: %p\n",
+			se_dev->se_dev_su_ptr);
+
+		t->free_device(se_dev->se_dev_su_ptr);
+	}
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+			"_dev_t: %p\n", se_dev);
+	kfree(se_dev);
 }
 
 static ssize_t target_core_dev_show(struct config_item *item,
@@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
 	NULL,
 };
 
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	core_alua_free_lu_gp(lu_gp);
+}
+
 static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+	.release		= target_core_alua_lu_gp_release,
 	.show_attribute		= target_core_alua_lu_gp_attr_show,
 	.store_attribute	= target_core_alua_lu_gp_attr_store,
 };
@@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp(
 	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
 		" Group: core/alua/lu_gps/%s, ID: %hu\n",
 		config_item_name(item), lu_gp->lu_gp_id);
-
+	/*
+	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+	 * -> target_core_alua_lu_gp_release()
+	 */
 	config_item_put(item);
-	core_alua_free_lu_gp(lu_gp);
 }
 
 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
@@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
 	NULL,
 };
 
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+	.release		= target_core_alua_tg_pt_gp_release,
 	.show_attribute		= target_core_alua_tg_pt_gp_attr_show,
 	.store_attribute	= target_core_alua_tg_pt_gp_attr_store,
 };
@@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp(
 	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
 		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
 		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
-
+	/*
+	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+	 * -> target_core_alua_tg_pt_gp_release().
+	 */
 	config_item_put(item);
-	core_alua_free_tg_pt_gp(tg_pt_gp);
 }
 
 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
@@ -2771,13 +2814,11 @@ static void target_core_drop_subdev(
 	struct se_subsystem_api *t;
 	struct config_item *df_item;
 	struct config_group *dev_cg, *tg_pt_gp_cg;
-	int i, ret;
+	int i;
 
 	hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
 
-	if (mutex_lock_interruptible(&hba->hba_access_mutex))
-		goto out;
-
+	mutex_lock(&hba->hba_access_mutex);
 	t = hba->transport;
 
 	spin_lock(&se_global->g_device_lock);
@@ -2791,7 +2832,10 @@ static void target_core_drop_subdev(
 		config_item_put(df_item);
 	}
 	kfree(tg_pt_gp_cg->default_groups);
-	core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+	/*
+	 * core_alua_free_tg_pt_gp() is called for ->default_tg_pt_gp
+	 * directly from target_core_alua_tg_pt_gp_release().
+	 */
 	T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
 
 	dev_cg = &se_dev->se_dev_group;
@@ -2800,38 +2844,12 @@ static void target_core_drop_subdev(
 		dev_cg->default_groups[i] = NULL;
 		config_item_put(df_item);
 	}
-
-	config_item_put(item);
 	/*
-	 * This pointer will set when the storage is enabled with:
-	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+	 * The releasing of se_dev and associated se_dev->se_dev_ptr is done
+	 * from target_core_dev_item_ops->release() -> target_core_dev_release().
 	 */
-	if (se_dev->se_dev_ptr) {
-		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
-			"virtual_device() for se_dev_ptr: %p\n",
-				se_dev->se_dev_ptr);
-
-		ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
-		if (ret < 0)
-			goto hba_out;
-	} else {
-		/*
-		 * Release struct se_subsystem_dev->se_dev_su_ptr..
-		 */
-		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
-			"device() for se_dev_su_ptr: %p\n",
-			se_dev->se_dev_su_ptr);
-
-		t->free_device(se_dev->se_dev_su_ptr);
-	}
-
-	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
-		"_dev_t: %p\n", se_dev);
-
-hba_out:
+	config_item_put(item);
 	mutex_unlock(&hba->hba_access_mutex);
-out:
-	kfree(se_dev);
 }
 
 static struct configfs_group_operations target_core_hba_group_ops = {
@@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
 
 CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
 
+static void target_core_hba_release(struct config_item *item)
+{
+	struct se_hba *hba = container_of(to_config_group(item),
+				struct se_hba, hba_group);
+	core_delete_hba(hba);
+}
+
 static struct configfs_attribute *target_core_hba_attrs[] = {
 	&target_core_hba_hba_info.attr,
 	&target_core_hba_hba_mode.attr,
@@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = {
 };
 
 static struct configfs_item_operations target_core_hba_item_ops = {
+	.release		= target_core_hba_release,
 	.show_attribute		= target_core_hba_attr_show,
 	.store_attribute	= target_core_hba_attr_store,
 };
@@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_hba *hba = item_to_hba(item);
-
+	/*
+	 * core_delete_hba() is called from target_core_hba_item_ops->release()
+	 * -> target_core_hba_release()
+	 */
 	config_item_put(item);
-	core_delete_hba(hba);
 }
 
 static struct configfs_group_operations target_core_group_ops = {
@@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void)
 	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
 	struct config_group *lu_gp_cg = NULL;
 	struct configfs_subsystem *subsys;
-	struct proc_dir_entry *scsi_target_proc = NULL;
 	struct t10_alua_lu_gp *lu_gp;
 	int ret;
 
@@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void)
 	if (core_dev_setup_virtual_lun0() < 0)
 		goto out;
 
-	scsi_target_proc = proc_mkdir("scsi_target", 0);
-	if (!(scsi_target_proc)) {
-		printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
-		goto out;
-	}
-	ret = init_scsi_target_mib();
-	if (ret < 0)
-		goto out;
-
 	return 0;
 
 out:
 	configfs_unregister_subsystem(subsys);
-	if (scsi_target_proc)
-		remove_proc_entry("scsi_target", 0);
 	core_dev_release_virtual_lun0();
 	rd_module_exit();
 out_global:
@@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void)
 		config_item_put(item);
 	}
 	kfree(lu_gp_cg->default_groups);
-	core_alua_free_lu_gp(se_global->default_lu_gp);
-	se_global->default_lu_gp = NULL;
+	lu_gp_cg->default_groups = NULL;
 
 	alua_cg = &se_global->alua_group;
 	for (i = 0; alua_cg->default_groups[i]; i++) {
@@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void)
 		config_item_put(item);
 	}
 	kfree(alua_cg->default_groups);
+	alua_cg->default_groups = NULL;
 
 	hba_cg = &se_global->target_core_hbagroup;
 	for (i = 0; hba_cg->default_groups[i]; i++) {
@@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void)
 		config_item_put(item);
 	}
 	kfree(hba_cg->default_groups);
-
-	for (i = 0; subsys->su_group.default_groups[i]; i++) {
-		item = &subsys->su_group.default_groups[i]->cg_item;
-		subsys->su_group.default_groups[i] = NULL;
-		config_item_put(item);
-	}
+	hba_cg->default_groups = NULL;
+	/*
+	 * We expect subsys->su_group.default_groups to be released
+	 * by configfs subsystem provider logic..
+	 */
+	configfs_unregister_subsystem(subsys);
 	kfree(subsys->su_group.default_groups);
 
-	configfs_unregister_subsystem(subsys);
+	core_alua_free_lu_gp(se_global->default_lu_gp);
+	se_global->default_lu_gp = NULL;
+
 	printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
 			" Infrastructure\n");
 
-	remove_scsi_target_mib();
-	remove_proc_entry("scsi_target", 0);
 	core_dev_release_virtual_lun0();
 	rd_module_exit();
 	release_se_global();
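Most of the target_core_configfs changes above move object teardown out of the ->drop_item() callbacks and into configfs_item_operations.release(), so an object is freed only when its last config_item reference goes away rather than at config_item_put() time. A minimal sketch of the pattern, with a hypothetical example_obj type:

	static void example_item_release(struct config_item *item)
	{
		struct example_obj *obj = container_of(to_config_group(item),
					struct example_obj, group);

		example_free(obj);	/* hypothetical teardown, runs at the final reference drop */
	}

	static struct configfs_item_operations example_item_ops = {
		.release = example_item_release,
	};

	static void example_drop_item(struct config_group *group, struct config_item *item)
	{
		config_item_put(item);	/* just drop the reference; ->release() frees */
	}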
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 317ce58d426d..5da051a07fa3 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -373,11 +373,11 @@ int core_update_device_list_for_node(
 		/*
 		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
 		 * that have not been explictly concerted to MappedLUNs ->
-		 * struct se_lun_acl.
+		 * struct se_lun_acl, but we remove deve->alua_port_list from
+		 * port->sep_alua_list. This also means that active UAs and
+		 * NodeACL context-specific PR metadata for demo-mode
+		 * MappedLUN *deve will be released below.
 		 */
-		if (!(deve->se_lun_acl))
-			return 0;
-
 		spin_lock_bh(&port->sep_alua_lock);
 		list_del(&deve->alua_port_list);
 		spin_unlock_bh(&port->sep_alua_lock);
@@ -395,12 +395,14 @@ int core_update_device_list_for_node(
 				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
 					" already set for demo mode -> explict"
 					" LUN ACL transition\n");
+				spin_unlock_irq(&nacl->device_list_lock);
 				return -1;
 			}
 			if (deve->se_lun != lun) {
 				printk(KERN_ERR "struct se_dev_entry->se_lun does"
 					" match passed struct se_lun for demo mode"
 					" -> explict LUN ACL transition\n");
+				spin_unlock_irq(&nacl->device_list_lock);
 				return -1;
 			}
 			deve->se_lun_acl = lun_acl;
@@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev)
 		}
 	}
 	spin_unlock(&hba->device_lock);
-
-	while (atomic_read(&hba->dev_mib_access_count))
-		cpu_relax();
 }
 
 int se_dev_check_online(struct se_device *dev)
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 32b148d7e261..b65d1c8e7740 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
 
 CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
 
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+				struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
 static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
 	&target_fabric_mappedlun_write_protect.attr,
 	NULL,
 };
 
 static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+	.release		= target_fabric_mappedlun_release,
 	.show_attribute		= target_fabric_mappedlun_attr_show,
 	.store_attribute	= target_fabric_mappedlun_attr_store,
 	.allow_link		= target_fabric_mappedlun_link,
@@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_lun_acl *lacl = container_of(to_config_group(item),
-			struct se_lun_acl, se_lun_group);
-	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
-
 	config_item_put(item);
-	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
 }
 
 static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+	.release		= target_fabric_nacl_base_release,
 	.show_attribute		= target_fabric_nacl_base_attr_show,
 	.store_attribute	= target_fabric_nacl_base_attr_store,
 };
@@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_portal_group *se_tpg = container_of(group,
-			struct se_portal_group, tpg_acl_group);
-	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
 	struct se_node_acl *se_nacl = container_of(to_config_group(item),
 			struct se_node_acl, acl_group);
 	struct config_item *df_item;
@@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl(
 		nacl_cg->default_groups[i] = NULL;
 		config_item_put(df_item);
 	}
-
+	/*
+	 * struct se_node_acl free is done in target_fabric_nacl_base_release()
+	 */
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
 }
 
 static struct configfs_group_operations target_fabric_nacl_group_ops = {
@@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
 
 CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
 
+static void target_fabric_np_base_release(struct config_item *item)
+{
+	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+				struct se_tpg_np, tpg_np_group);
+	struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
 static struct configfs_item_operations target_fabric_np_base_item_ops = {
+	.release		= target_fabric_np_base_release,
 	.show_attribute		= target_fabric_np_base_attr_show,
 	.store_attribute	= target_fabric_np_base_attr_store,
 };
@@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np(
 	if (!(se_tpg_np) || IS_ERR(se_tpg_np))
 		return ERR_PTR(-EINVAL);
 
+	se_tpg_np->tpg_np_parent = se_tpg;
 	config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
 			&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
 
@@ -476,14 +502,10 @@ static void target_fabric_drop_np(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_portal_group *se_tpg = container_of(group,
-				struct se_portal_group, tpg_np_group);
-	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
-	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
-				struct se_tpg_np, tpg_np_group);
-
+	/*
+	 * struct se_tpg_np is released via target_fabric_np_base_release()
+	 */
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_np(se_tpg_np);
 }
 
 static struct configfs_group_operations target_fabric_np_group_ops = {
@@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
  */
 CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
 
+static void target_fabric_tpg_release(struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+			struct se_portal_group, tpg_group);
+	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
 static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+	.release		= target_fabric_tpg_release,
 	.show_attribute		= target_fabric_tpg_attr_show,
 	.store_attribute	= target_fabric_tpg_attr_store,
 };
@@ -872,8 +905,6 @@ static void target_fabric_drop_tpg(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
-	struct target_fabric_configfs *tf = wwn->wwn_tf;
 	struct se_portal_group *se_tpg = container_of(to_config_group(item),
 				struct se_portal_group, tpg_group);
 	struct config_group *tpg_cg = &se_tpg->tpg_group;
@@ -890,15 +921,28 @@ static void target_fabric_drop_tpg(
 	}
 
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_tpg(se_tpg);
 }
 
+static void target_fabric_release_wwn(struct config_item *item)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item),
+				struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+	.release	= target_fabric_release_wwn,
+};
+
 static struct configfs_group_operations target_fabric_tpg_group_ops = {
 	.make_group	= target_fabric_make_tpg,
 	.drop_item	= target_fabric_drop_tpg,
 };
 
-TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+		NULL);
 
 /* End of tfc_tpg_cit */
 
@@ -932,13 +976,7 @@ static void target_fabric_drop_wwn(
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct target_fabric_configfs *tf = container_of(group,
-				struct target_fabric_configfs, tf_group);
-	struct se_wwn *wwn = container_of(to_config_group(item),
-				struct se_wwn, wwn_group);
-
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_wwn(wwn);
 }
 
 static struct configfs_group_operations target_fabric_wwn_group_ops = {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c6e0d757e76e..67f0c09983c8 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice(
 
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
 				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
-	if (!(bd))
+	if (IS_ERR(bd))
 		goto failed;
 	/*
 	 * Setup the local scope queue_limits from struct request_queue->limits
@@ -220,8 +220,10 @@ static void iblock_free_device(void *p)
 {
 	struct iblock_dev *ib_dev = p;
 
-	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-	bioset_free(ib_dev->ibd_bio_set);
+	if (ib_dev->ibd_bd != NULL)
+		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+	if (ib_dev->ibd_bio_set != NULL)
+		bioset_free(ib_dev->ibd_bio_set);
 	kfree(ib_dev);
 }
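The iblock changes above reflect that blkdev_get_by_path() reports failure with an ERR_PTR()-encoded pointer, never NULL, so the old !(bd) test could not catch an open error, and that free_device() must tolerate a partially initialised device. The open-error check in isolation, as a sketch:

	bd = blkdev_get_by_path(path, FMODE_WRITE | FMODE_READ | FMODE_EXCL, holder);
	if (IS_ERR(bd))
		return PTR_ERR(bd);	/* ERR_PTR-style error, not NULL */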
 
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
deleted file mode 100644
index d5a48aa0d2d1..000000000000
--- a/drivers/target/target_core_mib.c
+++ /dev/null
@@ -1,1078 +0,0 @@
-/*******************************************************************************
- * Filename:  target_core_mib.c
- *
- * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
- *
- * Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- ******************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <generated/utsrelease.h>
-#include <linux/utsname.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_configfs.h>
-
-#include "target_core_hba.h"
-#include "target_core_mib.h"
-
-/* SCSI mib table index */
-static struct scsi_index_table scsi_index_table;
-
-#ifndef INITIAL_JIFFIES
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
-#endif
-
-/* SCSI Instance Table */
-#define SCSI_INST_SW_INDEX		1
-#define SCSI_TRANSPORT_INDEX		1
-
-#define NONE		"None"
-#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
-
-static inline int list_is_first(const struct list_head *list,
-				const struct list_head *head)
-{
-	return list->prev == head;
-}
-
-static void *locate_hba_start(
-	struct seq_file *seq,
-	loff_t *pos)
-{
-	spin_lock(&se_global->g_device_lock);
-	return seq_list_start(&se_global->g_se_dev_list, *pos);
-}
-
-static void *locate_hba_next(
-	struct seq_file *seq,
-	void *v,
-	loff_t *pos)
-{
-	return seq_list_next(v, &se_global->g_se_dev_list, pos);
-}
-
-static void locate_hba_stop(struct seq_file *seq, void *v)
-{
-	spin_unlock(&se_global->g_device_lock);
-}
-
-/****************************************************************************
- * SCSI MIB Tables
- ****************************************************************************/
-
-/*
- * SCSI Instance Table
- */
-static void *scsi_inst_seq_start(
-	struct seq_file *seq,
-	loff_t *pos)
-{
-	spin_lock(&se_global->hba_lock);
-	return seq_list_start(&se_global->g_hba_list, *pos);
-}
-
-static void *scsi_inst_seq_next(
-	struct seq_file *seq,
-	void *v,
-	loff_t *pos)
-{
-	return seq_list_next(v, &se_global->g_hba_list, pos);
-}
-
-static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
-{
-	spin_unlock(&se_global->hba_lock);
-}
-
-static int scsi_inst_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
-
-	if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
-		seq_puts(seq, "inst sw_indx\n");
-
-	seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
-	seq_printf(seq, "plugin: %s version: %s\n",
-			hba->transport->name, TARGET_CORE_VERSION);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_inst_seq_ops = {
-	.start	= scsi_inst_seq_start,
-	.next	= scsi_inst_seq_next,
-	.stop	= scsi_inst_seq_stop,
-	.show	= scsi_inst_seq_show
-};
-
-static int scsi_inst_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_inst_seq_ops);
-}
-
-static const struct file_operations scsi_inst_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_inst_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Device Table
- */
-static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_dev_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	char str[28];
-	int k;
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst indx role ports\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
-		   dev->dev_index, "Target", dev->dev_port_count);
-
-	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-
-	/* vendor */
-	for (k = 0; k < 8; k++)
-		str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
-				DEV_T10_WWN(dev)->vendor[k] : 0x20;
-	str[k] = 0x20;
-
-	/* model */
-	for (k = 0; k < 16; k++)
-		str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
-				DEV_T10_WWN(dev)->model[k] : 0x20;
-	str[k + 9] = 0;
-
-	seq_printf(seq, "dev_alias: %s\n", str);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_dev_seq_ops = {
-	.start  = scsi_dev_seq_start,
-	.next   = scsi_dev_seq_next,
-	.stop   = scsi_dev_seq_stop,
-	.show   = scsi_dev_seq_show
-};
-
-static int scsi_dev_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_dev_seq_ops);
-}
-
-static const struct file_operations scsi_dev_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_dev_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Port Table
- */
-static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_port_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_port_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	struct se_port *sep, *sep_tmp;
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst device indx role busy_count\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	/* FIXME: scsiPortBusyStatuses count */
-	spin_lock(&dev->se_port_lock);
-	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
-		seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
-			dev->dev_index, sep->sep_index, "Device",
-			dev->dev_index, 0);
-	}
-	spin_unlock(&dev->se_port_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_port_seq_ops = {
-	.start  = scsi_port_seq_start,
-	.next   = scsi_port_seq_next,
-	.stop   = scsi_port_seq_stop,
-	.show   = scsi_port_seq_show
-};
-
-static int scsi_port_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_port_seq_ops);
-}
-
-static const struct file_operations scsi_port_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_port_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Transport Table
- */
-static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_transport_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	struct se_port *se, *se_tmp;
-	struct se_portal_group *tpg;
-	struct t10_wwn *wwn;
-	char buf[64];
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst device indx dev_name\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	wwn = DEV_T10_WWN(dev);
-
-	spin_lock(&dev->se_port_lock);
-	list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
-		tpg = se->sep_tpg;
-		sprintf(buf, "scsiTransport%s",
-				TPG_TFO(tpg)->get_fabric_name());
-
-		seq_printf(seq, "%u %s %u %s+%s\n",
-			hba->hba_index, /* scsiTransportIndex */
-			buf,  /* scsiTransportType */
-			(TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
-			TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
-			0,
-			TPG_TFO(tpg)->tpg_get_wwn(tpg),
-			(strlen(wwn->unit_serial)) ?
-			/* scsiTransportDevName */
-			wwn->unit_serial : wwn->vendor);
-	}
-	spin_unlock(&dev->se_port_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_transport_seq_ops = {
-	.start  = scsi_transport_seq_start,
-	.next   = scsi_transport_seq_next,
-	.stop   = scsi_transport_seq_stop,
-	.show   = scsi_transport_seq_show
-};
-
-static int scsi_transport_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_transport_seq_ops);
-}
-
-static const struct file_operations scsi_transport_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_transport_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Target Device Table
- */
-static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-
-#define LU_COUNT	1  /* for now */
-static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	int non_accessible_lus = 0;
-	char status[16];
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst indx num_LUs status non_access_LUs"
-			" resets\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	switch (dev->dev_status) {
-	case TRANSPORT_DEVICE_ACTIVATED:
-		strcpy(status, "activated");
-		break;
-	case TRANSPORT_DEVICE_DEACTIVATED:
-		strcpy(status, "deactivated");
-		non_accessible_lus = 1;
-		break;
-	case TRANSPORT_DEVICE_SHUTDOWN:
-		strcpy(status, "shutdown");
-		non_accessible_lus = 1;
-		break;
-	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-		strcpy(status, "offline");
-		non_accessible_lus = 1;
-		break;
-	default:
-		sprintf(status, "unknown(%d)", dev->dev_status);
-		non_accessible_lus = 1;
-	}
-
-	seq_printf(seq, "%u %u %u %s %u %u\n",
-		   hba->hba_index, dev->dev_index, LU_COUNT,
-		   status, non_accessible_lus, dev->num_resets);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_tgt_dev_seq_ops = {
-	.start  = scsi_tgt_dev_seq_start,
-	.next   = scsi_tgt_dev_seq_next,
-	.stop   = scsi_tgt_dev_seq_stop,
-	.show   = scsi_tgt_dev_seq_show
-};
-
-static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_tgt_dev_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_dev_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_tgt_dev_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Target Port Table
- */
-static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	struct se_port *sep, *sep_tmp;
-	struct se_portal_group *tpg;
-	u32 rx_mbytes, tx_mbytes;
-	unsigned long long num_cmds;
-	char buf[64];
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst device indx name port_index in_cmds"
-			" write_mbytes read_mbytes hs_in_cmds\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	spin_lock(&dev->se_port_lock);
-	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
-		tpg = sep->sep_tpg;
-		sprintf(buf, "%sPort#",
-			TPG_TFO(tpg)->get_fabric_name());
-
-		seq_printf(seq, "%u %u %u %s%d %s%s%d ",
-		     hba->hba_index,
-		     dev->dev_index,
-		     sep->sep_index,
-		     buf, sep->sep_index,
-		     TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
-		     TPG_TFO(tpg)->tpg_get_tag(tpg));
-
-		spin_lock(&sep->sep_lun->lun_sep_lock);
-		num_cmds = sep->sep_stats.cmd_pdus;
-		rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
-		tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
-		spin_unlock(&sep->sep_lun->lun_sep_lock);
-
-		seq_printf(seq, "%llu %u %u %u\n", num_cmds,
-			rx_mbytes, tx_mbytes, 0);
-	}
-	spin_unlock(&dev->se_port_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_tgt_port_seq_ops = {
-	.start  = scsi_tgt_port_seq_start,
-	.next   = scsi_tgt_port_seq_next,
-	.stop   = scsi_tgt_port_seq_stop,
-	.show   = scsi_tgt_port_seq_show
-};
-
-static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_tgt_port_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_port_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_tgt_port_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Authorized Initiator Table:
- * It contains the SCSI Initiators authorized to be attached to one of the
- * local Target ports.
- * Iterates through all active TPGs and extracts the info from the ACLs
- */
-static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	spin_lock_bh(&se_global->se_tpg_lock);
-	return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
-					 loff_t *pos)
-{
-	return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
-{
-	spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
-						se_tpg_list);
-	struct se_dev_entry *deve;
-	struct se_lun *lun;
-	struct se_node_acl *se_nacl;
-	int j;
-
-	if (list_is_first(&se_tpg->se_tpg_list,
-			  &se_global->g_se_tpg_list))
-		seq_puts(seq, "inst dev port indx dev_or_port intr_name "
-			 "map_indx att_count num_cmds read_mbytes "
-			 "write_mbytes hs_num_cmds creation_time row_status\n");
-
-	if (!(se_tpg))
-		return 0;
-
-	spin_lock(&se_tpg->acl_node_lock);
-	list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
-
-		atomic_inc(&se_nacl->mib_ref_count);
-		smp_mb__after_atomic_inc();
-		spin_unlock(&se_tpg->acl_node_lock);
-
-		spin_lock_irq(&se_nacl->device_list_lock);
-		for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
-			deve = &se_nacl->device_list[j];
-			if (!(deve->lun_flags &
-					TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
-			    (!deve->se_lun))
-				continue;
-			lun = deve->se_lun;
-			if (!lun->lun_se_dev)
-				continue;
-
-			seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
-					" %u %s\n",
-				/* scsiInstIndex */
-				(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
-				TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
-				0,
-				/* scsiDeviceIndex */
-				lun->lun_se_dev->dev_index,
-				/* scsiAuthIntrTgtPortIndex */
-				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
-				/* scsiAuthIntrIndex */
-				se_nacl->acl_index,
-				/* scsiAuthIntrDevOrPort */
-				1,
-				/* scsiAuthIntrName */
-				se_nacl->initiatorname[0] ?
-					se_nacl->initiatorname : NONE,
-				/* FIXME: scsiAuthIntrLunMapIndex */
-				0,
-				/* scsiAuthIntrAttachedTimes */
-				deve->attach_count,
-				/* scsiAuthIntrOutCommands */
-				deve->total_cmds,
-				/* scsiAuthIntrReadMegaBytes */
-				(u32)(deve->read_bytes >> 20),
-				/* scsiAuthIntrWrittenMegaBytes */
-				(u32)(deve->write_bytes >> 20),
-				/* FIXME: scsiAuthIntrHSOutCommands */
-				0,
-				/* scsiAuthIntrLastCreation */
-				(u32)(((u32)deve->creation_time -
-					    INITIAL_JIFFIES) * 100 / HZ),
-				/* FIXME: scsiAuthIntrRowStatus */
-				"Ready");
-		}
-		spin_unlock_irq(&se_nacl->device_list_lock);
-
-		spin_lock(&se_tpg->acl_node_lock);
-		atomic_dec(&se_nacl->mib_ref_count);
-		smp_mb__after_atomic_dec();
-	}
-	spin_unlock(&se_tpg->acl_node_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_auth_intr_seq_ops = {
-	.start	= scsi_auth_intr_seq_start,
-	.next	= scsi_auth_intr_seq_next,
-	.stop	= scsi_auth_intr_seq_stop,
-	.show	= scsi_auth_intr_seq_show
-};
-
-static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_auth_intr_seq_ops);
-}
-
-static const struct file_operations scsi_auth_intr_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_auth_intr_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Attached Initiator Port Table:
- * It lists the SCSI Initiators attached to one of the local Target ports.
- * Iterates through all active TPGs and uses active sessions from each TPG
- * to list the info for this table.
- */
-static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	spin_lock_bh(&se_global->se_tpg_lock);
-	return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
-					 loff_t *pos)
-{
-	return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
-{
-	spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
-						se_tpg_list);
-	struct se_dev_entry *deve;
-	struct se_lun *lun;
-	struct se_node_acl *se_nacl;
-	struct se_session *se_sess;
-	unsigned char buf[64];
-	int j;
-
-	if (list_is_first(&se_tpg->se_tpg_list,
-			  &se_global->g_se_tpg_list))
-		seq_puts(seq, "inst dev port indx port_auth_indx port_name"
-			" port_ident\n");
-
-	if (!(se_tpg))
-		return 0;
-
-	spin_lock(&se_tpg->session_lock);
-	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
-		if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
-		    (!se_sess->se_node_acl) ||
-		    (!se_sess->se_node_acl->device_list))
-			continue;
-
-		atomic_inc(&se_sess->mib_ref_count);
-		smp_mb__after_atomic_inc();
-		se_nacl = se_sess->se_node_acl;
-		atomic_inc(&se_nacl->mib_ref_count);
-		smp_mb__after_atomic_inc();
-		spin_unlock(&se_tpg->session_lock);
-
-		spin_lock_irq(&se_nacl->device_list_lock);
-		for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
-			deve = &se_nacl->device_list[j];
-			if (!(deve->lun_flags &
-					TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
-			   (!deve->se_lun))
-				continue;
-
-			lun = deve->se_lun;
-			if (!lun->lun_se_dev)
-				continue;
-
-			memset(buf, 0, 64);
-			if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
-				TPG_TFO(se_tpg)->sess_get_initiator_sid(
-					se_sess, (unsigned char *)&buf[0], 64);
-
-			seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
-				/* scsiInstIndex */
-				(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
-				TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
-				0,
-				/* scsiDeviceIndex */
-				lun->lun_se_dev->dev_index,
-				/* scsiPortIndex */
-				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
-				/* scsiAttIntrPortIndex */
-				(TPG_TFO(se_tpg)->sess_get_index != NULL) ?
-				TPG_TFO(se_tpg)->sess_get_index(se_sess) :
-				0,
-				/* scsiAttIntrPortAuthIntrIdx */
-				se_nacl->acl_index,
-				/* scsiAttIntrPortName */
-				se_nacl->initiatorname[0] ?
-					se_nacl->initiatorname : NONE,
-				/* scsiAttIntrPortIdentifier */
-				buf);
-		}
-		spin_unlock_irq(&se_nacl->device_list_lock);
-
-		spin_lock(&se_tpg->session_lock);
-		atomic_dec(&se_nacl->mib_ref_count);
-		smp_mb__after_atomic_dec();
-		atomic_dec(&se_sess->mib_ref_count);
-		smp_mb__after_atomic_dec();
-	}
-	spin_unlock(&se_tpg->session_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_att_intr_port_seq_ops = {
-	.start	= scsi_att_intr_port_seq_start,
-	.next	= scsi_att_intr_port_seq_next,
-	.stop	= scsi_att_intr_port_seq_stop,
-	.show	= scsi_att_intr_port_seq_show
-};
-
-static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_att_intr_port_seq_ops);
-}
-
-static const struct file_operations scsi_att_intr_port_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_att_intr_port_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Logical Unit Table
- */
-static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-#define SCSI_LU_INDEX		1
-static int scsi_lu_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	int j;
-	char str[28];
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
-		" dev_type status state-bit num_cmds read_mbytes"
-		" write_mbytes resets full_stat hs_num_cmds creation_time\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	/* Fix LU state, if we can read it from the device */
-	seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
-			dev->dev_index, SCSI_LU_INDEX,
-			(unsigned long long)0, /* FIXME: scsiLuDefaultLun */
-			(strlen(DEV_T10_WWN(dev)->unit_serial)) ?
-			/* scsiLuWwnName */
-			(char *)&DEV_T10_WWN(dev)->unit_serial[0] :
-			"None");
-
-	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-	/* scsiLuVendorId */
-	for (j = 0; j < 8; j++)
-		str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
-			DEV_T10_WWN(dev)->vendor[j] : 0x20;
-	str[8] = 0;
-	seq_printf(seq, " %s", str);
-
-	/* scsiLuProductId */
-	for (j = 0; j < 16; j++)
-		str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
-			DEV_T10_WWN(dev)->model[j] : 0x20;
-	str[16] = 0;
-	seq_printf(seq, " %s", str);
-
-	/* scsiLuRevisionId */
-	for (j = 0; j < 4; j++)
-		str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
-			DEV_T10_WWN(dev)->revision[j] : 0x20;
-	str[4] = 0;
-	seq_printf(seq, " %s", str);
-
-	seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
-		/* scsiLuPeripheralType */
-		   TRANSPORT(dev)->get_device_type(dev),
-		   (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
-		"available" : "notavailable", /* scsiLuStatus */
-		"exposed", 	/* scsiLuState */
-		(unsigned long long)dev->num_cmds,
-		/* scsiLuReadMegaBytes */
-		(u32)(dev->read_bytes >> 20),
-		/* scsiLuWrittenMegaBytes */
-		(u32)(dev->write_bytes >> 20),
-		dev->num_resets, /* scsiLuInResets */
-		0, /* scsiLuOutTaskSetFullStatus */
-		0, /* scsiLuHSInCommands */
-		(u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
-							100 / HZ));
-
-	return 0;
-}
-
-static const struct seq_operations scsi_lu_seq_ops = {
-	.start  = scsi_lu_seq_start,
-	.next   = scsi_lu_seq_next,
-	.stop   = scsi_lu_seq_stop,
-	.show   = scsi_lu_seq_show
-};
-
-static int scsi_lu_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_lu_seq_ops);
-}
-
-static const struct file_operations scsi_lu_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_lu_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/****************************************************************************/
-
-/*
- * Remove proc fs entries
- */
-void remove_scsi_target_mib(void)
-{
-	remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_port", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
-	remove_proc_entry("scsi_target/mib", NULL);
-}
-
-/*
- * Create proc fs entries for the mib tables
- */
-int init_scsi_target_mib(void)
-{
-	struct proc_dir_entry *dir_entry;
-	struct proc_dir_entry *scsi_inst_entry;
-	struct proc_dir_entry *scsi_dev_entry;
-	struct proc_dir_entry *scsi_port_entry;
-	struct proc_dir_entry *scsi_transport_entry;
-	struct proc_dir_entry *scsi_tgt_dev_entry;
-	struct proc_dir_entry *scsi_tgt_port_entry;
-	struct proc_dir_entry *scsi_auth_intr_entry;
-	struct proc_dir_entry *scsi_att_intr_port_entry;
-	struct proc_dir_entry *scsi_lu_entry;
-
-	dir_entry = proc_mkdir("scsi_target/mib", NULL);
-	if (!(dir_entry)) {
-		printk(KERN_ERR "proc_mkdir() failed.\n");
-		return -1;
-	}
-
-	scsi_inst_entry =
-		create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
-	if (scsi_inst_entry)
-		scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
-	else
-		goto error;
-
-	scsi_dev_entry =
-		create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
-	if (scsi_dev_entry)
-		scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
-	else
-		goto error;
-
-	scsi_port_entry =
-		create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
-	if (scsi_port_entry)
-		scsi_port_entry->proc_fops = &scsi_port_seq_fops;
-	else
-		goto error;
-
-	scsi_transport_entry =
-		create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
-	if (scsi_transport_entry)
-		scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
-	else
-		goto error;
-
-	scsi_tgt_dev_entry =
-		create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
-	if (scsi_tgt_dev_entry)
-		scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
-	else
-		goto error;
-
-	scsi_tgt_port_entry =
-		create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
-	if (scsi_tgt_port_entry)
-		scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
-	else
-		goto error;
-
-	scsi_auth_intr_entry =
-		create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
-	if (scsi_auth_intr_entry)
-		scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
-	else
-		goto error;
-
-	scsi_att_intr_port_entry =
-	      create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
-	if (scsi_att_intr_port_entry)
-		scsi_att_intr_port_entry->proc_fops =
-				&scsi_att_intr_port_seq_fops;
-	else
-		goto error;
-
-	scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
-	if (scsi_lu_entry)
-		scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
-	else
-		goto error;
-
-	return 0;
-
-error:
-	printk(KERN_ERR "create_proc_entry() failed.\n");
-	remove_scsi_target_mib();
-	return -1;
-}
-
-/*
- * Initialize the index table for allocating unique row indexes to various mib
- * tables
- */
-void init_scsi_index_table(void)
-{
-	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
-	spin_lock_init(&scsi_index_table.lock);
-}
-
-/*
- * Allocate a new row index for the entry type specified
- */
-u32 scsi_get_new_index(scsi_index_t type)
-{
-	u32 new_index;
-
-	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
-		printk(KERN_ERR "Invalid index type %d\n", type);
-		return -1;
-	}
-
-	spin_lock(&scsi_index_table.lock);
-	new_index = ++scsi_index_table.scsi_mib_index[type];
-	if (new_index == 0)
-		new_index = ++scsi_index_table.scsi_mib_index[type];
-	spin_unlock(&scsi_index_table.lock);
-
-	return new_index;
-}
-EXPORT_SYMBOL(scsi_get_new_index);
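
The row-index allocator deleted here survives further down in this same diff, re-added to target_core_transport.c. Its one interesting property is that a per-type counter never hands out 0, since 0 keeps meaning "no index assigned". Below is a minimal userspace sketch of that wrap-around rule; the IDX_* names and get_new_index() are illustrative stand-ins, not the kernel symbols, and the spinlock that serializes the real counters is omitted.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified model of the row-index allocator: one counter per table
 * type, and the value 0 is skipped on wrap-around.  The kernel version
 * guards the counters with scsi_index_table.lock; no locking here.
 */
enum { IDX_INST, IDX_DEVICE, IDX_AUTH_INTR, IDX_TYPE_MAX };

static uint32_t mib_index[IDX_TYPE_MAX];

static uint32_t get_new_index(int type)
{
	uint32_t new_index = ++mib_index[type];

	if (new_index == 0)			/* counter wrapped: skip 0 */
		new_index = ++mib_index[type];
	return new_index;
}

int main(void)
{
	mib_index[IDX_INST] = UINT32_MAX;	/* force the wrap case */
	printf("%" PRIu32 "\n", get_new_index(IDX_INST));	/* 1, not 0 */
	printf("%" PRIu32 "\n", get_new_index(IDX_DEVICE));	/* 1 */
	return 0;
}
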
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
deleted file mode 100644
index 277204633850..000000000000
--- a/drivers/target/target_core_mib.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef TARGET_CORE_MIB_H
-#define TARGET_CORE_MIB_H
-
-typedef enum {
-	SCSI_INST_INDEX,
-	SCSI_DEVICE_INDEX,
-	SCSI_AUTH_INTR_INDEX,
-	SCSI_INDEX_TYPE_MAX
-} scsi_index_t;
-
-struct scsi_index_table {
-	spinlock_t	lock;
-	u32 		scsi_mib_index[SCSI_INDEX_TYPE_MAX];
-} ____cacheline_aligned;
-
-/* SCSI Port stats */
-struct scsi_port_stats {
-	u64	cmd_pdus;
-	u64	tx_data_octets;
-	u64	rx_data_octets;
-} ____cacheline_aligned;
-
-extern int init_scsi_target_mib(void);
-extern void remove_scsi_target_mib(void);
-extern void init_scsi_index_table(void);
-extern u32 scsi_get_new_index(scsi_index_t);
-
-#endif   /*** TARGET_CORE_MIB_H ***/
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 742d24609a9b..f2a08477a68c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk(
 	 */
 	bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
 				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
-	if (!(bd)) {
-		printk("pSCSI: blkdev_get_by_path() failed\n");
+	if (IS_ERR(bd)) {
+		printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
 		scsi_device_put(sd);
 		return NULL;
 	}
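
The pscsi hunk above matters because blkdev_get_by_path() reports failure with an ERR_PTR-encoded pointer, never NULL, so the old `if (!(bd))` test could not fire. A small userspace sketch of the convention follows; ERR_PTR/IS_ERR are re-derived here for illustration (the real ones live in include/linux/err.h) and open_bdev() is a hypothetical stand-in for blkdev_get_by_path().

#include <errno.h>
#include <stdio.h>

/*
 * Userspace model of the kernel's ERR_PTR convention: errors are
 * encoded as pointer values in the top page of the address space, so
 * a failed call returns a non-NULL pointer that a NULL check misses.
 */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)  { return IS_ERR_VALUE((unsigned long)ptr); }

/* Stand-in for blkdev_get_by_path(): always fails with -ENODEV. */
static void *open_bdev(const char *path) { (void)path; return ERR_PTR(-ENODEV); }

int main(void)
{
	void *bd = open_bdev("/dev/does-not-exist");

	if (!bd)
		puts("NULL check: never reached");
	if (IS_ERR(bd))
		printf("IS_ERR check: error %ld\n", PTR_ERR(bd));
	return 0;
}
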
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index abfa81a57115..c26f67467623 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 	spin_lock_init(&acl->device_list_lock);
 	spin_lock_init(&acl->nacl_sess_lock);
 	atomic_set(&acl->acl_pr_ref_count, 0);
-	atomic_set(&acl->mib_ref_count, 0);
 	acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
 	acl->se_tpg = tpg;
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
 		cpu_relax();
 }
 
-void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
-{
-	while (atomic_read(&nacl->mib_ref_count) != 0)
-		cpu_relax();
-}
-
 void core_tpg_clear_object_luns(struct se_portal_group *tpg)
 {
 	int i, ret;
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl(
 	spin_unlock_bh(&tpg->session_lock);
 
 	core_tpg_wait_for_nacl_pr_ref(acl);
-	core_tpg_wait_for_mib_ref(acl);
 	core_clear_initiator_node_from_tpg(acl, tpg);
 	core_free_device_list_for_node(acl, tpg);
 
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register);
 
 int core_tpg_deregister(struct se_portal_group *se_tpg)
 {
+	struct se_node_acl *nacl, *nacl_tmp;
+
 	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
 		" for endpoint: %s Portal Tag %u\n",
 		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 
 	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
 		cpu_relax();
+	/*
+	 * Release any remaining demo-mode generated se_node_acl that have
+	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+	 * in transport_deregister_session().
+	 */
+	spin_lock_bh(&se_tpg->acl_node_lock);
+	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+			acl_list) {
+		list_del(&nacl->acl_list);
+		se_tpg->num_node_acls--;
+		spin_unlock_bh(&se_tpg->acl_node_lock);
+
+		core_tpg_wait_for_nacl_pr_ref(nacl);
+		core_free_device_list_for_node(nacl, se_tpg);
+		TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+
+		spin_lock_bh(&se_tpg->acl_node_lock);
+	}
+	spin_unlock_bh(&se_tpg->acl_node_lock);
 
 	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
 		core_tpg_release_virtual_lun0(se_tpg);
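
The acl_node_list walk added above unlinks each ACL while acl_node_lock is held, drops the lock for the release steps that may block, then retakes it before looking at the list again. A compact userspace model of that pattern is sketched below, with a pthread mutex and a hand-rolled singly linked list standing in for the kernel primitives.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Minimal model of the lock-drop pattern: unlink the node under the
 * lock, drop the lock for the slow release work, retake it before
 * re-reading the list head.
 */
struct node { struct node *next; int id; };

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void release_node(struct node *n)	/* stands in for the blocking release work */
{
	printf("releasing %d\n", n->id);
	free(n);
}

static void drain_list(void)
{
	pthread_mutex_lock(&lock);
	while (head) {
		struct node *n = head;

		head = n->next;			/* unlink under the lock */
		pthread_mutex_unlock(&lock);	/* drop it for the slow part */
		release_node(n);
		pthread_mutex_lock(&lock);	/* retake before re-reading head */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}
	drain_list();
	return 0;
}
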
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 28b6292ff298..236e22d8cfae 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -379,6 +379,40 @@ void release_se_global(void)
 	se_global = NULL;
 }
 
+/* SCSI statistics table index */
+static struct scsi_index_table scsi_index_table;
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables.
+ */
+void init_scsi_index_table(void)
+{
+	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+	spin_lock_init(&scsi_index_table.lock);
+}
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+	u32 new_index;
+
+	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+		printk(KERN_ERR "Invalid index type %d\n", type);
+		return -EINVAL;
+	}
+
+	spin_lock(&scsi_index_table.lock);
+	new_index = ++scsi_index_table.scsi_mib_index[type];
+	if (new_index == 0)
+		new_index = ++scsi_index_table.scsi_mib_index[type];
+	spin_unlock(&scsi_index_table.lock);
+
+	return new_index;
+}
+
 void transport_init_queue_obj(struct se_queue_obj *qobj)
 {
 	atomic_set(&qobj->queue_cnt, 0);
@@ -437,7 +471,6 @@ struct se_session *transport_init_session(void)
 	}
 	INIT_LIST_HEAD(&se_sess->sess_list);
 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
-	atomic_set(&se_sess->mib_ref_count, 0);
 
 	return se_sess;
 }
@@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess)
 		transport_free_session(se_sess);
 		return;
 	}
-	/*
-	 * Wait for possible reference in drivers/target/target_core_mib.c:
-	 * scsi_att_intr_port_seq_show()
-	 */
-	while (atomic_read(&se_sess->mib_ref_count) != 0)
-		cpu_relax();
 
 	spin_lock_bh(&se_tpg->session_lock);
 	list_del(&se_sess->sess_list);
@@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess)
 				spin_unlock_bh(&se_tpg->acl_node_lock);
 
 				core_tpg_wait_for_nacl_pr_ref(se_nacl);
-				core_tpg_wait_for_mib_ref(se_nacl);
 				core_free_device_list_for_node(se_nacl, se_tpg);
 				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
 						se_nacl);
@@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map(
 
 		return ret;
 	}
+
+	BUG_ON(list_empty(se_mem_list));
 	/*
 	 * This is the normal path for all normal non BIDI and BIDI-COMMAND
 	 * WRITE payloads..  If we need to do BIDI READ passthrough for
@@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
 		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
 		u32 se_mem_cnt = 0, task_offset = 0;
 
-		BUG_ON(list_empty(cmd->t_task->t_mem_list));
+		if (!list_empty(T_TASK(cmd)->t_mem_list))
+			se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
+					struct se_mem, se_list);
 
 		ret = transport_do_se_mem_map(dev, task,
 				cmd->t_task->t_mem_list, NULL, se_mem,
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7a5dba3ca23..bf7c687519ef 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,7 +4,6 @@
 
 menuconfig THERMAL
 	tristate "Generic Thermal sysfs driver"
-	depends on NET
 	help
 	  Generic Thermal Sysfs driver offers a generic mechanism for
 	  thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 7d0e63c79280..713b7ea4a607 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
 
 static unsigned int thermal_event_seqnum;
 
-static struct genl_family thermal_event_genl_family = {
-	.id = GENL_ID_GENERATE,
-	.name = THERMAL_GENL_FAMILY_NAME,
-	.version = THERMAL_GENL_VERSION,
-	.maxattr = THERMAL_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group thermal_event_mcgrp = {
-	.name = THERMAL_GENL_MCAST_GROUP_NAME,
-};
-
-static int genetlink_init(void);
-static void genetlink_exit(void);
-
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
 	int err;
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
 EXPORT_SYMBOL(thermal_zone_device_unregister);
 
+#ifdef CONFIG_NET
+static struct genl_family thermal_event_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.name = THERMAL_GENL_FAMILY_NAME,
+	.version = THERMAL_GENL_VERSION,
+	.maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+	.name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
 int generate_netlink_event(u32 orig, enum events event)
 {
 	struct sk_buff *skb;
@@ -1301,6 +1299,15 @@ static int genetlink_init(void)
 	return result;
 }
 
+static void genetlink_exit(void)
+{
+	genl_unregister_family(&thermal_event_genl_family);
+}
+#else /* !CONFIG_NET */
+static inline int genetlink_init(void) { return 0; }
+static inline void genetlink_exit(void) {}
+#endif /* !CONFIG_NET */
+
 static int __init thermal_init(void)
 {
 	int result = 0;
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
 	return result;
 }
 
-static void genetlink_exit(void)
-{
-	genl_unregister_family(&thermal_event_genl_family);
-}
-
 static void __exit thermal_exit(void)
 {
 	class_unregister(&thermal_class);
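
The thermal_sys.c change groups the generic netlink pieces under #ifdef CONFIG_NET and provides inline no-op stubs otherwise, which is what lets the Kconfig hunk above drop "depends on NET" while keeping the callers unconditional. A small sketch of the same shape; CONFIG_NET here is just a stand-in macro you would pass with -DCONFIG_NET.

#include <stdio.h>

/*
 * Provide real functions when the feature is configured in, and inline
 * no-op stubs when it is not, so the call sites stay free of #ifdefs.
 */
#ifdef CONFIG_NET
static int  genetlink_init(void) { puts("netlink family registered"); return 0; }
static void genetlink_exit(void) { puts("netlink family unregistered"); }
#else
static inline int  genetlink_init(void) { return 0; }
static inline void genetlink_exit(void) { }
#endif

int main(void)
{
	int err = genetlink_init();	/* no #ifdef needed at the call site */

	if (!err)
		genetlink_exit();
	return 0;
}
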
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
index e6bed5f177ff..d79e7e9bf9d2 100644
--- a/drivers/tty/hvc/Makefile
+++ b/drivers/tty/hvc/Makefile
@@ -10,4 +10,3 @@ obj-$(CONFIG_HVC_XEN)		+= hvc_xen.o
 obj-$(CONFIG_HVC_IUCV)		+= hvc_iucv.o
 obj-$(CONFIG_HVC_UDBG)		+= hvc_udbg.o
 obj-$(CONFIG_HVCS)		+= hvcs.o
-obj-$(CONFIG_VIRTIO_CONSOLE)	+= virtio_console.o
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 44b8412a04e8..aa2e5d3eb01a 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2414,6 +2414,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
 
 	gsm->initiator = c->initiator;
 	gsm->mru = c->mru;
+	gsm->mtu = c->mtu;
 	gsm->encoding = c->encapsulation;
 	gsm->adaption = c->adaption;
 	gsm->n2 = c->n2;
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index be0ebce36e54..de0160e3f8c4 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -262,7 +262,7 @@ static void status_handle(struct m68k_serial *info, unsigned short status)
 
 static void receive_chars(struct m68k_serial *info, unsigned short rx)
 {
-	struct tty_struct *tty = info->port.tty;
+	struct tty_struct *tty = info->tty;
 	m68328_uart *uart = &uart_addr[info->line];
 	unsigned char ch, flag;
 
@@ -329,7 +329,7 @@ static void transmit_chars(struct m68k_serial *info)
 		goto clear_and_return;
 	}
 
-	if((info->xmit_cnt <= 0) || info->port.tty->stopped) {
+	if((info->xmit_cnt <= 0) || info->tty->stopped) {
 		/* That's peculiar... TX ints off */
 		uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
 		goto clear_and_return;
@@ -383,7 +383,7 @@ static void do_softint(struct work_struct *work)
 	struct m68k_serial	*info = container_of(work, struct m68k_serial, tqueue);
 	struct tty_struct	*tty;
 	
-	tty = info->port.tty;
+	tty = info->tty;
 	if (!tty)
 		return;
 #if 0
@@ -407,7 +407,7 @@ static void do_serial_hangup(struct work_struct *work)
 	struct m68k_serial	*info = container_of(work, struct m68k_serial, tqueue_hangup);
 	struct tty_struct	*tty;
 	
-	tty = info->port.tty;
+	tty = info->tty;
 	if (!tty)
 		return;
 
@@ -451,8 +451,8 @@ static int startup(struct m68k_serial * info)
 	uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK;
 #endif
 
-	if (info->port.tty)
-		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+	if (info->tty)
+		clear_bit(TTY_IO_ERROR, &info->tty->flags);
 	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
 
 	/*
@@ -486,8 +486,8 @@ static void shutdown(struct m68k_serial * info)
 		info->xmit_buf = 0;
 	}
 
-	if (info->port.tty)
-		set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+	if (info->tty)
+		set_bit(TTY_IO_ERROR, &info->tty->flags);
 	
 	info->flags &= ~S_INITIALIZED;
 	local_irq_restore(flags);
@@ -553,9 +553,9 @@ static void change_speed(struct m68k_serial *info)
 	unsigned cflag;
 	int	i;
 
-	if (!info->port.tty || !info->port.tty->termios)
+	if (!info->tty || !info->tty->termios)
 		return;
-	cflag = info->port.tty->termios->c_cflag;
+	cflag = info->tty->termios->c_cflag;
 	if (!(port = info->port))
 		return;
 
@@ -970,7 +970,6 @@ static void send_break(struct m68k_serial * info, unsigned int duration)
 static int rs_ioctl(struct tty_struct *tty, struct file * file,
 		    unsigned int cmd, unsigned long arg)
 {
-	int error;
 	struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
 	int retval;
 
@@ -1104,7 +1103,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
 	tty_ldisc_flush(tty);
 	tty->closing = 0;
 	info->event = 0;
-	info->port.tty = NULL;
+	info->tty = NULL;
 #warning "This is not and has never been valid so fix it"	
 #if 0
 	if (tty->ldisc.num != ldiscs[N_TTY].num) {
@@ -1142,7 +1141,7 @@ void rs_hangup(struct tty_struct *tty)
 	info->event = 0;
 	info->count = 0;
 	info->flags &= ~S_NORMAL_ACTIVE;
-	info->port.tty = NULL;
+	info->tty = NULL;
 	wake_up_interruptible(&info->open_wait);
 }
 
@@ -1261,7 +1260,7 @@ int rs_open(struct tty_struct *tty, struct file * filp)
 
 	info->count++;
 	tty->driver_data = info;
-	info->port.tty = tty;
+	info->tty = tty;
 
 	/*
 	 * Start up serial port
@@ -1338,7 +1337,7 @@ rs68328_init(void)
 	    info = &m68k_soft[i];
 	    info->magic = SERIAL_MAGIC;
 	    info->port = (int) &uart_addr[i];
-	    info->port.tty = NULL;
+	    info->tty = NULL;
 	    info->irq = uart_irqs[i];
 	    info->custom_divisor = 16;
 	    info->close_delay = 50;
diff --git a/drivers/tty/serial/68360serial.c b/drivers/tty/serial/68360serial.c
index 88b13356ec10..bc21eeae8fde 100644
--- a/drivers/tty/serial/68360serial.c
+++ b/drivers/tty/serial/68360serial.c
@@ -2428,6 +2428,7 @@ static const struct tty_operations rs_360_ops = {
 	/* .read_proc = rs_360_read_proc, */
 	.tiocmget = rs_360_tiocmget,
 	.tiocmset = rs_360_tiocmset,
+	.get_icount = rs_360_get_icount,
 };
 
 static int __init rs_360_init(void)
diff --git a/drivers/tty/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c
index e381b895b04d..9b1ff2b6bb37 100644
--- a/drivers/tty/serial/bfin_5xx.c
+++ b/drivers/tty/serial/bfin_5xx.c
@@ -370,10 +370,8 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
 {
 	struct bfin_serial_port *uart = dev_id;
 
-	spin_lock(&uart->port.lock);
 	while (UART_GET_LSR(uart) & DR)
 		bfin_serial_rx_chars(uart);
-	spin_unlock(&uart->port.lock);
 
 	return IRQ_HANDLED;
 }
@@ -490,9 +488,8 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
 {
 	int x_pos, pos;
 
-	dma_disable_irq(uart->tx_dma_channel);
-	dma_disable_irq(uart->rx_dma_channel);
-	spin_lock_bh(&uart->port.lock);
+	dma_disable_irq_nosync(uart->rx_dma_channel);
+	spin_lock_bh(&uart->rx_lock);
 
 	/* 2D DMA RX buffer ring is used. Because curr_y_count and
 	 * curr_x_count can't be read as an atomic operation,
@@ -523,8 +520,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
 		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
 	}
 
-	spin_unlock_bh(&uart->port.lock);
-	dma_enable_irq(uart->tx_dma_channel);
+	spin_unlock_bh(&uart->rx_lock);
 	dma_enable_irq(uart->rx_dma_channel);
 
 	mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -571,7 +567,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
 	unsigned short irqstat;
 	int x_pos, pos;
 
-	spin_lock(&uart->port.lock);
+	spin_lock(&uart->rx_lock);
 	irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
 	clear_dma_irqstat(uart->rx_dma_channel);
 
@@ -589,7 +585,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
 		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
 	}
 
-	spin_unlock(&uart->port.lock);
+	spin_unlock(&uart->rx_lock);
 
 	return IRQ_HANDLED;
 }
@@ -1332,6 +1328,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
 		}
 
 #ifdef CONFIG_SERIAL_BFIN_DMA
+		spin_lock_init(&uart->rx_lock);
 		uart->tx_done	    = 1;
 		uart->tx_count	    = 0;
 
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index beb1afa27d8d..7b951adac54b 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port)
 	s->rts = 0;
 
 	sprintf(b, "max3100-%d", s->minor);
-	s->workqueue = create_freezeable_workqueue(b);
+	s->workqueue = create_freezable_workqueue(b);
 	if (!s->workqueue) {
 		dev_warn(&s->spi->dev, "cannot create workqueue\n");
 		return -EBUSY;
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 910870edf708..750b4f627315 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port)
 	struct max3107_port *s = container_of(port, struct max3107_port, port);
 
 	/* Initialize work queue */
-	s->workqueue = create_freezeable_workqueue("max3107");
+	s->workqueue = create_freezable_workqueue("max3107");
 	if (!s->workqueue) {
 		dev_err(&s->spi->dev, "Workqueue creation failed\n");
 		return -EBUSY;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 8e0dd254eb11..81f13958e751 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -571,6 +571,7 @@ struct sysrq_state {
 	unsigned int alt_use;
 	bool active;
 	bool need_reinject;
+	bool reinjecting;
 };
 
 static void sysrq_reinject_alt_sysrq(struct work_struct *work)
@@ -581,6 +582,10 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
 	unsigned int alt_code = sysrq->alt_use;
 
 	if (sysrq->need_reinject) {
+		/* we do not want the assignment to be reordered */
+		sysrq->reinjecting = true;
+		mb();
+
 		/* Simulate press and release of Alt + SysRq */
 		input_inject_event(handle, EV_KEY, alt_code, 1);
 		input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1);
@@ -589,6 +594,9 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
 		input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0);
 		input_inject_event(handle, EV_KEY, alt_code, 0);
 		input_inject_event(handle, EV_SYN, SYN_REPORT, 1);
+
+		mb();
+		sysrq->reinjecting = false;
 	}
 }
 
@@ -599,6 +607,13 @@ static bool sysrq_filter(struct input_handle *handle,
 	bool was_active = sysrq->active;
 	bool suppress;
 
+	/*
+	 * Do not filter anything if we are in the process of re-injecting
+	 * Alt+SysRq combination.
+	 * the Alt+SysRq combination.
+	if (sysrq->reinjecting)
+		return false;
+
 	switch (type) {
 
 	case EV_SYN:
@@ -629,7 +644,7 @@ static bool sysrq_filter(struct input_handle *handle,
 				sysrq->alt_use = sysrq->alt;
 				/*
 				 * If nothing else will be pressed we'll need
-				 * to * re-inject Alt-SysRq keysroke.
+				 * to re-inject the Alt-SysRq keystroke.
 				 */
 				sysrq->need_reinject = true;
 			}
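
The sysrq.c hunks guard the synthetic Alt+SysRq events with a reinjecting flag so that the filter does not swallow its own injection, and the mb() pairs keep the flag update ordered against the injected events. Below is a simplified single-threaded sketch using C11 atomics as a stand-in for that barrier pairing; the key codes and function names are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified model of the re-inject guard: the worker sets a flag
 * around the synthetic key events, and the filter passes events
 * straight through while the flag is set.
 */
static atomic_bool reinjecting;

static bool filter_event(int code)
{
	if (atomic_load(&reinjecting))
		return false;			/* do not filter our own events */
	printf("filtering key %d\n", code);
	return true;				/* consumed by sysrq handling */
}

static void inject_event(int code)
{
	if (!filter_event(code))
		printf("key %d delivered unfiltered\n", code);
}

static void reinject_alt_sysrq(void)
{
	atomic_store(&reinjecting, true);	/* stands in for the first mb() */
	inject_event(56);			/* Alt press, illustrative code */
	inject_event(99);			/* SysRq press */
	atomic_store(&reinjecting, false);	/* stands in for the second mb() */
}

int main(void)
{
	inject_event(99);			/* normal path: filtered */
	reinject_alt_sysrq();			/* guarded path: passes through */
	return 0;
}
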
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d6ede989ff22..4ab49d4eebf4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1607,6 +1607,7 @@ static const struct usb_device_id acm_ids[] = {
 	{ NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
 	{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
 	{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
 	{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
 
 	/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 6a95017fa62b..e935f71d7a34 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1955,7 +1955,6 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
 
 	dev_dbg(&rhdev->dev, "usb %s%s\n",
 			(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
-	clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
 	if (!hcd->driver->bus_resume)
 		return -ENOENT;
 	if (hcd->state == HC_STATE_RUNNING)
@@ -1963,6 +1962,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
 
 	hcd->state = HC_STATE_RESUMING;
 	status = hcd->driver->bus_resume(hcd);
+	clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
 	if (status == 0) {
 		/* TRSMRCY = 10 msec */
 		msleep(10);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4310cc4b1cb5..0f299b7aad60 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
 
 	mutex_lock(&usb_address0_mutex);
 
-	if (!udev->config && oldspeed == USB_SPEED_SUPER) {
-		/* Don't reset USB 3.0 devices during an initial setup */
-		usb_set_device_state(udev, USB_STATE_DEFAULT);
-	} else {
-		/* Reset the device; full speed may morph to high speed */
-		/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
-		retval = hub_port_reset(hub, port1, udev, delay);
-		if (retval < 0)		/* error or disconnect */
-			goto fail;
-		/* success, speed is known */
-	}
+	/* Reset the device; full speed may morph to high speed */
+	/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+	retval = hub_port_reset(hub, port1, udev, delay);
+	if (retval < 0)		/* error or disconnect */
+		goto fail;
+	/* success, speed is known */
+
 	retval = -ENODEV;
 
 	if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
@@ -2753,6 +2749,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
 		udev->ttport = hdev->ttport;
 	} else if (udev->speed != USB_SPEED_HIGH
 			&& hdev->speed == USB_SPEED_HIGH) {
+		if (!hub->tt.hub) {
+			dev_err(&udev->dev, "parent hub has no TT\n");
+			retval = -EINVAL;
+			goto fail;
+		}
 		udev->tt = &hub->tt;
 		udev->ttport = port1;
 	}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 44c595432d6f..81ce6a8e1d94 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = {
 	{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
 
+	/* Samsung Android phone modem - ID conflict with SPH-I500 */
+	{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
+			USB_QUIRK_CONFIG_INTF_STRINGS },
+
 	/* Roland SC-8820 */
 	{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* M-Systems Flash Disk Pioneers */
 	{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* Keytouch QWERTY Panel keyboard */
+	{ USB_DEVICE(0x0926, 0x3333), .driver_info =
+			USB_QUIRK_CONFIG_INTF_STRINGS },
+
 	/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
 	{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
 
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 06bb9d4587e9..d50099675f28 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -546,6 +546,8 @@ config USB_GADGET_CI13XXX_MSM
 	  ci13xxx_udc core.
 	  This driver depends on OTG driver for PHY initialization,
 	  clock management, powering up VBUS, and power management.
+	  This driver is not supported on boards such as trout, which
+	  have an external PHY.
 
 	  Say "y" to link the driver statically, or "m" to build a
 	  dynamically linked module called "ci13xxx_msm" and force all
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index b5dbb2308f56..6d8e533949eb 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -293,6 +293,7 @@
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
 
 #include "gadget_chips.h"
 
@@ -2763,7 +2764,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
 			return ERR_PTR(-ENOMEM);
 		common->free_storage_on_release = 1;
 	} else {
-		memset(common, 0, sizeof common);
+		memset(common, 0, sizeof *common);
 		common->free_storage_on_release = 0;
 	}
 
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 20d43da319ae..015118535f77 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -258,7 +258,7 @@ static int pipe_buffer_setting(struct r8a66597 *r8a66597,
 		break;
 	case R8A66597_BULK:
 		/* isochronous pipes may be used as bulk pipes */
-		if (info->pipe > R8A66597_BASE_PIPENUM_BULK)
+		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
 			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
 		else
 			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 24046c0f5878..0e6afa260ed8 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -151,6 +151,8 @@ config USB_EHCI_MSM
 	  Qualcomm chipsets. Root Hub has inbuilt TT.
 	  This driver depends on OTG driver for PHY initialization,
 	  clock management, powering up VBUS, and power management.
+	  This driver is not supported on boards such as trout, which
+	  have an external PHY.
 
 config USB_EHCI_HCD_PPC_OF
 	bool "EHCI support for PPC USB controller on OF platform bus"
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 2baf8a849086..a869e3c103d3 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -227,8 +227,8 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
 	 * mark HW unaccessible.  The PM and USB cores make sure that
 	 * the root hub is either suspended or stopped.
 	 */
-	spin_lock_irqsave(&ehci->lock, flags);
 	ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
+	spin_lock_irqsave(&ehci->lock, flags);
 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
 	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
 
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 796ea0c8900f..8a515f0d5988 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -111,6 +111,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
 {
 	int		port;
 	u32		temp;
+	unsigned long	flags;
 
 	/* If remote wakeup is enabled for the root hub but disabled
 	 * for the controller, we must adjust all the port wakeup flags
@@ -120,6 +121,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
 	if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
 		return;
 
+	spin_lock_irqsave(&ehci->lock, flags);
+
 	/* clear phy low-power mode before changing wakeup flags */
 	if (ehci->has_hostpc) {
 		port = HCS_N_PORTS(ehci->hcs_params);
@@ -131,7 +134,9 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
 			temp = ehci_readl(ehci, hostpc_reg);
 			ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
 		}
+		spin_unlock_irqrestore(&ehci->lock, flags);
 		msleep(5);
+		spin_lock_irqsave(&ehci->lock, flags);
 	}
 
 	port = HCS_N_PORTS(ehci->hcs_params);
@@ -170,6 +175,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
 	/* Does the root hub have a port wakeup pending? */
 	if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
 		usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
 }
 
 static int ehci_bus_suspend (struct usb_hcd *hcd)
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 680f2ef4e59f..f784ceb862a3 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -796,7 +796,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 	hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
 			dev_name(&pdev->dev));
 	if (!hcd) {
-		dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
+		dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret);
 		ret = -ENOMEM;
 		goto err_create_hcd;
 	}
@@ -864,7 +864,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 
 	ret = omap_start_ehc(omap, hcd);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to start ehci\n");
+		dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret);
 		goto err_start;
 	}
 
@@ -879,7 +879,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 
 	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+		dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret);
 		goto err_add_hcd;
 	}
 
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index bed07d4aab06..07bb982e59f6 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -367,8 +367,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 	 * mark HW unaccessible.  The PM and USB cores make sure that
 	 * the root hub is either suspended or stopped.
 	 */
-	spin_lock_irqsave (&ehci->lock, flags);
 	ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
+	spin_lock_irqsave (&ehci->lock, flags);
 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
 	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
 
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 990f06b89eaa..2e9602a10e9b 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -861,6 +861,7 @@ static int sl811h_urb_enqueue(
 			DBG("dev %d ep%d maxpacket %d\n",
 				udev->devnum, epnum, ep->maxpacket);
 			retval = -EINVAL;
+			kfree(ep);
 			goto fail;
 		}
 
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4abbf381..0231814a97a5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
 	}
 }
 
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
 {
-	void *addr;
+	struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+	void __iomem *addr;
 	u32 temp;
 	u64 temp_64;
 
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
 	}
 }
 
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
 	/* Fields are 32 bits wide, DMA addresses are in bytes */
 	int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
 }
 
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
 		     struct xhci_container_ctx *ctx,
 		     unsigned int last_ep)
 {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f0e7a6..a9534396e85b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 
 /***************** Streams structures manipulation *************************/
 
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs,
 		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 {
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
  * The stream context array must be a power of 2, and can be as small as
  * 64 bytes or as large as 1MB.
  */
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs, dma_addr_t *dma,
 		gfp_t mem_flags)
 {
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	val &= DBOFF_MASK;
 	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
 			" from cap regs base addr\n", val);
-	xhci->dba = (void *) xhci->cap_regs + val;
+	xhci->dba = (void __iomem *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
 	/* Set ir_set to interrupt register set 0 */
-	xhci->ir_set = (void *) xhci->run_regs->ir_set;
+	xhci->ir_set = &xhci->run_regs->ir_set[0];
 
 	/*
 	 * Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
 	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c1ce5a..3289bf4832c9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
 			dev->eps[ep_index].stopped_trb,
 			&state->new_cycle_state);
-	if (!state->new_deq_seg)
-		BUG();
+	if (!state->new_deq_seg) {
+		WARN_ON(1);
+		return;
+	}
+
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg(xhci, "Finding endpoint context\n");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
 			state->new_deq_ptr,
 			&state->new_cycle_state);
-	if (!state->new_deq_seg)
-		BUG();
+	if (!state->new_deq_seg) {
+		WARN_ON(1);
+		return;
+	}
 
 	trb = &state->new_deq_ptr->generic;
 	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 
 		/* Scatter gather list entries may cross 64KB boundaries */
 		running_total = TRB_MAX_BUFF_SIZE -
-			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+		running_total &= TRB_MAX_BUFF_SIZE - 1;
 		if (running_total != 0)
 			num_trbs++;
 
 		/* How many more 64KB chunks to transfer, how many more TRBs? */
-		while (running_total < sg_dma_len(sg)) {
+		while (running_total < sg_dma_len(sg) && running_total < temp) {
 			num_trbs++;
 			running_total += TRB_MAX_BUFF_SIZE;
 		}
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
 	if (num_trbs != 0)
-		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
 				"TRBs, %d left\n", __func__,
 				urb->ep->desc.bEndpointAddress, num_trbs);
 	if (running_total != urb->transfer_buffer_length)
-		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
 				"queued %#x (%d), asked for %#x (%d)\n",
 				__func__,
 				urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	sg = urb->sg;
 	addr = (u64) sg_dma_address(sg);
 	this_sg_len = sg_dma_len(sg);
-	trb_buff_len = TRB_MAX_BUFF_SIZE -
-		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
 	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
 	if (trb_buff_len > urb->transfer_buffer_length)
 		trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
 				(unsigned int) addr + trb_buff_len);
 		if (TRB_MAX_BUFF_SIZE -
-				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
 			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
 			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
 					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		}
 
 		trb_buff_len = TRB_MAX_BUFF_SIZE -
-			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			(addr & (TRB_MAX_BUFF_SIZE - 1));
 		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
 		if (running_total + trb_buff_len > urb->transfer_buffer_length)
 			trb_buff_len =
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	num_trbs = 0;
 	/* How much data is (potentially) left before the 64KB boundary? */
 	running_total = TRB_MAX_BUFF_SIZE -
-		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+	running_total &= TRB_MAX_BUFF_SIZE - 1;
 
 	/* If there's some data on this 64KB chunk, or we have to send a
 	 * zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* How much data is in the first TRB? */
 	addr = (u64) urb->transfer_dma;
 	trb_buff_len = TRB_MAX_BUFF_SIZE -
-		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
-	if (urb->transfer_buffer_length < trb_buff_len)
+		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+	if (trb_buff_len > urb->transfer_buffer_length)
 		trb_buff_len = urb->transfer_buffer_length;
 
 	first_trb = true;
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
 	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
 	td_len = urb->iso_frame_desc[i].length;
 
-	running_total = TRB_MAX_BUFF_SIZE -
-			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+	running_total &= TRB_MAX_BUFF_SIZE - 1;
 	if (running_total != 0)
 		num_trbs++;
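
Note: the xhci-ring.c hunks above rewrite the TRB boundary math. The open-coded ((1 << TRB_MAX_BUFF_SHIFT) - 1) mask becomes TRB_MAX_BUFF_SIZE - 1 (equivalent on the assumption that TRB_MAX_BUFF_SIZE is defined as 1 << TRB_MAX_BUFF_SHIFT), and running_total is masked once more so a buffer that starts exactly on a 64KB boundary contributes zero bytes of "partial first chunk" rather than a full TRB_MAX_BUFF_SIZE. A standalone sketch of the resulting counting logic, compiled as ordinary userspace C (the count_trbs() helper is not from the driver):

#include <stdio.h>

#define TRB_MAX_BUFF_SIZE (1u << 16)	/* one TRB moves at most 64KB */

/* Count TRBs for a contiguous buffer, mirroring the patched arithmetic. */
static unsigned int count_trbs(unsigned long long addr, unsigned int len)
{
	unsigned int num_trbs = 0;
	unsigned int running_total;

	/* Bytes from addr up to the next 64KB boundary; 0 when already aligned. */
	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;
	if (running_total != 0 || len == 0)
		num_trbs++;		/* partial first chunk, or a zero-length TRB */

	while (running_total < len) {	/* one more TRB per further 64KB chunk */
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	return num_trbs;
}

int main(void)
{
	printf("%u\n", count_trbs(0x10000, 0x20000));	/* aligned 128KB -> 2 */
	printf("%u\n", count_trbs(0x0f000, 0x02000));	/* crosses one 64KB boundary -> 2 */
	printf("%u\n", count_trbs(0x10000, 0));		/* zero-length transfer -> 1 */
	return 0;
}
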
 
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34cf4e165877..2083fc2179b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
 /*
  * Set the run bit and wait for the host to be running.
  */
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
 {
 	u32 temp;
 	int ret;
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
 
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
 {
 	unsigned long flags;
 	int temp;
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
 			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
 			&xhci->ir_set->irq_pending);
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
 	if (NUM_TEST_NOOPS > 0)
 		doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
 			&xhci->ir_set->irq_pending);
-	xhci_print_ir_set(xhci, xhci->ir_set, 0);
+	xhci_print_ir_set(xhci, 0);
 
 	xhci_dbg(xhci, "cleaning up memory\n");
 	xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
 		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
 				&xhci->ir_set->irq_pending);
-		xhci_print_ir_set(xhci, xhci->ir_set, 0);
+		xhci_print_ir_set(xhci, 0);
 
 		xhci_dbg(xhci, "cleaning up memory\n");
 		xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
 /* Returns 1 if the arguments are OK;
  * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
  */
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
 		const char *func) {
 	struct xhci_hcd	*xhci;
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
 	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
 }
 
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		struct xhci_dequeue_state *deq_state)
 {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7f236fd22015..7f127df6dd55 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
 }
 
 /* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
 void xhci_dbg_regs(struct xhci_hcd *xhci);
 void xhci_print_run_regs(struct xhci_hcd *xhci);
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index eeba228eb2af..9d49d1cd7ce2 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -404,6 +404,7 @@ static int bfin_musb_init(struct musb *musb)
 		musb->xceiv->set_power = bfin_musb_set_power;
 
 	musb->isr = blackfin_interrupt;
+	musb->double_buffer_not_ok = true;
 
 	return 0;
 }
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 07cf394e491b..c292d5c499e7 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -128,12 +128,7 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
 
 static inline struct musb *dev_to_musb(struct device *dev)
 {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	/* usbcore insists dev->driver_data is a "struct hcd *" */
-	return hcd_to_musb(dev_get_drvdata(dev));
-#else
 	return dev_get_drvdata(dev);
-#endif
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1869,6 +1864,7 @@ allocate_instance(struct device *dev,
 	INIT_LIST_HEAD(&musb->out_bulk);
 
 	hcd->uses_new_polling = 1;
+	hcd->has_tt = 1;
 
 	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
 	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
@@ -1876,10 +1872,9 @@ allocate_instance(struct device *dev,
 	musb = kzalloc(sizeof *musb, GFP_KERNEL);
 	if (!musb)
 		return NULL;
-	dev_set_drvdata(dev, musb);
 
 #endif
-
+	dev_set_drvdata(dev, musb);
 	musb->mregs = mbase;
 	musb->ctrl_base = mbase;
 	musb->nIrq = -ENODEV;
@@ -2191,7 +2186,7 @@ static int __init musb_probe(struct platform_device *pdev)
 	void __iomem	*base;
 
 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iomem || irq == 0)
+	if (!iomem || irq <= 0)
 		return -ENODEV;
 
 	base = ioremap(iomem->start, resource_size(iomem));
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d0c236f8e191..e6400be8a0f8 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -497,6 +497,19 @@ struct musb {
 	struct usb_gadget_driver *gadget_driver;	/* its driver */
 #endif
 
+	/*
+	 * FIXME: Remove this flag.
+	 *
+	 * This is only added to allow Blackfin to work
+	 * with the current driver. For some unknown reason
+	 * Blackfin doesn't work with double buffering,
+	 * which is enabled by default.
+	 *
+	 * We added this flag to forcibly disable double
+	 * buffering until we get it working.
+	 */
+	unsigned                double_buffer_not_ok:1 __deprecated;
+
 	struct musb_hdrc_config	*config;
 
 #ifdef MUSB_CONFIG_PROC_FS
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 916065ba9e70..3a97c4e2d4f5 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -169,6 +169,9 @@ struct dma_controller {
 							dma_addr_t dma_addr,
 							u32 length);
 	int			(*channel_abort)(struct dma_channel *);
+	int			(*is_compatible)(struct dma_channel *channel,
+							u16 maxpacket,
+							void *buf, u32 length);
 };
 
 /* called after channel_program(), may indicate a fault */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ed58c6c8f15c..2fe304611dcf 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,33 @@
 
 /* ----------------------------------------------------------------------- */
 
+#define is_buffer_mapped(req) (is_dma_capable() && \
+					(req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma  */
 
 static inline void map_dma_buffer(struct musb_request *request,
-				struct musb *musb)
+			struct musb *musb, struct musb_ep *musb_ep)
 {
+	int compatible = true;
+	struct dma_controller *dma = musb->dma_controller;
+
+	request->map_state = UN_MAPPED;
+
+	if (!is_dma_capable() || !musb_ep->dma)
+		return;
+
+	/* Check if DMA engine can handle this request.
+	 * DMA code must reject the USB request explicitly.
+	 * Default behaviour is to map the request.
+	 */
+	if (dma->is_compatible)
+		compatible = dma->is_compatible(musb_ep->dma,
+				musb_ep->packet_sz, request->request.buf,
+				request->request.length);
+	if (!compatible)
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		request->request.dma = dma_map_single(
 				musb->controller,
@@ -105,7 +127,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 1;
+		request->map_state = MUSB_MAPPED;
 	} else {
 		dma_sync_single_for_device(musb->controller,
 			request->request.dma,
@@ -113,7 +135,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-		request->mapped = 0;
+		request->map_state = PRE_MAPPED;
 	}
 }
 
@@ -121,11 +143,14 @@ static inline void map_dma_buffer(struct musb_request *request,
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
+	if (!is_buffer_mapped(request))
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		DBG(20, "not unmapping a never mapped buffer\n");
 		return;
 	}
-	if (request->mapped) {
+	if (request->map_state == MUSB_MAPPED) {
 		dma_unmap_single(musb->controller,
 			request->request.dma,
 			request->request.length,
@@ -133,16 +158,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
+	} else { /* PRE_MAPPED */
 		dma_sync_single_for_cpu(musb->controller,
 			request->request.dma,
 			request->request.length,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-
 	}
+	request->map_state = UN_MAPPED;
 }
 
 /*
@@ -172,8 +196,7 @@ __acquires(ep->musb->lock)
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable() && ep->dma)
-		unmap_dma_buffer(req, musb);
+	unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p,  %d/%d\n",
 				ep->end_point.name, request,
@@ -335,7 +358,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 			csr);
 
 #ifndef	CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		size_t request_size;
 
@@ -436,8 +459,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 		 * Unmap the dma buffer back to cpu if dma channel
 		 * programming fails
 		 */
-		if (is_dma_capable() && musb_ep->dma)
-			unmap_dma_buffer(req, musb);
+		unmap_dma_buffer(req, musb);
 
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
@@ -627,7 +649,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() && is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		struct dma_channel	*channel = musb_ep->dma;
 
@@ -658,7 +680,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -742,7 +764,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			fifo_count = min_t(unsigned, len, fifo_count);
 
 #ifdef	CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap() && musb_ep->dma) {
+			if (tusb_dma_omap() && is_buffer_mapped(req)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -762,7 +784,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			 if (is_dma_capable() && musb_ep->dma) {
+			 if (is_buffer_mapped(req)) {
 				unmap_dma_buffer(req, musb);
 
 				/*
@@ -989,7 +1011,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		/* Set TXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));
 
 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
 		if (musb_readw(regs, MUSB_TXCSR)
@@ -1025,7 +1051,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		/* Set RXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));
 
 		/* force shared fifo to OUT-only mode */
 		if (hw_ep->is_shared_fifo) {
@@ -1214,10 +1244,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma)
-		map_dma_buffer(request, musb);
-	else
-		request->mapped = 0;
+	map_dma_buffer(request, musb, musb_ep);
 
 	spin_lock_irqsave(&musb->lock, lockflags);
 
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index dec8dc008191..a55354fbccf5 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,13 +35,19 @@
 #ifndef __MUSB_GADGET_H
 #define __MUSB_GADGET_H
 
+enum buffer_map_state {
+	UN_MAPPED = 0,
+	PRE_MAPPED,
+	MUSB_MAPPED
+};
+
 struct musb_request {
 	struct usb_request	request;
 	struct musb_ep		*ep;
 	struct musb		*musb;
 	u8 tx;			/* endpoint direction */
 	u8 epnum;
-	u8 mapped;
+	enum buffer_map_state map_state;
 };
 
 static inline struct musb_request *to_musb_request(struct usb_request *req)
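
Note: with the enum above, the old single-bit mapped flag becomes a three-way map_state, which is what is_buffer_mapped() and the reworked map/unmap helpers in musb_gadget.c key off. A condensed sketch of the lifecycle (the sketch_req structure, helper names, and the locally defined sentinel are illustrative, not the driver's definitions):

#include <linux/device.h>
#include <linux/dma-mapping.h>

#define SKETCH_DMA_ADDR_INVALID	(~(dma_addr_t)0)	/* stand-in for the driver's sentinel */

enum buffer_map_state { UN_MAPPED = 0, PRE_MAPPED, MUSB_MAPPED };

/* Hypothetical request shape: just enough to show the state transitions. */
struct sketch_req {
	void			*buf;
	size_t			len;
	dma_addr_t		dma;
	enum dma_data_direction	dir;
	enum buffer_map_state	map_state;
};

static void sketch_map(struct device *dev, struct sketch_req *r)
{
	if (r->dma == SKETCH_DMA_ADDR_INVALID) {
		r->dma = dma_map_single(dev, r->buf, r->len, r->dir);
		r->map_state = MUSB_MAPPED;	/* this layer created the mapping */
	} else {
		dma_sync_single_for_device(dev, r->dma, r->len, r->dir);
		r->map_state = PRE_MAPPED;	/* caller handed in an existing mapping */
	}
}

static void sketch_unmap(struct device *dev, struct sketch_req *r)
{
	if (r->map_state == MUSB_MAPPED) {
		dma_unmap_single(dev, r->dma, r->len, r->dir);
		r->dma = SKETCH_DMA_ADDR_INVALID;
	} else if (r->map_state == PRE_MAPPED) {
		dma_sync_single_for_cpu(dev, r->dma, r->len, r->dir);
	}
	r->map_state = UN_MAPPED;	/* UN_MAPPED requests skip DMA handling entirely */
}
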
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 4d5bcb4e14d2..0f523d7db57b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -609,7 +609,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
 	/* Set RXMAXP with the FIFO size of the endpoint
 	 * to disable double buffer mode.
 	 */
-	if (musb->hwvers < MUSB_HWVERS_2000)
+	if (musb->double_buffer_not_ok)
 		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
 	else
 		musb_writew(ep->regs, MUSB_RXMAXP,
@@ -784,14 +784,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 		/* protocol/endpoint/interval/NAKlimit */
 		if (epnum) {
 			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
-			if (can_bulk_split(musb, qh->type))
+			if (musb->double_buffer_not_ok)
 				musb_writew(epio, MUSB_TXMAXP,
-					packet_sz
-					| ((hw_ep->max_packet_sz_tx /
-						packet_sz) - 1) << 11);
+						hw_ep->max_packet_sz_tx);
 			else
 				musb_writew(epio, MUSB_TXMAXP,
-					packet_sz);
+						qh->maxpacket |
+						((qh->hb_mult - 1) << 11));
 			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
 		} else {
 			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index f763d62f151c..21056c924c74 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -94,24 +94,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
 {
 	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
-		((u16)((u32) dma_addr & 0xFFFF)));
+		dma_addr);
 	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
-		((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
+		(dma_addr >> 16));
 }
 
 static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
 {
-	return musb_readl(mbase,
+	u32 count = musb_readw(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+	count = count << 16;
+
+	count |= musb_readw(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+	return count;
 }
 
 static inline void musb_write_hsdma_count(void __iomem *mbase,
 				u8 bchannel, u32 len)
 {
-	musb_writel(mbase,
+	musb_writew(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),len);
+	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
-		len);
+		(len >> 16));
 }
 
 #endif /* CONFIG_BLACKFIN */
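
Note: the musbhsdma.h hunk stops treating the DMA address and count as single 32-bit registers and instead accesses the low and high 16-bit halves separately with musb_writew()/musb_readw(). The split itself is plain shift-and-mask arithmetic, shown here as a tiny standalone program (illustrative value, not the driver's register map):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t len = 0x0002A5F0;			/* example DMA byte count */

	uint16_t lo = (uint16_t)(len & 0xFFFF);		/* value for the COUNT_LOW register  */
	uint16_t hi = (uint16_t)(len >> 16);		/* value for the COUNT_HIGH register */

	uint32_t readback = ((uint32_t)hi << 16) | lo;	/* what musb_read_hsdma_count() rebuilds */

	printf("lo=0x%04X hi=0x%04X readback=0x%08X\n", lo, hi, readback);
	return 0;
}
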
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3f12333fc41..bc8badd16897 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb)
 
 static int omap2430_musb_exit(struct musb *musb)
 {
+	del_timer_sync(&musb_idle_timer);
 
 	omap2430_low_level_exit(musb);
 	otg_put_transceiver(musb->xceiv);
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 9fb875d5f09c..9ffc8237fb4b 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -103,6 +103,8 @@ config USB_MSM_OTG_72K
 	  required after resetting the hardware and power management.
 	  This driver is required even for peripheral only or host only
 	  mode configurations.
+	  This driver is not supported on boards, such as trout, which
+	  have an external PHY.
 
 config AB8500_USB
         tristate "AB8500 USB Transceiver Driver"
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4787c0cd063f..f349a3629d00 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -100,6 +100,7 @@ struct ftdi_sio_quirk {
 static int   ftdi_jtag_probe(struct usb_serial *serial);
 static int   ftdi_mtxorb_hack_setup(struct usb_serial *serial);
 static int   ftdi_NDI_device_setup(struct usb_serial *serial);
+static int   ftdi_stmclite_probe(struct usb_serial *serial);
 static void  ftdi_USB_UIRT_setup(struct ftdi_private *priv);
 static void  ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
 
@@ -123,6 +124,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
 	.port_probe = ftdi_HE_TIRA1_setup,
 };
 
+static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
+	.probe	= ftdi_stmclite_probe,
+};
+
 /*
  * The 8U232AM has the same API as the sio except for:
  * - it can support MUCH higher baudrates; up to:
@@ -616,6 +621,7 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
 	{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+	{ USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
 	{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
@@ -810,6 +816,8 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
 	{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
 	{ },					/* Optional parameter entry */
 	{ }					/* Terminating entry */
 };
@@ -1709,6 +1717,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
 }
 
 /*
+ * The first and second ports on STMCLite adapters are reserved for the JTAG
+ * interface and the fourth port for PIO.
+ */
+static int ftdi_stmclite_probe(struct usb_serial *serial)
+{
+	struct usb_device *udev = serial->dev;
+	struct usb_interface *interface = serial->interface;
+
+	dbg("%s", __func__);
+
+	if (interface == udev->actconfig->interface[2])
+		return 0;
+
+	dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
+
+	return -ENODEV;
+}
+
+/*
  * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
  * We have to correct it if we want to read from it.
  */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index ed160def8584..117e8e6f93c6 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -518,6 +518,12 @@
 #define RATOC_PRODUCT_ID_USB60F	0xb020
 
 /*
+ * Acton Research Corp.
+ */
+#define ACTON_VID		0x0647	/* Vendor ID */
+#define ACTON_SPECTRAPRO_PID	0x0100
+
+/*
  * Contec products (http://www.contec.com)
  * Submitted by Daniel Sangorrin
  */
@@ -1034,6 +1040,12 @@
 #define WHT_PID			0x0004 /* Wireless Handheld Terminal */
 
 /*
+ * STMicroelectonics
+ */
+#define ST_VID			0x0483
+#define ST_STMCLT1030_PID	0x3747 /* ST Micro Connect Lite STMCLT1030 */
+
+/*
  * Papouch products (http://www.papouch.com/)
  * Submitted by Folkert van Heusden
  */
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index cd769ef24f8a..3b246d93cf22 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2889,8 +2889,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
 
 	dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
 
-	edge_serial->product_info.FirmwareMajorVersion = fw->data[0];
-	edge_serial->product_info.FirmwareMinorVersion = fw->data[1];
+	edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
+	edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
 	edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
 
 	for (rec = ihex_next_binrec(rec); rec;
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 7481ff8a49e4..0457813eebee 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x1199, 0x68A3), 	/* Sierra Wireless Direct IP modems */
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
+	{ USB_DEVICE(0x0f3d, 0x68A3), 	/* Airprime/Sierra Wireless Direct IP modems */
+	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+	},
        { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
 
 	{ }
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index b2902f307b47..a910004f4079 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -369,9 +369,9 @@ failed_1port:
 
 static void __exit ti_exit(void)
 {
+	usb_deregister(&ti_usb_driver);
 	usb_serial_deregister(&ti_1port_device);
 	usb_serial_deregister(&ti_2port_device);
-	usb_deregister(&ti_usb_driver);
 }
 
 
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index b004b2a485c3..9c014e2ecd68 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb)
 		    __func__, status, endpoint);
 	} else {
 		tty = tty_port_tty_get(&port->port);
-		if (urb->actual_length) {
-			tty_insert_flip_string(tty, data, urb->actual_length);
-			tty_flip_buffer_push(tty);
-		} else
-			dbg("%s: empty read urb received", __func__);
-		tty_kref_put(tty);
+		if (tty) {
+			if (urb->actual_length) {
+				tty_insert_flip_string(tty, data,
+						urb->actual_length);
+				tty_flip_buffer_push(tty);
+			} else
+				dbg("%s: empty read urb received", __func__);
+			tty_kref_put(tty);
+		}
 
 		/* Resubmit urb so we continue receiving */
 		if (status != -ESHUTDOWN) {
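
Note: the usb_wwan_indat_callback() fix above checks for tty_port_tty_get() returning NULL (no tty currently attached) before touching the flip buffer, and still drops the reference afterwards. A reduced sketch of that pattern (helper name invented; this is not the full URB completion handler):

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void sketch_push_to_tty(struct tty_port *port, const unsigned char *data,
			       size_t len)
{
	struct tty_struct *tty = tty_port_tty_get(port);	/* takes a reference, may be NULL */

	if (!tty)
		return;		/* nothing has the port open: drop the data */

	if (len) {
		tty_insert_flip_string(tty, data, len);
		tty_flip_buffer_push(tty);
	}
	tty_kref_put(tty);	/* always balance tty_port_tty_get() */
}
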
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 15a5d89b7f39..1c11959a7d58 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
+#include <linux/usb/cdc.h>
 #include "visor.h"
 
 /*
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial,
 
 	dbg("%s", __func__);
 
+	/*
+	 * some Samsung Android phones in modem mode have the same ID
+	 * as SPH-I500, but they are ACM devices, so dont bind to them
+	 */
+	if (id->idVendor == SAMSUNG_VENDOR_ID &&
+		id->idProduct == SAMSUNG_SPH_I500_ID &&
+		serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
+		serial->dev->descriptor.bDeviceSubClass ==
+			USB_CDC_SUBCLASS_ACM)
+		return -ENODEV;
+
 	if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
 		dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
 			serial->dev->actconfig->desc.bConfigurationValue);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 24bd5d7c3deb..c1602b8c5594 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1397,6 +1397,13 @@ UNUSUAL_DEV(  0x0f19, 0x0105, 0x0100, 0x0100,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE ),
 
+/* Submitted by Nick Holloway */
+UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
+		"VTech",
+		"Kidizoom",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY ),
+
 /* Reported by Michael Stattmann <michael@stattmann.com> */
 UNUSUAL_DEV(  0x0fce, 0xd008, 0x0000, 0x0000,
 		"Sony Ericsson",
@@ -1890,6 +1897,13 @@ UNUSUAL_DEV(  0x1e68, 0x001b, 0x0000, 0x0000,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
 
+/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
+UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
+		"Coby Electronics",
+		"MP3 Player",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+
 UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
 		"ST",
 		"2A",
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3a7e9ff8a746..38e96ab90945 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -593,19 +593,17 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
 
 	/* get interface & functional clock objects */
 	hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
-	hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
+	if (IS_ERR(hdq_data->hdq_ick)) {
+		dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
+		ret = PTR_ERR(hdq_data->hdq_ick);
+		goto err_ick;
+	}
 
-	if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
-		dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
-		if (IS_ERR(hdq_data->hdq_ick)) {
-			ret = PTR_ERR(hdq_data->hdq_ick);
-			goto err_clk;
-		}
-		if (IS_ERR(hdq_data->hdq_fck)) {
-			ret = PTR_ERR(hdq_data->hdq_fck);
-			clk_put(hdq_data->hdq_ick);
-			goto err_clk;
-		}
+	hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
+	if (IS_ERR(hdq_data->hdq_fck)) {
+		dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
+		ret = PTR_ERR(hdq_data->hdq_fck);
+		goto err_fck;
 	}
 
 	hdq_data->hdq_usecount = 0;
@@ -665,10 +663,12 @@ err_fnclk:
 	clk_disable(hdq_data->hdq_ick);
 
 err_intfclk:
-	clk_put(hdq_data->hdq_ick);
 	clk_put(hdq_data->hdq_fck);
 
-err_clk:
+err_fck:
+	clk_put(hdq_data->hdq_ick);
+
+err_ick:
 	iounmap(hdq_data->hdq_base);
 
 err_ioremap:
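
Note: the omap_hdq_probe() rework above replaces the combined clock lookup with two sequential clk_get() calls, each with its own error label, so the unwind path releases exactly what has been acquired so far. A generic sketch of that acquire/unwind ladder (names are illustrative, not the actual probe; a real probe would stash these handles in driver data):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>

static int sketch_probe(struct device *dev, unsigned long start, unsigned long size)
{
	void __iomem *base;
	struct clk *ick, *fck;
	int ret;

	base = ioremap(start, size);
	if (!base)
		return -ENOMEM;			/* nothing else acquired yet */

	ick = clk_get(dev, "ick");
	if (IS_ERR(ick)) {
		ret = PTR_ERR(ick);
		goto err_ick;
	}

	fck = clk_get(dev, "fck");
	if (IS_ERR(fck)) {
		ret = PTR_ERR(fck);
		goto err_fck;
	}

	/* ... rest of the probe would go here ... */
	return 0;

	/* Labels run in reverse order of acquisition and fall through, so each
	 * failure point undoes exactly what was set up before it. */
err_fck:
	clk_put(ick);
err_ick:
	iounmap(base);
	return ret;
}
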
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 2e2400e7322e..31649b7b672f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -862,12 +862,12 @@ config SBC_EPX_C3_WATCHDOG
 
 # M68K Architecture
 
-config M548x_WATCHDOG
-	tristate "MCF548x watchdog support"
+config M54xx_WATCHDOG
+	tristate "MCF54xx watchdog support"
 	depends on M548x
 	help
 	  To compile this driver as a module, choose M here: the
-	  module will be called m548x_wdt.
+	  module will be called m54xx_wdt.
 
 # MIPS Architecture
 
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index dd776651917c..20e44c4782b3 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -106,7 +106,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
 # M32R Architecture
 
 # M68K Architecture
-obj-$(CONFIG_M548x_WATCHDOG) += m548x_wdt.o
+obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
 
 # MIPS Architecture
 obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
diff --git a/drivers/watchdog/m548x_wdt.c b/drivers/watchdog/m54xx_wdt.c
index cabbcfe1c847..4d43286074aa 100644
--- a/drivers/watchdog/m548x_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -1,7 +1,7 @@
 /*
- * drivers/watchdog/m548x_wdt.c
+ * drivers/watchdog/m54xx_wdt.c
  *
- * Watchdog driver for ColdFire MCF548x processors
+ * Watchdog driver for ColdFire MCF547x & MCF548x processors
  * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be>
  *
  * Adapted from the IXP4xx watchdog driver, which carries these notices:
@@ -29,8 +29,8 @@
 #include <linux/uaccess.h>
 
 #include <asm/coldfire.h>
-#include <asm/m548xsim.h>
-#include <asm/m548xgpt.h>
+#include <asm/m54xxsim.h>
+#include <asm/m54xxgpt.h>
 
 static int nowayout = WATCHDOG_NOWAYOUT;
 static unsigned int heartbeat = 30;	/* (secs) Default is 0.5 minute */
@@ -76,7 +76,7 @@ static void wdt_keepalive(void)
 	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
 }
 
-static int m548x_wdt_open(struct inode *inode, struct file *file)
+static int m54xx_wdt_open(struct inode *inode, struct file *file)
 {
 	if (test_and_set_bit(WDT_IN_USE, &wdt_status))
 		return -EBUSY;
@@ -86,7 +86,7 @@ static int m548x_wdt_open(struct inode *inode, struct file *file)
 	return nonseekable_open(inode, file);
 }
 
-static ssize_t m548x_wdt_write(struct file *file, const char *data,
+static ssize_t m54xx_wdt_write(struct file *file, const char *data,
 						size_t len, loff_t *ppos)
 {
 	if (len) {
@@ -112,10 +112,10 @@ static ssize_t m548x_wdt_write(struct file *file, const char *data,
 static const struct watchdog_info ident = {
 	.options	= WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
 				WDIOF_KEEPALIVEPING,
-	.identity	= "Coldfire M548x Watchdog",
+	.identity	= "Coldfire M54xx Watchdog",
 };
 
-static long m548x_wdt_ioctl(struct file *file, unsigned int cmd,
+static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd,
 							 unsigned long arg)
 {
 	int ret = -ENOTTY;
@@ -161,7 +161,7 @@ static long m548x_wdt_ioctl(struct file *file, unsigned int cmd,
 	return ret;
 }
 
-static int m548x_wdt_release(struct inode *inode, struct file *file)
+static int m54xx_wdt_release(struct inode *inode, struct file *file)
 {
 	if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
 		wdt_disable();
@@ -177,45 +177,45 @@ static int m548x_wdt_release(struct inode *inode, struct file *file)
 }
 
 
-static const struct file_operations m548x_wdt_fops = {
+static const struct file_operations m54xx_wdt_fops = {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
-	.write		= m548x_wdt_write,
-	.unlocked_ioctl	= m548x_wdt_ioctl,
-	.open		= m548x_wdt_open,
-	.release	= m548x_wdt_release,
+	.write		= m54xx_wdt_write,
+	.unlocked_ioctl	= m54xx_wdt_ioctl,
+	.open		= m54xx_wdt_open,
+	.release	= m54xx_wdt_release,
 };
 
-static struct miscdevice m548x_wdt_miscdev = {
+static struct miscdevice m54xx_wdt_miscdev = {
 	.minor		= WATCHDOG_MINOR,
 	.name		= "watchdog",
-	.fops		= &m548x_wdt_fops,
+	.fops		= &m54xx_wdt_fops,
 };
 
-static int __init m548x_wdt_init(void)
+static int __init m54xx_wdt_init(void)
 {
 	if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
-						"Coldfire M548x Watchdog")) {
+						"Coldfire M54xx Watchdog")) {
 		printk(KERN_WARNING
-				"Coldfire M548x Watchdog : I/O region busy\n");
+				"Coldfire M54xx Watchdog : I/O region busy\n");
 		return -EBUSY;
 	}
 	printk(KERN_INFO "ColdFire watchdog driver is loaded.\n");
 
-	return misc_register(&m548x_wdt_miscdev);
+	return misc_register(&m54xx_wdt_miscdev);
 }
 
-static void __exit m548x_wdt_exit(void)
+static void __exit m54xx_wdt_exit(void)
 {
-	misc_deregister(&m548x_wdt_miscdev);
+	misc_deregister(&m54xx_wdt_miscdev);
 	release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
 }
 
-module_init(m548x_wdt_init);
-module_exit(m548x_wdt_exit);
+module_init(m54xx_wdt_init);
+module_exit(m54xx_wdt_exit);
 
 MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
-MODULE_DESCRIPTION("Coldfire M548x Watchdog");
+MODULE_DESCRIPTION("Coldfire M54xx Watchdog");
 
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)");
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index db8c4c4ac880..24177272bcb8 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
 #ifdef CONFIG_PM_SLEEP
 static int xen_hvm_suspend(void *data)
 {
+	int err;
 	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
 	int *cancelled = data;
 
 	BUG_ON(!irqs_disabled());
 
+	err = sysdev_suspend(PMSG_SUSPEND);
+	if (err) {
+		printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
+		       err);
+		return err;
+	}
+
 	*cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
 
 	xen_hvm_post_suspend(*cancelled);
@@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data)
 		xen_timer_resume();
 	}
 
+	sysdev_resume();
+
 	return 0;
 }