author     Arnd Bergmann <arnd@arndb.de>  2012-08-23 17:02:42 +0200
committer  Arnd Bergmann <arnd@arndb.de>  2012-08-23 17:02:42 +0200
commit     1e72fe1fca4490474984d6356bbf66e2daa89f73 (patch)
tree       12d95820cbb2083a6c78cfe550a0f9b20b1246eb /drivers
parent     8a211d362cc94df2bb42323ab13d258650529bec (diff)
parent     a46d2619d7180bda12bad2bf15bbd0731dfc2dcf (diff)
download   linux-1e72fe1fca4490474984d6356bbf66e2daa89f73.tar.gz
Merge branch 'imx/fixes-for-3.6' of git://git.linaro.org/people/shawnguo/linux-2.6 into fixes
* 'imx/fixes-for-3.6' of git://git.linaro.org/people/shawnguo/linux-2.6:
  ARM: dts: imx51-babbage: fix esdhc cd/wp properties
  ARM: imx6: spin the cpu until hardware takes it down
  ARM i.MX6q: Add virtual 1/3.5 dividers in the LDB clock path

Also updates to Linux 3.6-rc2

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ac.c | 4
-rw-r--r--  drivers/acpi/acpica/achware.h | 12
-rw-r--r--  drivers/acpi/acpica/hwesleep.c | 19
-rw-r--r--  drivers/acpi/acpica/hwsleep.c | 20
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 22
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/button.c | 4
-rw-r--r--  drivers/acpi/fan.c | 4
-rw-r--r--  drivers/acpi/numa.c | 12
-rw-r--r--  drivers/acpi/pci_root.c | 11
-rw-r--r--  drivers/acpi/power.c | 4
-rw-r--r--  drivers/acpi/processor_driver.c | 2
-rw-r--r--  drivers/acpi/sbs.c | 2
-rw-r--r--  drivers/acpi/sleep.c | 75
-rw-r--r--  drivers/acpi/sysfs.c | 4
-rw-r--r--  drivers/acpi/thermal.c | 4
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/base/power/clock_ops.c | 3
-rw-r--r--  drivers/base/power/common.c | 4
-rw-r--r--  drivers/bcma/host_pci.c | 1
-rw-r--r--  drivers/bcma/sprom.c | 4
-rw-r--r--  drivers/block/drbd/drbd_main.c | 4
-rw-r--r--  drivers/char/agp/intel-agp.h | 39
-rw-r--r--  drivers/char/agp/intel-gtt.c | 60
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 2
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 1
-rw-r--r--  drivers/dma/imx-dma.c | 36
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 18
-rw-r--r--  drivers/gpio/gpio-langwell.c | 7
-rw-r--r--  drivers/gpio/gpio-msic.c | 2
-rw-r--r--  drivers/gpio/gpio-mxc.c | 5
-rw-r--r--  drivers/gpio/gpio-pxa.c | 30
-rw-r--r--  drivers/gpio/gpio-samsung.c | 14
-rw-r--r--  drivers/gpio/gpio-sch.c | 3
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 48
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fifo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_fifo.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 71
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 25
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 6
-rw-r--r--  drivers/iommu/exynos-iommu.c | 6
-rw-r--r--  drivers/iommu/intel-iommu.c | 26
-rw-r--r--  drivers/iommu/tegra-smmu.c | 17
-rw-r--r--  drivers/isdn/isdnloop/isdnloop.c | 12
-rw-r--r--  drivers/isdn/mISDN/layer2.c | 2
-rw-r--r--  drivers/leds/led-triggers.c | 2
-rw-r--r--  drivers/leds/leds-lp8788.c | 2
-rw-r--r--  drivers/leds/leds-renesas-tpu.c | 2
-rw-r--r--  drivers/mtd/maps/uclinux.c | 5
-rw-r--r--  drivers/net/appletalk/cops.c | 4
-rw-r--r--  drivers/net/appletalk/ltpc.c | 4
-rw-r--r--  drivers/net/cris/eth_v10.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 72
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 36
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 16
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 28
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/sense.c | 14
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 13
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 14
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 7
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 11
-rw-r--r--  drivers/net/irda/bfin_sir.c | 8
-rw-r--r--  drivers/net/macvtap.c | 3
-rw-r--r--  drivers/net/phy/mdio-mux-gpio.c | 1
-rw-r--r--  drivers/net/ppp/pptp.c | 4
-rw-r--r--  drivers/net/tun.c | 1
-rw-r--r--  drivers/net/usb/cdc-phonet.c | 1
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 1
-rw-r--r--  drivers/net/wireless/b43/main.c | 21
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/channel.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rs.c | 13
-rw-r--r--  drivers/net/wireless/libertas/cfg.c | 1
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 1
-rw-r--r--  drivers/net/wireless/libertas/main.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 68
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 71
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 3
-rw-r--r--  drivers/pinctrl/pinctrl-imx23.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-imx28.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-nomadik-db8500.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-nomadik.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-sirf.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-u300.c | 8
-rw-r--r--  drivers/platform/x86/classmate-laptop.c | 4
-rw-r--r--  drivers/platform/x86/fujitsu-tablet.c | 2
-rw-r--r--  drivers/platform/x86/hdaps.c | 2
-rw-r--r--  drivers/platform/x86/hp_accel.c | 2
-rw-r--r--  drivers/platform/x86/msi-laptop.c | 4
-rw-r--r--  drivers/platform/x86/panasonic-laptop.c | 4
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 12
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 2
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 2
-rw-r--r--  drivers/platform/x86/toshiba_bluetooth.c | 4
-rw-r--r--  drivers/platform/x86/xo15-ebook.c | 2
-rw-r--r--  drivers/rtc/interface.c | 2
-rw-r--r--  drivers/rtc/rtc-cmos.c | 1
-rw-r--r--  drivers/s390/char/sclp_sdias.c | 2
-rw-r--r--  drivers/sh/intc/core.c | 27
-rw-r--r--  drivers/usb/early/ehci-dbgp.c | 2
-rw-r--r--  drivers/vhost/Kconfig | 3
-rw-r--r--  drivers/vhost/Kconfig.tcm | 6
-rw-r--r--  drivers/vhost/Makefile | 2
-rw-r--r--  drivers/vhost/tcm_vhost.c | 1628
-rw-r--r--  drivers/vhost/tcm_vhost.h | 101
-rw-r--r--  drivers/zorro/zorro.c | 2
168 files changed, 2956 insertions(+), 774 deletions(-)
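
Many of the ACPI hunks below repeat one pattern: the resume/suspend callbacks gain an #ifdef CONFIG_PM_SLEEP guard so they compile away (and avoid "defined but not used" warnings) when sleep support is disabled, while SIMPLE_DEV_PM_OPS still provides the driver's dev_pm_ops table. The following is a minimal illustrative sketch of that pattern, not part of the patch; the "foo" driver name is hypothetical.

	#include <linux/device.h>
	#include <linux/pm.h>

	#ifdef CONFIG_PM_SLEEP
	/* only referenced by SIMPLE_DEV_PM_OPS when CONFIG_PM_SLEEP is set */
	static int foo_resume(struct device *dev)
	{
		/* re-read hardware state that may have changed while suspended */
		return 0;
	}
	#endif

	/* expands to an empty sleep-ops table when CONFIG_PM_SLEEP is not set */
	static SIMPLE_DEV_PM_OPS(foo_pm, NULL, foo_resume);
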
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index ac7034129f3f..d5fdd36190cc 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -69,7 +69,9 @@ static const struct acpi_device_id ac_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, ac_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_ac_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
 
 static struct acpi_driver acpi_ac_driver = {
@@ -313,6 +315,7 @@ static int acpi_ac_add(struct acpi_device *device)
 	return result;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_ac_resume(struct device *dev)
 {
 	struct acpi_ac *ac;
@@ -332,6 +335,7 @@ static int acpi_ac_resume(struct device *dev)
 		kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
 	return 0;
 }
+#endif
 
 static int acpi_ac_remove(struct acpi_device *device, int type)
 {
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 5ccb99ae3a6f..5de4ec72766d 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -83,22 +83,22 @@ acpi_status acpi_hw_clear_acpi_status(void);
 /*
  * hwsleep - sleep/wake support (Legacy sleep registers)
  */
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state);
 
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state);
 
-acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_wake(u8 sleep_state);
 
 /*
  * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
  */
 void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
 
-acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_sleep(u8 sleep_state);
 
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state);
 
-acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_wake(u8 sleep_state);
 
 /*
  * hwvalid - Port I/O with validation
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 48518dac5342..94996f9ae3ad 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -90,7 +90,6 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
  * FUNCTION:    acpi_hw_extended_sleep
  *
  * PARAMETERS:  sleep_state         - Which sleep state to enter
- *              flags               - ACPI_EXECUTE_GTS to run optional method
  *
  * RETURN:      Status
  *
@@ -100,7 +99,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_sleep(u8 sleep_state)
 {
 	acpi_status status;
 	u8 sleep_type_value;
@@ -125,12 +124,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
 
 	acpi_gbl_system_awake_and_running = FALSE;
 
-	/* Optionally execute _GTS (Going To Sleep) */
-
-	if (flags & ACPI_EXECUTE_GTS) {
-		acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
-	}
-
 	/* Flush caches, as per ACPI specification */
 
 	ACPI_FLUSH_CPU_CACHE();
@@ -172,7 +165,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
  * FUNCTION:    acpi_hw_extended_wake_prep
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *              flags               - ACPI_EXECUTE_BFS to run optional method
  *
  * RETURN:      Status
  *
@@ -181,7 +173,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
 {
 	acpi_status status;
 	u8 sleep_type_value;
@@ -200,11 +192,6 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
 				 &acpi_gbl_FADT.sleep_control);
 	}
 
-	/* Optionally execute _BFS (Back From Sleep) */
-
-	if (flags & ACPI_EXECUTE_BFS) {
-		acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
-	}
 	return_ACPI_STATUS(AE_OK);
 }
 
@@ -222,7 +209,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_wake(u8 sleep_state)
 {
 	ACPI_FUNCTION_TRACE(hw_extended_wake);
 
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 9960fe9ef533..3fddde056a5e 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -56,7 +56,6 @@ ACPI_MODULE_NAME("hwsleep")
  * FUNCTION:    acpi_hw_legacy_sleep
  *
  * PARAMETERS:  sleep_state         - Which sleep state to enter
- *              flags               - ACPI_EXECUTE_GTS to run optional method
  *
  * RETURN:      Status
  *
@@ -64,7 +63,7 @@ ACPI_MODULE_NAME("hwsleep")
  *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
  *
  ******************************************************************************/
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
 {
 	struct acpi_bit_register_info *sleep_type_reg_info;
 	struct acpi_bit_register_info *sleep_enable_reg_info;
@@ -110,12 +109,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
 		return_ACPI_STATUS(status);
 	}
 
-	/* Optionally execute _GTS (Going To Sleep) */
-
-	if (flags & ACPI_EXECUTE_GTS) {
-		acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
-	}
-
 	/* Get current value of PM1A control */
 
 	status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
@@ -214,7 +207,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
  * FUNCTION:    acpi_hw_legacy_wake_prep
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *              flags               - ACPI_EXECUTE_BFS to run optional method
  *
  * RETURN:      Status
  *
@@ -224,7 +216,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
 {
 	acpi_status status;
 	struct acpi_bit_register_info *sleep_type_reg_info;
@@ -275,11 +267,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
 		}
 	}
 
-	/* Optionally execute _BFS (Back From Sleep) */
-
-	if (flags & ACPI_EXECUTE_BFS) {
-		acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
-	}
 	return_ACPI_STATUS(status);
 }
 
@@ -288,7 +275,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
  * FUNCTION:    acpi_hw_legacy_wake
  *
  * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *              flags               - Reserved, set to zero
  *
  * RETURN:      Status
  *
@@ -297,7 +283,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_wake(u8 sleep_state)
 {
 	acpi_status status;
 
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index f8684bfe7907..1f165a750ae2 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("hwxfsleep")
 
 /* Local prototypes */
 static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id);
+acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
 
 /*
  * Dispatch table used to efficiently branch to the various sleep
@@ -235,7 +235,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
  *
  ******************************************************************************/
 static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
+acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
 {
 	acpi_status status;
 	struct acpi_sleep_functions *sleep_functions =
@@ -248,11 +248,11 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
 	 * use the extended sleep registers
 	 */
 	if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
-		status = sleep_functions->extended_function(sleep_state, flags);
+		status = sleep_functions->extended_function(sleep_state);
 	} else {
 		/* Legacy sleep */
 
-		status = sleep_functions->legacy_function(sleep_state, flags);
+		status = sleep_functions->legacy_function(sleep_state);
 	}
 
 	return (status);
@@ -262,7 +262,7 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
 	 * For the case where reduced-hardware-only code is being generated,
 	 * we know that only the extended sleep registers are available
 	 */
-	status = sleep_functions->extended_function(sleep_state, flags);
+	status = sleep_functions->extended_function(sleep_state);
 	return (status);
 
 #endif				/* !ACPI_REDUCED_HARDWARE */
@@ -349,7 +349,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
  * FUNCTION:    acpi_enter_sleep_state
  *
  * PARAMETERS:  sleep_state         - Which sleep state to enter
- *              flags               - ACPI_EXECUTE_GTS to run optional method
  *
  * RETURN:      Status
  *
@@ -357,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
  *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
  *
  ******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
 {
 	acpi_status status;
 
@@ -371,7 +370,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
 	}
 
 	status =
-	    acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID);
+	    acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
 	return_ACPI_STATUS(status);
 }
 
@@ -391,14 +390,14 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
  *              Called with interrupts DISABLED.
  *
  ******************************************************************************/
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
 {
 	acpi_status status;
 
 	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
 
 	status =
-	    acpi_hw_sleep_dispatch(sleep_state, flags,
+	    acpi_hw_sleep_dispatch(sleep_state,
 				   ACPI_WAKE_PREP_FUNCTION_ID);
 	return_ACPI_STATUS(status);
 }
@@ -423,8 +422,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
 
 	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
 
-
-	status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
+	status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID);
 	return_ACPI_STATUS(status);
 }
 
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index ff2c876ec412..45e3e1759fb8 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1052,6 +1052,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 /* this is needed to learn about changes made in suspended state */
 static int acpi_battery_resume(struct device *dev)
 {
@@ -1068,6 +1069,7 @@ static int acpi_battery_resume(struct device *dev)
 	acpi_battery_update(battery);
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
 
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 79d4c22f7a6d..314a3b84bbc7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -78,7 +78,9 @@ static int acpi_button_add(struct acpi_device *device);
 static int acpi_button_remove(struct acpi_device *device, int type);
 static void acpi_button_notify(struct acpi_device *device, u32 event);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_button_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
 
 static struct acpi_driver acpi_button_driver = {
@@ -310,6 +312,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
 	}
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_button_resume(struct device *dev)
 {
 	struct acpi_device *device = to_acpi_device(dev);
@@ -319,6 +322,7 @@ static int acpi_button_resume(struct device *dev)
 		return acpi_lid_send_state(device);
 	return 0;
 }
+#endif
 
 static int acpi_button_add(struct acpi_device *device)
 {
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 669d9ee80d16..bc36a476f1ab 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -53,8 +53,10 @@ static const struct acpi_device_id fan_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, fan_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_fan_suspend(struct device *dev);
 static int acpi_fan_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
 
 static struct acpi_driver acpi_fan_driver = {
@@ -184,6 +186,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_fan_suspend(struct device *dev)
 {
 	if (!dev)
@@ -207,6 +210,7 @@ static int acpi_fan_resume(struct device *dev)
 
 	return result;
 }
+#endif
 
 static int __init acpi_fan_init(void)
 {
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index e56f3be7b07d..cb31298ca684 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -237,6 +237,8 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
 	return 0;
 }
 
+static int __initdata parsed_numa_memblks;
+
 static int __init
 acpi_parse_memory_affinity(struct acpi_subtable_header * header,
 			   const unsigned long end)
@@ -250,8 +252,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
 	acpi_table_print_srat_entry(header);
 
 	/* let architecture-dependent part to do it */
-	acpi_numa_memory_affinity_init(memory_affinity);
-
+	if (!acpi_numa_memory_affinity_init(memory_affinity))
+		parsed_numa_memblks++;
 	return 0;
 }
 
@@ -304,8 +306,10 @@ int __init acpi_numa_init(void)
 
 	acpi_numa_arch_fixup();
 
-	if (cnt <= 0)
-		return cnt ?: -ENOENT;
+	if (cnt < 0)
+		return cnt;
+	else if (!parsed_numa_memblks)
+		return -ENOENT;
 	return 0;
 }
 
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ec54014c321c..72a2c98bc429 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -573,8 +573,15 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 			OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
 	if (pci_msi_enabled())
 		flags |= OSC_MSI_SUPPORT;
-	if (flags != base_flags)
-		acpi_pci_osc_support(root, flags);
+	if (flags != base_flags) {
+		status = acpi_pci_osc_support(root, flags);
+		if (ACPI_FAILURE(status)) {
+			dev_info(root->bus->bridge, "ACPI _OSC support "
+				"notification failed, disabling PCIe ASPM\n");
+			pcie_no_aspm();
+			flags = base_flags;
+		}
+	}
 
 	if (!pcie_ports_disabled
 	    && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 215ecd097408..fc1803414629 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -67,7 +67,9 @@ static const struct acpi_device_id power_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, power_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_power_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
 
 static struct acpi_driver acpi_power_driver = {
@@ -775,6 +777,7 @@ static int acpi_power_remove(struct acpi_device *device, int type)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_power_resume(struct device *dev)
 {
 	int result = 0, state;
@@ -803,6 +806,7 @@ static int acpi_power_resume(struct device *dev)
 
 	return result;
 }
+#endif
 
 int __init acpi_power_init(void)
 {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index ff8e04f2fab4..bfc31cb0dd3e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -437,7 +437,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
 		/* Normal CPU soft online event */
 		} else {
 			acpi_processor_ppc_has_changed(pr, 0);
-			acpi_processor_cst_has_changed(pr);
+			acpi_processor_hotplug(pr);
 			acpi_processor_reevaluate_tstate(pr, action);
 			acpi_processor_tstate_has_changed(pr);
 		}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index c0b9aa5faf4c..ff0740e0a9c2 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -988,6 +988,7 @@ static void acpi_sbs_rmdirs(void)
 #endif
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_sbs_resume(struct device *dev)
 {
 	struct acpi_sbs *sbs;
@@ -997,6 +998,7 @@ static int acpi_sbs_resume(struct device *dev)
 	acpi_sbs_callback(sbs);
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
 
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 7a7a9c929247..fdcdbb652915 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,36 +28,7 @@
 #include "internal.h"
 #include "sleep.h"
 
-u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
-static unsigned int gts, bfs;
-static int set_param_wake_flag(const char *val, struct kernel_param *kp)
-{
-	int ret = param_set_int(val, kp);
-
-	if (ret)
-		return ret;
-
-	if (kp->arg == (const char *)&gts) {
-		if (gts)
-			wake_sleep_flags |= ACPI_EXECUTE_GTS;
-		else
-			wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
-	}
-	if (kp->arg == (const char *)&bfs) {
-		if (bfs)
-			wake_sleep_flags |= ACPI_EXECUTE_BFS;
-		else
-			wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
-	}
-	return ret;
-}
-module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
-module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
-MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
-MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
-
 static u8 sleep_states[ACPI_S_STATE_COUNT];
-static bool pwr_btn_event_pending;
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
 {
@@ -110,6 +81,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
 
 #ifdef CONFIG_ACPI_SLEEP
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+static bool pwr_btn_event_pending;
 
 /*
  * The ACPI specification wants us to save NVS memory regions during hibernation
@@ -305,7 +277,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
 	switch (acpi_state) {
 	case ACPI_STATE_S1:
 		barrier();
-		status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags);
+		status = acpi_enter_sleep_state(acpi_state);
 		break;
 
 	case ACPI_STATE_S3:
@@ -319,8 +291,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
 	/* This violates the spec but is required for bug compatibility. */
 	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
 
-	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags);
+	/* Reprogram control registers */
+	acpi_leave_sleep_state_prep(acpi_state);
 
 	/* ACPI 3.0 specs (P62) says that it's the responsibility
 	 * of the OSPM to clear the status bit [ implying that the
@@ -603,9 +575,9 @@ static int acpi_hibernation_enter(void)
 	ACPI_FLUSH_CPU_CACHE();
 
 	/* This shouldn't return.  If it returns, we have a problem */
-	status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags);
-	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
+	status = acpi_enter_sleep_state(ACPI_STATE_S4);
+	/* Reprogram control registers */
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
 
 	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
 }
@@ -617,8 +589,8 @@ static void acpi_hibernation_leave(void)
 	 * enable it here.
 	 */
 	acpi_enable();
-	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
+	/* Reprogram control registers */
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
 	/* Check the hardware signature */
 	if (facs && s4_hardware_signature != facs->hardware_signature) {
 		printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -892,33 +864,7 @@ static void acpi_power_off(void)
 	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
 	printk(KERN_DEBUG "%s called\n", __func__);
 	local_irq_disable();
-	acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags);
-}
-
-/*
- * ACPI 2.0 created the optional _GTS and _BFS,
- * but industry adoption has been neither rapid nor broad.
- *
- * Linux gets into trouble when it executes poorly validated
- * paths through the BIOS, so disable _GTS and _BFS by default,
- * but do speak up and offer the option to enable them.
- */
-static void __init acpi_gts_bfs_check(void)
-{
-	acpi_handle dummy;
-
-	if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
-	{
-		printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
-		printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
-			"please notify linux-acpi@vger.kernel.org\n");
-	}
-	if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
-	{
-		printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
-		printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
-			"please notify linux-acpi@vger.kernel.org\n");
-	}
+	acpi_enter_sleep_state(ACPI_STATE_S5);
 }
 
 int __init acpi_sleep_init(void)
@@ -979,6 +925,5 @@ int __init acpi_sleep_init(void)
 	 * object can also be evaluated when the system enters S5.
 	 */
 	register_reboot_notifier(&tts_notifier);
-	acpi_gts_bfs_check();
 	return 0;
 }
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 240a24400976..7c3f98ba4afe 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
 {
 	int result = 0;
 
-	if (!strncmp(val, "enable", strlen("enable"))) {
+	if (!strncmp(val, "enable", sizeof("enable") - 1)) {
 		result = acpi_debug_trace(trace_method_name, trace_debug_level,
 					  trace_debug_layer, 0);
 		if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
 		goto exit;
 	}
 
-	if (!strncmp(val, "disable", strlen("disable"))) {
+	if (!strncmp(val, "disable", sizeof("disable") - 1)) {
 		int name = 0;
 		result = acpi_debug_trace((char *)&name, trace_debug_level,
 					  trace_debug_layer, 0);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 9fe90e9fecb5..edda74a43406 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -106,7 +106,9 @@ static const struct acpi_device_id  thermal_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_thermal_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
 
 static struct acpi_driver acpi_thermal_driver = {
@@ -1041,6 +1043,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_thermal_resume(struct device *dev)
 {
 	struct acpi_thermal *tz;
@@ -1075,6 +1078,7 @@ static int acpi_thermal_resume(struct device *dev)
 
 	return AE_OK;
 }
+#endif
 
 static int thermal_act(const struct dmi_system_id *d) {
 
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index d4386019af5d..96cce6d53195 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev)
 	{  
 		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
 			    dev->number);  
-		return error;  
+		return -ENOMEM;
 	}  
 	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
 			dev->number, iadev->pci->revision, base, iadev->irq);)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 869d7ff2227f..eb78e9640c4a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -169,8 +169,7 @@ void pm_clk_init(struct device *dev)
  */
 int pm_clk_create(struct device *dev)
 {
-	int ret = dev_pm_get_subsys_data(dev);
-	return ret < 0 ? ret : 0;
+	return dev_pm_get_subsys_data(dev);
 }
 
 /**
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index a14085cc613f..39c32529b833 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -24,7 +24,6 @@
 int dev_pm_get_subsys_data(struct device *dev)
 {
 	struct pm_subsys_data *psd;
-	int ret = 0;
 
 	psd = kzalloc(sizeof(*psd), GFP_KERNEL);
 	if (!psd)
@@ -40,7 +39,6 @@ int dev_pm_get_subsys_data(struct device *dev)
 		dev->power.subsys_data = psd;
 		pm_clk_init(dev);
 		psd = NULL;
-		ret = 1;
 	}
 
 	spin_unlock_irq(&dev->power.lock);
@@ -48,7 +46,7 @@ int dev_pm_get_subsys_data(struct device *dev)
 	/* kfree() verifies that its argument is nonzero. */
 	kfree(psd);
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
 
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 11b32d2642df..a6e5672c67e7 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -272,6 +272,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
 	{ 0, },
 };
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 26823d97fd9f..9ea4627dc0c2 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
 		/* for these chips OTP is always available */
 		present = true;
 		break;
-
+	case BCMA_CHIP_ID_BCM43228:
+		present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
+		break;
 	default:
 		present = false;
 		break;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2e0e7fc1dbba..dbe6135a2abe 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3537,9 +3537,9 @@ static void drbd_cleanup(void)
 }
 
 /**
- * drbd_congested() - Callback for pdflush
+ * drbd_congested() - Callback for the flusher thread
  * @congested_data:	User data
- * @bdi_bits:		Bits pdflush is currently interested in
+ * @bdi_bits:		Bits the BDI flusher thread is currently interested in
  *
  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
  */
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 57226424690c..6f007b6c240d 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -239,16 +239,45 @@
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG		0x016A
 #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB		0x0F00 /* VLV1 */
 #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG		0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB				0x0400 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_HASWELL_HB			0x0400 /* Desktop */
 #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG		0x0402
 #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG		0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB			0x0404 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG	0x0422
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB		0x0404 /* Mobile */
 #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG		0x0406
 #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG		0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB			0x0408 /* Server */
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG	0x0426
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB		0x0408 /* Server */
 #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG		0x040a
 #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG		0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV		0x0c16 /* SDV */
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB			0x0c04
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG	0x042a
+#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB		0x0c04
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG	0x0C02
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG	0x0C12
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG	0x0C22
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG	0x0C06
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG	0x0C16
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG	0x0C26
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG	0x0C0A
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG	0x0C1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG	0x0C2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG	0x0A02
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG	0x0A12
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG	0x0A22
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG	0x0A06
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG	0x0A16
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG	0x0A26
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG	0x0A0A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG	0x0A1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG	0x0A2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG	0x0D12
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG	0x0D22
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG	0x0D32
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG	0x0D16
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG	0x0D26
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG	0x0D36
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG	0x0D1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG	0x0D2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG	0x0D3A
 
 #endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9ed92ef5829b..08fc5cbb13cd 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1502,15 +1502,73 @@ static const struct intel_gtt_driver_description {
 	    "Haswell", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
 	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
 	    "Haswell", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
 	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
 	    "Haswell", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
 	    "Haswell", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV,
+	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
+	    "Haswell", &sandybridge_gtt_driver },
+	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
 	    "Haswell", &sandybridge_gtt_driver },
 	{ 0, NULL, NULL }
 };
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 89682fa8801e..c4be3519a587 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -807,6 +807,7 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
 MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
 #endif
 
+#ifdef CONFIG_PM_SLEEP
 static int tpm_tis_resume(struct device *dev)
 {
 	struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -816,6 +817,7 @@ static int tpm_tis_resume(struct device *dev)
 
 	return tpm_pm_resume(dev);
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
 
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index cdc02ac8f41a..503996a94a6a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -454,6 +454,7 @@ static int __init pcc_cpufreq_probe(void)
 					mem_resource->address_length);
 	if (pcch_virt_addr == NULL) {
 		pr_debug("probe: could not map shared mem region\n");
+		ret = -ENOMEM;
 		goto out_free;
 	}
 	pcch_hdr = pcch_virt_addr;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index fcfeb3cd8d31..5084975d793c 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -172,7 +172,8 @@ struct imxdma_engine {
 	struct device_dma_parameters	dma_parms;
 	struct dma_device		dma_device;
 	void __iomem			*base;
-	struct clk			*dma_clk;
+	struct clk			*dma_ahb;
+	struct clk			*dma_ipg;
 	spinlock_t			lock;
 	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
 	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
@@ -976,10 +977,20 @@ static int __init imxdma_probe(struct platform_device *pdev)
 		return 0;
 	}
 
-	imxdma->dma_clk = clk_get(NULL, "dma");
-	if (IS_ERR(imxdma->dma_clk))
-		return PTR_ERR(imxdma->dma_clk);
-	clk_enable(imxdma->dma_clk);
+	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(imxdma->dma_ipg)) {
+		ret = PTR_ERR(imxdma->dma_ipg);
+		goto err_clk;
+	}
+
+	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(imxdma->dma_ahb)) {
+		ret = PTR_ERR(imxdma->dma_ahb);
+		goto err_clk;
+	}
+
+	clk_prepare_enable(imxdma->dma_ipg);
+	clk_prepare_enable(imxdma->dma_ahb);
 
 	/* reset DMA module */
 	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -988,16 +999,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
 		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
-			kfree(imxdma);
-			return ret;
+			goto err_enable;
 		}
 
 		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
 			free_irq(MX1_DMA_INT, NULL);
-			kfree(imxdma);
-			return ret;
+			goto err_enable;
 		}
 	}
 
@@ -1094,7 +1103,10 @@ err_init:
 		free_irq(MX1_DMA_INT, NULL);
 		free_irq(MX1_DMA_ERR, NULL);
 	}
-
+err_enable:
+	clk_disable_unprepare(imxdma->dma_ipg);
+	clk_disable_unprepare(imxdma->dma_ahb);
+err_clk:
 	kfree(imxdma);
 	return ret;
 }
@@ -1114,7 +1126,9 @@ static int __exit imxdma_remove(struct platform_device *pdev)
 		free_irq(MX1_DMA_ERR, NULL);
 	}
 
-        kfree(imxdma);
+	clk_disable_unprepare(imxdma->dma_ipg);
+	clk_disable_unprepare(imxdma->dma_ahb);
+	kfree(imxdma);
 
         return 0;
 }
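
The imx-dma change above moves the driver to managed clock lookup and the prepare/enable pairing required by the common clock framework. The sketch below is illustrative only and not part of the patch; the probe function, its error handling, and the "ipg"/"ahb" names (taken from the hunk above) are used purely for illustration.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *ipg, *ahb;
		int ret;

		/* devm_clk_get() ties the clock reference to the device lifetime */
		ipg = devm_clk_get(&pdev->dev, "ipg");
		if (IS_ERR(ipg))
			return PTR_ERR(ipg);

		ahb = devm_clk_get(&pdev->dev, "ahb");
		if (IS_ERR(ahb))
			return PTR_ERR(ahb);

		/* clk_prepare_enable() adds the prepare step the old clk_enable() lacked */
		ret = clk_prepare_enable(ipg);
		if (ret)
			return ret;

		ret = clk_prepare_enable(ahb);
		if (ret) {
			clk_disable_unprepare(ipg);	/* undo the first clock on failure */
			return ret;
		}

		/* ... device setup would go here ... */
		return 0;
	}
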
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d52dbc6c54ab..24acd711e032 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1119,15 +1119,21 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
 static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
 {
 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+	struct tegra_dma *tdma = tdc->tdma;
+	int ret;
 
 	dma_cookie_init(&tdc->dma_chan);
 	tdc->config_init = false;
-	return 0;
+	ret = clk_prepare_enable(tdma->dma_clk);
+	if (ret < 0)
+		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
+	return ret;
 }
 
 static void tegra_dma_free_chan_resources(struct dma_chan *dc)
 {
 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+	struct tegra_dma *tdma = tdc->tdma;
 
 	struct tegra_dma_desc *dma_desc;
 	struct tegra_dma_sg_req *sg_req;
@@ -1163,6 +1169,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
 		list_del(&sg_req->node);
 		kfree(sg_req);
 	}
+	clk_disable_unprepare(tdma->dma_clk);
 }
 
 /* Tegra20 specific DMA controller information */
@@ -1255,6 +1262,13 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
 		}
 	}
 
+	/* Enable clock before accessing registers */
+	ret = clk_prepare_enable(tdma->dma_clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+		goto err_pm_disable;
+	}
+
 	/* Reset DMA controller */
 	tegra_periph_reset_assert(tdma->dma_clk);
 	udelay(2);
@@ -1265,6 +1279,8 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
 	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
 	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
 
+	clk_disable_unprepare(tdma->dma_clk);
+
 	INIT_LIST_HEAD(&tdma->dma_dev.channels);
 	for (i = 0; i < cdata->nr_channels; i++) {
 		struct tegra_dma_channel *tdc = &tdma->channels[i];
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index a1c8754f52cf..202a99207b7d 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -339,7 +339,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
 	resource_size_t start, len;
 	struct lnw_gpio *lnw;
 	u32 gpio_base;
-	int retval = 0;
+	int retval;
 	int ngpio = id->driver_data;
 
 	retval = pci_enable_device(pdev);
@@ -357,6 +357,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
 	base = ioremap_nocache(start, len);
 	if (!base) {
 		dev_err(&pdev->dev, "error mapping bar1\n");
+		retval = -EFAULT;
 		goto err3;
 	}
 	gpio_base = *((u32 *)base + 1);
@@ -381,8 +382,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
 
 	lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
 					    &lnw_gpio_irq_ops, lnw);
-	if (!lnw->domain)
+	if (!lnw->domain) {
+		retval = -ENOMEM;
 		goto err3;
+	}
 
 	lnw->reg_base = base;
 	lnw->chip.label = dev_name(&pdev->dev);
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 71a838f44501..b38986285868 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -99,7 +99,7 @@ static int msic_gpio_to_oreg(unsigned offset)
 	if (offset < 20)
 		return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
 
-	return INTEL_MSIC_GPIO1HV0CTLO + offset + 20;
+	return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
 }
 
 static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 4db460b6ecf7..80f44bb64a87 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -465,9 +465,8 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
 		goto out_iounmap;
 
 	port->bgc.gc.to_irq = mxc_gpio_to_irq;
-	port->bgc.gc.base = pdev->id * 32;
-	port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir);
-	port->bgc.data = port->bgc.read_reg(port->bgc.reg_set);
+	port->bgc.gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
+					     pdev->id * 32;
 
 	err = gpiochip_add(&port->bgc.gc);
 	if (err)
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 58a6a63a6ece..9cac88a65f78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -62,6 +62,7 @@ int pxa_last_gpio;
 
 #ifdef CONFIG_OF
 static struct irq_domain *domain;
+static struct device_node *pxa_gpio_of_node;
 #endif
 
 struct pxa_gpio_chip {
@@ -277,6 +278,24 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 				(value ? GPSR_OFFSET : GPCR_OFFSET));
 }
 
+#ifdef CONFIG_OF_GPIO
+static int pxa_gpio_of_xlate(struct gpio_chip *gc,
+			     const struct of_phandle_args *gpiospec,
+			     u32 *flags)
+{
+	if (gpiospec->args[0] > pxa_last_gpio)
+		return -EINVAL;
+
+	if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip)
+		return -EINVAL;
+
+	if (flags)
+		*flags = gpiospec->args[1];
+
+	return gpiospec->args[0] % 32;
+}
+#endif
+
 static int __devinit pxa_init_gpio_chip(int gpio_end,
 					int (*set_wake)(unsigned int, unsigned int))
 {
@@ -304,6 +323,11 @@ static int __devinit pxa_init_gpio_chip(int gpio_end,
 		c->get = pxa_gpio_get;
 		c->set = pxa_gpio_set;
 		c->to_irq = pxa_gpio_to_irq;
+#ifdef CONFIG_OF_GPIO
+		c->of_node = pxa_gpio_of_node;
+		c->of_xlate = pxa_gpio_of_xlate;
+		c->of_gpio_n_cells = 2;
+#endif
 
 		/* number of GPIOs on last bank may be less than 32 */
 		c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
@@ -488,6 +512,7 @@ static int pxa_gpio_nums(void)
 	return count;
 }
 
+#ifdef CONFIG_OF
 static struct of_device_id pxa_gpio_dt_ids[] = {
 	{ .compatible = "mrvl,pxa-gpio" },
 	{ .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO },
@@ -505,9 +530,9 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
 
 const struct irq_domain_ops pxa_irq_domain_ops = {
 	.map	= pxa_irq_domain_map,
+	.xlate	= irq_domain_xlate_twocell,
 };
 
-#ifdef CONFIG_OF
 static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
 {
 	int ret, nr_banks, nr_gpios, irq_base;
@@ -545,6 +570,7 @@ static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
 	}
 	domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0,
 				       &pxa_irq_domain_ops, NULL);
+	pxa_gpio_of_node = np;
 	return 0;
 err:
 	iounmap(gpio_reg_base);
@@ -653,7 +679,7 @@ static struct platform_driver pxa_gpio_driver = {
 	.probe		= pxa_gpio_probe,
 	.driver		= {
 		.name	= "pxa-gpio",
-		.of_match_table = pxa_gpio_dt_ids,
+		.of_match_table = of_match_ptr(pxa_gpio_dt_ids),
 	},
 };
 
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 92f7b2bb79d4..ba126cc04073 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,12 +2454,6 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
 		},
 	}, {
 		.chip	= {
-			.base	= EXYNOS5_GPC4(0),
-			.ngpio	= EXYNOS5_GPIO_C4_NR,
-			.label	= "GPC4",
-		},
-	}, {
-		.chip	= {
 			.base	= EXYNOS5_GPD0(0),
 			.ngpio	= EXYNOS5_GPIO_D0_NR,
 			.label	= "GPD0",
@@ -2513,6 +2507,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
 			.label	= "GPY6",
 		},
 	}, {
+		.chip	= {
+			.base	= EXYNOS5_GPC4(0),
+			.ngpio	= EXYNOS5_GPIO_C4_NR,
+			.label	= "GPC4",
+		},
+	}, {
 		.config	= &samsung_gpio_cfgs[9],
 		.irq_base = IRQ_EINT(0),
 		.chip	= {
@@ -2836,7 +2836,7 @@ static __init void exynos5_gpiolib_init(void)
 	}
 
 	/* need to set base address for gpc4 */
-	exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
+	exynos5_gpios_1[20].base = gpio_base1 + 0x2E0;
 
 	/* need to set base address for gpx */
 	chip = &exynos5_gpios_1[21];
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 424dce8e3f30..8707d4572a06 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -241,7 +241,8 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
 			break;
 
 		default:
-			return -ENODEV;
+			err = -ENODEV;
+			goto err_sch_gpio_core;
 	}
 
 	sch_gpio_core.dev = &pdev->dev;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 66d4a28ad5a2..0303935d10e2 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -119,7 +119,7 @@ static int edid_load(struct drm_connector *connector, char *name,
 {
 	const struct firmware *fw;
 	struct platform_device *pdev;
-	u8 *fwdata = NULL, *edid;
+	u8 *fwdata = NULL, *edid, *new_edid;
 	int fwsize, expected;
 	int builtin = 0, err = 0;
 	int i, valid_extensions = 0;
@@ -195,12 +195,14 @@ static int edid_load(struct drm_connector *connector, char *name,
 		    "\"%s\" for connector \"%s\"\n", valid_extensions,
 		    edid[0x7e], name, connector_name);
 		edid[0x7e] = valid_extensions;
-		edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
+		new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
 		    GFP_KERNEL);
-		if (edid == NULL) {
+		if (new_edid == NULL) {
 			err = -ENOMEM;
+			kfree(edid);
 			goto relfw_out;
 		}
+		edid = new_edid;
 	}
 
 	connector->display_info.raw_edid = edid;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ed22612bc847..a24ffbe97c01 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -346,11 +346,40 @@ static const struct pci_device_id pciidlist[] = {		/* aka */
 	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
 	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
 	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
 	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
 	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
 	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
 	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
-	INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
+	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
+	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
+	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
+	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
+	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
+	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
+	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
+	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
+	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
+	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
+	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
+	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
+	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
+	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
+	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
+	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
+	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
+	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
+	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
+	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
+	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
+	INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
+	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
+	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
+	INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
+	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
+	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
+	INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
 	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index da8b01fb1bf8..a9d58d72bb4d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -451,7 +451,6 @@ int i915_switch_context(struct intel_ring_buffer *ring,
 	struct drm_i915_file_private *file_priv = NULL;
 	struct i915_hw_context *to;
 	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
-	int ret;
 
 	if (dev_priv->hw_contexts_disabled)
 		return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 5af631e788c8..ff2819ea0813 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -291,6 +291,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	target_i915_obj = to_intel_bo(target_obj);
 	target_offset = target_i915_obj->gtt_offset;
 
+	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+	 * pipe_control writes because the gpu doesn't properly redirect them
+	 * through the ppgtt for non_secure batchbuffers. */
+	if (unlikely(IS_GEN6(dev) &&
+	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+	    !target_i915_obj->has_global_gtt_mapping)) {
+		i915_gem_gtt_bind_object(target_i915_obj,
+					 target_i915_obj->cache_level);
+	}
+
 	/* The target buffer should have appeared before us in the
 	 * exec_object list, so it should have a GTT space bound by now.
 	 */
@@ -399,16 +409,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		io_mapping_unmap_atomic(reloc_page);
 	}
 
-	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
-	 * pipe_control writes because the gpu doesn't properly redirect them
-	 * through the ppgtt for non_secure batchbuffers. */
-	if (unlikely(IS_GEN6(dev) &&
-	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-	    !target_i915_obj->has_global_gtt_mapping)) {
-		i915_gem_gtt_bind_object(target_i915_obj,
-					 target_i915_obj->cache_level);
-	}
-
 	/* and update the user's relocation entry */
 	reloc->presumed_offset = target_offset;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9fd25a435536..ee9b68f6bc36 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -361,7 +361,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->mm.gtt->needs_dmar)
+	/* don't map imported dma buf objects */
+	if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
 		return intel_gtt_map_memory(obj->pages,
 					    obj->base.size >> PAGE_SHIFT,
 					    &obj->sg_list,
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2f5388af8df9..7631807a2788 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,7 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+#ifdef CONFIG_PM
 static u32 calc_residency(struct drm_device *dev, const u32 reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -224,3 +225,14 @@ void i915_teardown_sysfs(struct drm_device *dev)
 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
 	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
 }
+#else
+void i915_setup_sysfs(struct drm_device *dev)
+{
+	return;
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+	return;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f6159765f1eb..a69a3d0d3acf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -869,6 +869,7 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
 	unsigned long bestppm, ppm, absppm;
 	int dotclk, flag;
 
+	flag = 0;
 	dotclk = target * 1000;
 	bestppm = 1000000;
 	ppm = absppm = 0;
@@ -3753,17 +3754,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 			continue;
 		}
 
-		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
-			/* Use VBT settings if we have an eDP panel */
-			unsigned int edp_bpc = dev_priv->edp.bpp / 3;
-
-			if (edp_bpc < display_bpc) {
-				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
-				display_bpc = edp_bpc;
-			}
-			continue;
-		}
-
 		/* Not one of the known troublemakers, check the EDID */
 		list_for_each_entry(connector, &dev->mode_config.connector_list,
 				    head) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0a56b9ab0f58..a6c426afaa7a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1174,10 +1174,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
 	pp = ironlake_get_pp_control(dev_priv);
-	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	/* We need to switch off panel power _and_ force vdd, for otherwise some
+	 * panels get very unhappy and cease to work. */
+	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
 
+	intel_dp->want_panel_vdd = false;
+
 	ironlake_wait_panel_off(intel_dp);
 }
 
@@ -1287,11 +1291,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 	 * ensure that we have vdd while we switch off the panel. */
 	ironlake_edp_panel_vdd_on(intel_dp);
 	ironlake_edp_backlight_off(intel_dp);
-	ironlake_edp_panel_off(intel_dp);
-
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	ironlake_edp_panel_off(intel_dp);
 	intel_dp_link_down(intel_dp);
-	ironlake_edp_panel_vdd_off(intel_dp, false);
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1326,11 +1328,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 		/* Switching the panel off requires vdd. */
 		ironlake_edp_panel_vdd_on(intel_dp);
 		ironlake_edp_backlight_off(intel_dp);
-		ironlake_edp_panel_off(intel_dp);
-
 		intel_dp_sink_dpms(intel_dp, mode);
+		ironlake_edp_panel_off(intel_dp);
 		intel_dp_link_down(intel_dp);
-		ironlake_edp_panel_vdd_off(intel_dp, false);
 
 		if (is_cpu_edp(intel_dp))
 			ironlake_edp_pll_off(encoder);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 84353559441c..132ab511b90c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -46,15 +46,16 @@
 })
 
 #define wait_for_atomic_us(COND, US) ({ \
-	int i, ret__ = -ETIMEDOUT;	\
-	for (i = 0; i < (US); i++) {	\
-		if ((COND)) {		\
-			ret__ = 0;	\
-			break;		\
-		}			\
-		udelay(1);		\
-	}				\
-	ret__;				\
+	unsigned long timeout__ = jiffies + usecs_to_jiffies(US);	\
+	int ret__ = 0;							\
+	while (!(COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			ret__ = -ETIMEDOUT;				\
+			break;						\
+		}							\
+		cpu_relax();						\
+	}								\
+	ret__;								\
 })
 
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
@@ -380,7 +381,6 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
 				    const struct drm_display_mode *mode,
 				    struct drm_display_mode *adjusted_mode);
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
-extern u32 intel_panel_get_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
 extern int intel_panel_setup_backlight(struct drm_device *dev);
 extern void intel_panel_enable_backlight(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1991a4408cf9..b9755f6378d8 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -486,9 +486,6 @@ int intel_setup_gmbus(struct drm_device *dev)
 		bus->dev_priv = dev_priv;
 
 		bus->adapter.algo = &gmbus_algorithm;
-		ret = i2c_add_adapter(&bus->adapter);
-		if (ret)
-			goto err;
 
 		/* By default use a conservative clock rate */
 		bus->reg0 = port | GMBUS_RATE_100KHZ;
@@ -498,6 +495,10 @@ int intel_setup_gmbus(struct drm_device *dev)
 			bus->force_bit = true;
 
 		intel_gpio_setup(bus, port);
+
+		ret = i2c_add_adapter(&bus->adapter);
+		if (ret)
+			goto err;
 	}
 
 	intel_i2c_reset(dev_priv->dev);
@@ -540,9 +541,6 @@ void intel_teardown_gmbus(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	if (dev_priv->gmbus == NULL)
-		return;
-
 	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
 		struct intel_gmbus *bus = &dev_priv->gmbus[i];
 		i2c_del_adapter(&bus->adapter);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 10c7d39034e1..3df4f5fa892a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -213,7 +213,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
 	return val;
 }
 
-u32 intel_panel_get_backlight(struct drm_device *dev)
+static u32 intel_panel_get_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
@@ -311,9 +311,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
 	if (dev_priv->backlight_level == 0)
 		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
 
-	dev_priv->backlight_enabled = true;
-	intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
-
 	if (INTEL_INFO(dev)->gen >= 4) {
 		uint32_t reg, tmp;
 
@@ -326,7 +323,7 @@ void intel_panel_enable_backlight(struct drm_device *dev,
 		 * we don't track the backlight dpms state, hence check whether
 		 * we have to do anything first. */
 		if (tmp & BLM_PWM_ENABLE)
-			return;
+			goto set_level;
 
 		if (dev_priv->num_pipe == 3)
 			tmp &= ~BLM_PIPE_SELECT_IVB;
@@ -347,6 +344,14 @@ void intel_panel_enable_backlight(struct drm_device *dev,
 			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
 		}
 	}
+
+set_level:
+	/* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
+	 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
+	 * registers are set.
+	 */
+	dev_priv->backlight_enabled = true;
+	intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
 }
 
 static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 94aabcaa3a67..58c07cdafb7e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3963,6 +3963,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 		DRM_ERROR("Force wake wait timed out\n");
 
 	I915_WRITE_NOTRACE(FORCEWAKE, 1);
+	POSTING_READ(FORCEWAKE);
 
 	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
 		DRM_ERROR("Force wake wait timed out\n");
@@ -3983,6 +3984,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
 		DRM_ERROR("Force wake wait timed out\n");
 
 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
+	POSTING_READ(FORCEWAKE_MT);
 
 	if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
 		DRM_ERROR("Force wake wait timed out\n");
@@ -4018,14 +4020,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
-	/* The below doubles as a POSTING_READ */
+	POSTING_READ(FORCEWAKE);
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
-	/* The below doubles as a POSTING_READ */
+	POSTING_READ(FORCEWAKE_MT);
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index bf0195a96d53..e2a73b38abe9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -227,31 +227,36 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 	 * number of bits based on the write domains has little performance
 	 * impact.
 	 */
-	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-	flags |= PIPE_CONTROL_TLB_INVALIDATE;
-	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
-	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-	/*
-	 * Ensure that any following seqno writes only happen when the render
-	 * cache is indeed flushed (but only if the caller actually wants that).
-	 */
-	if (flush_domains)
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		/*
+		 * Ensure that any following seqno writes only happen
+		 * when the render cache is indeed flushed.
+		 */
 		flags |= PIPE_CONTROL_CS_STALL;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		/*
+		 * TLB invalidate requires a post-sync write.
+		 */
+		flags |= PIPE_CONTROL_QW_WRITE;
+	}
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
 	intel_ring_emit(ring, flags);
 	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* lower dword */
-	intel_ring_emit(ring, 0); /* uppwer dword */
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
 	return 0;
@@ -289,8 +294,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	I915_WRITE_HEAD(ring, 0);
 	ring->write_tail(ring, 0);
 
-	/* Initialize the ring. */
-	I915_WRITE_START(ring, obj->gtt_offset);
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
@@ -316,6 +319,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 		}
 	}
 
+	/* Initialize the ring. This must happen _after_ we've cleared the ring
+	 * registers with the above sequence (the readback of the HEAD registers
+	 * also enforces ordering), otherwise the hw might lose the new ring
+	 * register values. */
+	I915_WRITE_START(ring, obj->gtt_offset);
 	I915_WRITE_CTL(ring,
 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 26a6a4d0d078..d172e9873131 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -444,13 +444,16 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
 	struct i2c_msg *msgs;
 	int i, ret = true;
 
+	/* Would be simpler to allocate both in one go? */
 	buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
 	if (!buf)
 		return false;
 
 	msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
-	if (!msgs)
+	if (!msgs) {
+		kfree(buf);
 		return false;
+	}
 
 	intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a4d7c500c97b..b69642d5d850 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -468,10 +468,11 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
 	unsigned int delta, tmpdelta;
-	unsigned int testr, testn, testm, testo;
+	int testr, testn, testm, testo;
 	unsigned int p, m, n;
-	unsigned int computed;
+	unsigned int computed, vco;
 	int tmp;
+	const unsigned int m_div_val[] = { 1, 2, 4, 8 };
 
 	m = n = p = 0;
 	vcomax = 1488000;
@@ -490,12 +491,13 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
 				if (delta == 0)
 					break;
 				for (testo = 5; testo < 33; testo++) {
-					computed = pllreffreq * (testn + 1) /
+					vco = pllreffreq * (testn + 1) /
 						(testr + 1);
-					if (computed < vcomin)
+					if (vco < vcomin)
 						continue;
-					if (computed > vcomax)
+					if (vco > vcomax)
 						continue;
+					computed = vco / (m_div_val[testm] * (testo + 1));
 					if (computed > clock)
 						tmpdelta = computed - clock;
 					else
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 77e564667b5c..240cf962c999 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -229,7 +229,7 @@ nouveau_i2c_init(struct drm_device *dev)
 			}
 			break;
 		case 6: /* NV50- DP AUX */
-			port->drive = entry[0];
+			port->drive = entry[0] & 0x0f;
 			port->sense = port->drive;
 			port->adapter.algo = &nouveau_dp_i2c_algo;
 			break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 1cdfd6e757ce..1866dbb49979 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -731,7 +731,6 @@ nouveau_card_init(struct drm_device *dev)
 			case 0xa3:
 			case 0xa5:
 			case 0xa8:
-			case 0xaf:
 				nva3_copy_create(dev);
 				break;
 			}
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
index cc82d799fc3b..c564c5e4c30a 100644
--- a/drivers/gpu/drm/nouveau/nv84_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv84_fifo.c
@@ -117,17 +117,22 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	unsigned long flags;
+	u32 save;
 
 	/* remove channel from playlist, will context switch if active */
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 	nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
 	nv50_fifo_playlist_update(dev);
 
+	save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+
 	/* tell any engines on this channel to unload their contexts */
 	nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
 	if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
 		NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
 
+	nv_wr32(dev, 0x002520, save);
+
 	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
@@ -184,10 +189,13 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv84_fifo_priv *priv = nv_engine(dev, engine);
 	int i;
+	u32 save;
 
 	/* set playlist length to zero, fifo will unload context */
 	nv_wr32(dev, 0x0032ec, 0);
 
+	save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+
 	/* tell all connected engines to unload their contexts */
 	for (i = 0; i < priv->base.channels; i++) {
 		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
@@ -199,6 +207,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
 		}
 	}
 
+	nv_wr32(dev, 0x002520, save);
 	nv_wr32(dev, 0x002140, 0);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 7c95c44e2887..4e712b10ebdb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -557,7 +557,7 @@ prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
 	nouveau_mem_exec(&exec, info->perflvl);
 
 	if (dev_priv->chipset < 0xd0)
-		nv_wr32(dev, 0x611200, 0x00003300);
+		nv_wr32(dev, 0x611200, 0x00003330);
 	else
 		nv_wr32(dev, 0x62c000, 0x03030300);
 }
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index d0d60e1e7f95..dac525b2994e 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -790,7 +790,7 @@ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	int ch = EVO_CURS(nv_crtc->index);
 
-	evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
+	evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
 	evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
index 1855ecbd843b..e98d144e6eb9 100644
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -294,6 +294,25 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
 	printk(" on channel 0x%010llx\n", (u64)inst << 12);
 }
 
+static int
+nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
+{
+	struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (likely(chid >= 0 && chid < priv->base.channels)) {
+		chan = dev_priv->channels.ptr[chid];
+		if (likely(chan))
+			ret = nouveau_finish_page_flip(chan, NULL);
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return ret;
+}
+
 static void
 nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 {
@@ -303,11 +322,21 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 	u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
 	u32 subc = (addr & 0x00070000);
 	u32 mthd = (addr & 0x00003ffc);
+	u32 show = stat;
+
+	if (stat & 0x00200000) {
+		if (mthd == 0x0054) {
+			if (!nve0_fifo_page_flip(dev, chid))
+				show &= ~0x00200000;
+		}
+	}
 
-	NV_INFO(dev, "PSUBFIFO %d:", unit);
-	nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
-	NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
-		unit, chid, subc, mthd, data);
+	if (show) {
+		NV_INFO(dev, "PFIFO%d:", unit);
+		nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+		NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+			unit, chid, subc, mthd, data);
+	}
 
 	nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
 	nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9e6f76fec527..c6fcb5b86a45 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -259,7 +259,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 		/* adjust pm to dpms changes BEFORE enabling crtcs */
 		radeon_pm_compute_clocks(rdev);
 		/* disable crtc pair power gating before programming */
-		if (ASIC_IS_DCE6(rdev))
+		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
 			atombios_powergate_crtc(crtc, ATOM_DISABLE);
 		atombios_enable_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -279,7 +279,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 		atombios_enable_crtc(crtc, ATOM_DISABLE);
 		radeon_crtc->enabled = false;
 		/* power gating is per-pair */
-		if (ASIC_IS_DCE6(rdev)) {
+		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) {
 			struct drm_crtc *other_crtc;
 			struct radeon_crtc *other_radeon_crtc;
 			list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
@@ -1531,12 +1531,12 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 				 * crtc virtual pixel clock.
 				 */
 				if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
-					if (ASIC_IS_DCE5(rdev))
-						return ATOM_DCPLL;
+					if (rdev->clock.dp_extclk)
+						return ATOM_PPLL_INVALID;
 					else if (ASIC_IS_DCE6(rdev))
 						return ATOM_PPLL0;
-					else if (rdev->clock.dp_extclk)
-						return ATOM_PPLL_INVALID;
+					else if (ASIC_IS_DCE5(rdev))
+						return ATOM_DCPLL;
 				}
 			}
 		}
@@ -1635,18 +1635,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
 static void atombios_crtc_prepare(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
 
+	radeon_crtc->in_mode_set = true;
 	/* pick pll */
 	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
 
+	/* disable crtc pair power gating before programming */
+	if (ASIC_IS_DCE6(rdev))
+		atombios_powergate_crtc(crtc, ATOM_DISABLE);
+
 	atombios_lock_crtc(crtc, ATOM_ENABLE);
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 }
 
 static void atombios_crtc_commit(struct drm_crtc *crtc)
 {
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
 	atombios_lock_crtc(crtc, ATOM_DISABLE);
+	radeon_crtc->in_mode_set = false;
 }
 
 static void atombios_crtc_disable(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e585a3b947eb..e93b80a6d4e9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1229,24 +1229,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
 
 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
-	save->vga_control[0] = RREG32(D1VGA_CONTROL);
-	save->vga_control[1] = RREG32(D2VGA_CONTROL);
 	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
 	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
-	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
-	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-	if (rdev->num_crtc >= 4) {
-		save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
-		save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
-		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
-		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
-	}
-	if (rdev->num_crtc >= 6) {
-		save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
-		save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
-		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
-		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
-	}
 
 	/* Stop all video */
 	WREG32(VGA_RENDER_CONTROL, 0);
@@ -1357,47 +1341,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
 	/* Unlock host access */
 	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
 	mdelay(1);
-	/* Restore video state */
-	WREG32(D1VGA_CONTROL, save->vga_control[0]);
-	WREG32(D2VGA_CONTROL, save->vga_control[1]);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
-		WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
-		WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
-	}
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
-	}
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
-	}
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
-	}
 	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
 }
 
@@ -1986,10 +1929,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	if (rdev->flags & RADEON_IS_IGP)
 		rdev->config.evergreen.tile_config |= 1 << 4;
 	else {
-		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-			rdev->config.evergreen.tile_config |= 1 << 4;
-		else
+		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+		case 0: /* four banks */
 			rdev->config.evergreen.tile_config |= 0 << 4;
+			break;
+		case 1: /* eight banks */
+			rdev->config.evergreen.tile_config |= 1 << 4;
+			break;
+		case 2: /* sixteen banks */
+		default:
+			rdev->config.evergreen.tile_config |= 2 << 4;
+			break;
+		}
 	}
 	rdev->config.evergreen.tile_config |= 0 << 8;
 	rdev->config.evergreen.tile_config |=
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c16554122ccd..e44a62a07fe3 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -788,6 +788,13 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
 	case V_030000_SQ_TEX_DIM_1D_ARRAY:
 	case V_030000_SQ_TEX_DIM_2D_ARRAY:
 		depth = 1;
+		break;
+	case V_030000_SQ_TEX_DIM_2D_MSAA:
+	case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+		surf.nsamples = 1 << llevel;
+		llevel = 0;
+		depth = 1;
+		break;
 	case V_030000_SQ_TEX_DIM_3D:
 		break;
 	default:
@@ -961,13 +968,15 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
 
 	if (track->db_dirty) {
 		/* Check stencil buffer */
-		if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
+		if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
+		    G_028800_STENCIL_ENABLE(track->db_depth_control)) {
 			r = evergreen_cs_track_validate_stencil(p);
 			if (r)
 				return r;
 		}
 		/* Check depth buffer */
-		if (G_028800_Z_ENABLE(track->db_depth_control)) {
+		if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
+		    G_028800_Z_ENABLE(track->db_depth_control)) {
 			r = evergreen_cs_track_validate_depth(p);
 			if (r)
 				return r;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index d3bd098e4e19..79347855d9bf 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1277,6 +1277,8 @@
 #define   S_028044_FORMAT(x)                           (((x) & 0x1) << 0)
 #define   G_028044_FORMAT(x)                           (((x) >> 0) & 0x1)
 #define   C_028044_FORMAT                              0xFFFFFFFE
+#define	    V_028044_STENCIL_INVALID			0
+#define	    V_028044_STENCIL_8				1
 #define   G_028044_TILE_SPLIT(x)                       (((x) >> 8) & 0x7)
 #define DB_Z_READ_BASE					0x28048
 #define DB_STENCIL_READ_BASE				0x2804c
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 9945d86d9001..853800e8582f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -574,10 +574,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	if (rdev->flags & RADEON_IS_IGP)
 		rdev->config.cayman.tile_config |= 1 << 4;
 	else {
-		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-			rdev->config.cayman.tile_config |= 1 << 4;
-		else
+		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+		case 0: /* four banks */
 			rdev->config.cayman.tile_config |= 0 << 4;
+			break;
+		case 1: /* eight banks */
+			rdev->config.cayman.tile_config |= 1 << 4;
+			break;
+		case 2: /* sixteen banks */
+		default:
+			rdev->config.cayman.tile_config |= 2 << 4;
+			break;
+		}
 	}
 	rdev->config.cayman.tile_config |=
 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 637280f541a3..d79c639ae739 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3789,3 +3789,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
 		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
 	}
 }
+
+/**
+ * r600_get_gpu_clock - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (R6xx-cayman).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	mutex_lock(&rdev->gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&rdev->gpu_clock_mutex);
+	return clock;
+}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ca87f7afaf23..3dab49cb1d4a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -764,8 +764,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
 	}
 
 	/* Check depth buffer */
-	if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
-		G_028800_Z_ENABLE(track->db_depth_control))) {
+	if (track->db_dirty &&
+	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
+	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+	     G_028800_Z_ENABLE(track->db_depth_control))) {
 		r = r600_cs_track_validate_db(p);
 		if (r)
 			return r;
@@ -1557,13 +1559,14 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
 					      u32 tiling_flags)
 {
 	struct r600_cs_track *track = p->track;
-	u32 nfaces, llevel, blevel, w0, h0, d0;
-	u32 word0, word1, l0_size, mipmap_size, word2, word3;
+	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
+	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
 	u32 height_align, pitch, pitch_align, depth_align;
-	u32 array, barray, larray;
+	u32 barray, larray;
 	u64 base_align;
 	struct array_mode_checker array_check;
 	u32 format;
+	bool is_array;
 
 	/* on legacy kernel we don't perform advanced check */
 	if (p->rdev == NULL)
@@ -1581,12 +1584,28 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
 	}
 	word1 = radeon_get_ib_value(p, idx + 1);
+	word2 = radeon_get_ib_value(p, idx + 2) << 8;
+	word3 = radeon_get_ib_value(p, idx + 3) << 8;
+	word4 = radeon_get_ib_value(p, idx + 4);
+	word5 = radeon_get_ib_value(p, idx + 5);
+	dim = G_038000_DIM(word0);
 	w0 = G_038000_TEX_WIDTH(word0) + 1;
+	pitch = (G_038000_PITCH(word0) + 1) * 8;
 	h0 = G_038004_TEX_HEIGHT(word1) + 1;
 	d0 = G_038004_TEX_DEPTH(word1);
+	format = G_038004_DATA_FORMAT(word1);
+	blevel = G_038010_BASE_LEVEL(word4);
+	llevel = G_038014_LAST_LEVEL(word5);
+	/* pitch in texels */
+	array_check.array_mode = G_038000_TILE_MODE(word0);
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = 1;
+	array_check.blocksize = r600_fmt_get_blocksize(format);
 	nfaces = 1;
-	array = 0;
-	switch (G_038000_DIM(word0)) {
+	is_array = false;
+	switch (dim) {
 	case V_038000_SQ_TEX_DIM_1D:
 	case V_038000_SQ_TEX_DIM_2D:
 	case V_038000_SQ_TEX_DIM_3D:
@@ -1599,29 +1618,25 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
 		break;
 	case V_038000_SQ_TEX_DIM_1D_ARRAY:
 	case V_038000_SQ_TEX_DIM_2D_ARRAY:
-		array = 1;
+		is_array = true;
 		break;
-	case V_038000_SQ_TEX_DIM_2D_MSAA:
 	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+		is_array = true;
+		/* fall through */
+	case V_038000_SQ_TEX_DIM_2D_MSAA:
+		array_check.nsamples = 1 << llevel;
+		llevel = 0;
+		break;
 	default:
 		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
 		return -EINVAL;
 	}
-	format = G_038004_DATA_FORMAT(word1);
 	if (!r600_fmt_is_valid_texture(format, p->family)) {
 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
 			 __func__, __LINE__, format);
 		return -EINVAL;
 	}
 
-	/* pitch in texels */
-	pitch = (G_038000_PITCH(word0) + 1) * 8;
-	array_check.array_mode = G_038000_TILE_MODE(word0);
-	array_check.group_size = track->group_size;
-	array_check.nbanks = track->nbanks;
-	array_check.npipes = track->npipes;
-	array_check.nsamples = 1;
-	array_check.blocksize = r600_fmt_get_blocksize(format);
 	if (r600_get_array_mode_alignment(&array_check,
 					  &pitch_align, &height_align, &depth_align, &base_align)) {
 		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1647,20 +1662,13 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
 		return -EINVAL;
 	}
 
-	word2 = radeon_get_ib_value(p, idx + 2) << 8;
-	word3 = radeon_get_ib_value(p, idx + 3) << 8;
-
-	word0 = radeon_get_ib_value(p, idx + 4);
-	word1 = radeon_get_ib_value(p, idx + 5);
-	blevel = G_038010_BASE_LEVEL(word0);
-	llevel = G_038014_LAST_LEVEL(word1);
 	if (blevel > llevel) {
 		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
 			 blevel, llevel);
 	}
-	if (array == 1) {
-		barray = G_038014_BASE_ARRAY(word1);
-		larray = G_038014_LAST_ARRAY(word1);
+	if (is_array) {
+		barray = G_038014_BASE_ARRAY(word5);
+		larray = G_038014_LAST_ARRAY(word5);
 
 		nfaces = larray - barray + 1;
 	}
@@ -1677,7 +1685,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
 		return -EINVAL;
 	}
 	/* using get ib will give us the offset into the mipmap bo */
-	word3 = radeon_get_ib_value(p, idx + 3) << 8;
 	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
 		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
 		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4b116ae75fc2..fd328f4c3ea8 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -602,6 +602,9 @@
 #define RLC_HB_WPTR                                       0x3f1c
 #define RLC_HB_WPTR_LSB_ADDR                              0x3f14
 #define RLC_HB_WPTR_MSB_ADDR                              0x3f18
+#define RLC_GPU_CLOCK_COUNT_LSB				  0x3f38
+#define RLC_GPU_CLOCK_COUNT_MSB				  0x3f3c
+#define RLC_CAPTURE_GPU_CLOCK_COUNT			  0x3f40
 #define RLC_MC_CNTL                                       0x3f44
 #define RLC_UCODE_CNTL                                    0x3f48
 #define RLC_UCODE_ADDR                                    0x3f2c
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5431af292408..99304194a65c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -300,6 +300,7 @@ struct radeon_bo_va {
 	uint64_t			soffset;
 	uint64_t			eoffset;
 	uint32_t			flags;
+	struct radeon_fence		*fence;
 	bool				valid;
 };
 
@@ -1533,6 +1534,7 @@ struct radeon_device {
 	unsigned 		debugfs_count;
 	/* virtual memory */
 	struct radeon_vm_manager	vm_manager;
+	struct mutex			gpu_clock_mutex;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -1733,11 +1735,11 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
 #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
 #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
-#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
-#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
-#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
-#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
-#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))
+#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
+#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
+#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
 
 /* Common functions */
 /* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f4af24310438..18c38d14c8cd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -255,13 +255,10 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
  * rv515
  */
 struct rv515_mc_save {
-	u32 d1vga_control;
-	u32 d2vga_control;
 	u32 vga_render_control;
 	u32 vga_hdp_control;
-	u32 d1crtc_control;
-	u32 d2crtc_control;
 };
+
 int rv515_init(struct radeon_device *rdev);
 void rv515_fini(struct radeon_device *rdev);
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -371,6 +368,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
 			unsigned num_gpu_pages,
 			struct radeon_sa_bo *vb);
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
 
 /*
  * rv770,rv730,rv710,rv740
@@ -389,11 +387,10 @@ void r700_cp_fini(struct radeon_device *rdev);
  * evergreen
  */
 struct evergreen_mc_save {
-	u32 vga_control[6];
 	u32 vga_render_control;
 	u32 vga_hdp_control;
-	u32 crtc_control[6];
 };
+
 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
@@ -472,5 +469,6 @@ int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
 void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
 void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+uint64_t si_get_gpu_clock(struct radeon_device *rdev);
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index b1e3820df363..f9c21f9d16bc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1263,6 +1263,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
 union igp_info {
 	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
 	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
 };
 
 bool radeon_atombios_sideport_present(struct radeon_device *rdev)
@@ -1390,27 +1392,50 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
 	u16 data_offset, size;
-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
+	union igp_info *igp_info;
 	u8 frev, crev;
 	u16 percentage = 0, rate = 0;
 
 	/* get any igp specific overrides */
 	if (atom_parse_data_header(mode_info->atom_context, index, &size,
 				   &frev, &crev, &data_offset)) {
-		igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
+		igp_info = (union igp_info *)
 			(mode_info->atom_context->bios + data_offset);
-		switch (id) {
-		case ASIC_INTERNAL_SS_ON_TMDS:
-			percentage = le16_to_cpu(igp_info->usDVISSPercentage);
-			rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
+		switch (crev) {
+		case 6:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
 			break;
-		case ASIC_INTERNAL_SS_ON_HDMI:
-			percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
-			rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
+		case 7:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
 			break;
-		case ASIC_INTERNAL_SS_ON_LVDS:
-			percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
-			rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
+		default:
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
 			break;
 		}
 		if (percentage)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 576f4f6919f2..f75247d42ffd 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -719,6 +719,34 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
 	return i2c;
 }
 
+static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct radeon_i2c_bus_rec i2c;
+	u16 offset;
+	u8 id, blocks, clk, data;
+	int i;
+
+	i2c.valid = false;
+
+	offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+	if (offset) {
+		blocks = RBIOS8(offset + 2);
+		for (i = 0; i < blocks; i++) {
+			id = RBIOS8(offset + 3 + (i * 5) + 0);
+			if (id == 136) {
+				clk = RBIOS8(offset + 3 + (i * 5) + 3);
+				data = RBIOS8(offset + 3 + (i * 5) + 4);
+				/* gpiopad */
+				i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
+							    (1 << clk), (1 << data));
+				break;
+			}
+		}
+	}
+	return i2c;
+}
+
 void radeon_combios_i2c_init(struct radeon_device *rdev)
 {
 	struct drm_device *dev = rdev->ddev;
@@ -755,30 +783,14 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
 	} else if (rdev->family == CHIP_RS300 ||
 		   rdev->family == CHIP_RS400 ||
 		   rdev->family == CHIP_RS480) {
-		u16 offset;
-		u8 id, blocks, clk, data;
-		int i;
-
 		/* 0x68 */
 		i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
 
-		offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
-		if (offset) {
-			blocks = RBIOS8(offset + 2);
-			for (i = 0; i < blocks; i++) {
-				id = RBIOS8(offset + 3 + (i * 5) + 0);
-				if (id == 136) {
-					clk = RBIOS8(offset + 3 + (i * 5) + 3);
-					data = RBIOS8(offset + 3 + (i * 5) + 4);
-					/* gpiopad */
-					i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
-								    (1 << clk), (1 << data));
-					rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
-					break;
-				}
-			}
-		}
+		/* gpiopad */
+		i2c = radeon_combios_get_i2c_info_from_table(rdev);
+		if (i2c.valid)
+			rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
 	} else if ((rdev->family == CHIP_R200) ||
 		   (rdev->family >= CHIP_R300)) {
 		/* 0x68 */
@@ -2321,7 +2333,10 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 			connector = (tmp >> 12) & 0xf;
 
 			ddc_type = (tmp >> 8) & 0xf;
-			ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+			if (ddc_type == 5)
+				ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
+			else
+				ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
 
 			switch (connector) {
 			case CONNECTOR_PROPRIETARY_LEGACY:
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 8a4c49ef0cc4..b4a0db24f4dd 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -278,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	return 0;
 }
 
+static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
+				  struct radeon_fence *fence)
+{
+	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_list *lobj;
+
+	if (parser->chunk_ib_idx == -1) {
+		return;
+	}
+	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
+		return;
+	}
+
+	list_for_each_entry(lobj, &parser->validated, tv.head) {
+		struct radeon_bo_va *bo_va;
+		struct radeon_bo *rbo = lobj->bo;
+
+		bo_va = radeon_bo_va(rbo, vm);
+		radeon_fence_unref(&bo_va->fence);
+		bo_va->fence = radeon_fence_ref(fence);
+	}
+}
+
 /**
  * cs_parser_fini() - clean parser states
  * @parser:	parser structure holding parsing context.
@@ -290,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-	if (!error)
+	if (!error) {
+		/* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
+		radeon_bo_vm_fence_va(parser, parser->ib.fence);
 		ttm_eu_fence_buffer_objects(&parser->validated,
 					    parser->ib.fence);
-	else
+	} else {
 		ttm_eu_backoff_reservation(&parser->validated);
+	}
 
 	if (parser->relocs != NULL) {
 		for (i = 0; i < parser->nrelocs; i++) {
@@ -388,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
 	if (parser->chunk_ib_idx == -1)
 		return 0;
-
 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
 		return 0;
 
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 711e95ad39bf..8794744cdf1a 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -67,7 +67,8 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
 
 	if (ASIC_IS_DCE4(rdev)) {
 		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
-		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
 	} else if (ASIC_IS_AVIVO(rdev)) {
 		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
@@ -94,7 +95,8 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
 	if (ASIC_IS_DCE4(rdev)) {
 		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
-		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
 	} else if (ASIC_IS_AVIVO(rdev)) {
 		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 742af8244e89..d2e243867ac6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1009,6 +1009,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	atomic_set(&rdev->ih.lock, 0);
 	mutex_init(&rdev->gem.mutex);
 	mutex_init(&rdev->pm.mutex);
+	mutex_init(&rdev->gpu_clock_mutex);
 	init_rwsem(&rdev->pm.mclk_lock);
 	init_rwsem(&rdev->exclusive_lock);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index dcea6f01ae4e..d7269f48d37c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -59,9 +59,12 @@
  *   2.15.0 - add max_pipes query
  *   2.16.0 - fix evergreen 2D tiled surface calculation
  *   2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
+ *   2.18.0 - r600-eg: allow "invalid" DB formats
+ *   2.19.0 - r600-eg: MSAA textures
+ *   2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	17
+#define KMS_DRIVER_MINOR	20
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b3720054614d..bb3b7fe05ccd 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -814,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		return -EINVAL;
 	}
 
-	if (bo_va->valid)
+	if (bo_va->valid && mem)
 		return 0;
 
 	ngpu_pages = radeon_bo_ngpu_pages(bo);
@@ -859,11 +859,27 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_bo *bo)
 {
 	struct radeon_bo_va *bo_va;
+	int r;
 
 	bo_va = radeon_bo_va(bo, vm);
 	if (bo_va == NULL)
 		return 0;
 
+	/* wait for va use to end */
+	while (bo_va->fence) {
+		r = radeon_fence_wait(bo_va->fence, false);
+		if (r) {
+			DRM_ERROR("error while waiting for fence: %d\n", r);
+		}
+		if (r == -EDEADLK) {
+			r = radeon_gpu_reset(rdev);
+			if (!r)
+				continue;
+		}
+		break;
+	}
+	radeon_fence_unref(&bo_va->fence);
+
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&vm->mutex);
 	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
@@ -934,7 +950,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 }
 
 /**
- * radeon_vm_init - tear down a vm instance
+ * radeon_vm_fini - tear down a vm instance
  *
  * @rdev: radeon_device pointer
  * @vm: requested vm
@@ -952,12 +968,15 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_vm_unbind_locked(rdev, vm);
 	mutex_unlock(&rdev->vm_manager.lock);
 
-	/* remove all bo */
+	/* remove all bo; at this point none are busy anymore because unbind
+	 * waited for the last vm fence to signal
+	 */
 	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 	if (!r) {
 		bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
 		list_del_init(&bo_va->bo_list);
 		list_del_init(&bo_va->vm_list);
+		radeon_fence_unref(&bo_va->fence);
 		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 		kfree(bo_va);
 	}
@@ -969,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 		r = radeon_bo_reserve(bo_va->bo, false);
 		if (!r) {
 			list_del_init(&bo_va->bo_list);
+			radeon_fence_unref(&bo_va->fence);
 			radeon_bo_unreserve(bo_va->bo);
 			kfree(bo_va);
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 84d045245739..1b57b0058ad6 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 	struct radeon_device *rdev = rbo->rdev;
 	struct radeon_fpriv *fpriv = file_priv->driver_priv;
 	struct radeon_vm *vm = &fpriv->vm;
-	struct radeon_bo_va *bo_va, *tmp;
 
 	if (rdev->family < CHIP_CAYMAN) {
 		return;
 	}
 
 	if (radeon_bo_reserve(rbo, false)) {
+		dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n");
 		return;
 	}
-	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
-		if (bo_va->vm == vm) {
-			/* remove from this vm address space */
-			mutex_lock(&vm->mutex);
-			list_del(&bo_va->vm_list);
-			mutex_unlock(&vm->mutex);
-			list_del(&bo_va->bo_list);
-			kfree(bo_va);
-		}
-	}
+	radeon_vm_bo_rmv(rdev, vm, rbo);
 	radeon_bo_unreserve(rbo);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 1d73f16b5d97..414b4acf6947 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -29,6 +29,7 @@
 #include "drm_sarea.h"
 #include "radeon.h"
 #include "radeon_drm.h"
+#include "radeon_asic.h"
 
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
@@ -167,17 +168,39 @@ static void radeon_set_filp_rights(struct drm_device *dev,
 int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
-	struct drm_radeon_info *info;
+	struct drm_radeon_info *info = data;
 	struct radeon_mode_info *minfo = &rdev->mode_info;
-	uint32_t *value_ptr;
-	uint32_t value;
+	uint32_t value, *value_ptr;
+	uint64_t value64, *value_ptr64;
 	struct drm_crtc *crtc;
 	int i, found;
 
-	info = data;
+	/* TIMESTAMP is a 64-bit value, needs special handling. */
+	if (info->request == RADEON_INFO_TIMESTAMP) {
+		if (rdev->family >= CHIP_R600) {
+			value_ptr64 = (uint64_t*)((unsigned long)info->value);
+			if (rdev->family >= CHIP_TAHITI) {
+				value64 = si_get_gpu_clock(rdev);
+			} else {
+				value64 = r600_get_gpu_clock(rdev);
+			}
+
+			if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
+				DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
+				return -EFAULT;
+			}
+			return 0;
+		} else {
+			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+			return -EINVAL;
+		}
+	}
+
 	value_ptr = (uint32_t *)((unsigned long)info->value);
-	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
+	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
+		DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
 		return -EFAULT;
+	}
 
 	switch (info->request) {
 	case RADEON_INFO_DEVICE_ID:
@@ -337,7 +360,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		return -EINVAL;
 	}
 	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
-		DRM_ERROR("copy_to_user\n");
+		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
 		return -EFAULT;
 	}
 	return 0;
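The radeon_kms.c hunk above splits RADEON_INFO_TIMESTAMP out of the generic path because it returns a 64-bit value, while every other request copies 32 bits. A hedged userspace sketch of how the new query would be used is below; it assumes a radeon_drm.h recent enough to define RADEON_INFO_TIMESTAMP and DRM_IOCTL_RADEON_INFO, and /dev/dri/card0 as the device node.

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

int main(void)
{
	struct drm_radeon_info info = { 0 };
	uint64_t clock = 0;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	info.request = RADEON_INFO_TIMESTAMP;
	info.value = (uintptr_t)&clock;		/* kernel writes 64 bits here on r6xx+ */

	if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info) == 0)
		printf("GPU clock counter: %llu\n", (unsigned long long)clock);
	else
		perror("RADEON_INFO_TIMESTAMP");	/* -EINVAL on pre-r600 parts */

	close(fd);
	return 0;
}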
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index d5fd615897ec..94b4a1c12893 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1025,9 +1025,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
 
 static void radeon_crtc_prepare(struct drm_crtc *crtc)
 {
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_crtc *crtci;
 
+	radeon_crtc->in_mode_set = true;
 	/*
 	* The hardware wedges sometimes if you reconfigure one CRTC
 	* whilst another is running (see fdo bug #24611).
@@ -1038,6 +1040,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc)
 
 static void radeon_crtc_commit(struct drm_crtc *crtc)
 {
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct drm_crtc *crtci;
 
@@ -1048,6 +1051,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
 		if (crtci->enabled)
 			radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
 	}
+	radeon_crtc->in_mode_set = false;
 }
 
 static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f380d59c5763..d56978949f34 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -275,6 +275,7 @@ struct radeon_crtc {
 	u16 lut_r[256], lut_g[256], lut_b[256];
 	bool enabled;
 	bool can_tile;
+	bool in_mode_set;
 	uint32_t crtc_offset;
 	struct drm_gem_object *cursor_bo;
 	uint64_t cursor_addr;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f1a4c803c1d..1cb014b571ab 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
 
 	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
 		/* remove from all vm address space */
-		mutex_lock(&bo_va->vm->mutex);
-		list_del(&bo_va->vm_list);
-		mutex_unlock(&bo_va->vm->mutex);
-		list_del(&bo_va->bo_list);
-		kfree(bo_va);
+		radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index a12fbcc8ccb6..aa8ef491ef3c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
 
 void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 {
-	save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
-	save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
 	save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
 	save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
-	save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
-	save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
 
 	/* Stop all video */
 	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
@@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
 	/* Unlock host access */
 	WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
 	mdelay(1);
-	/* Restore video state */
-	WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
-	WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
-	WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
-	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
-	WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
-	WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
-	WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
-	WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
 	WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
 }
 
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index c053f8193771..0139e227e3c7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1639,11 +1639,19 @@ static void si_gpu_init(struct radeon_device *rdev)
 		/* XXX what about 12? */
 		rdev->config.si.tile_config |= (3 << 0);
 		break;
-	}
-	if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-		rdev->config.si.tile_config |= 1 << 4;
-	else
+	}
+	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+	case 0: /* four banks */
 		rdev->config.si.tile_config |= 0 << 4;
+		break;
+	case 1: /* eight banks */
+		rdev->config.si.tile_config |= 1 << 4;
+		break;
+	case 2: /* sixteen banks */
+	default:
+		rdev->config.si.tile_config |= 2 << 4;
+		break;
+	}
 	rdev->config.si.tile_config |=
 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
 	rdev->config.si.tile_config |=
@@ -3960,3 +3968,22 @@ void si_fini(struct radeon_device *rdev)
 	rdev->bios = NULL;
 }
 
+/**
+ * si_get_gpu_clock - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (SI).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t si_get_gpu_clock(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	mutex_lock(&rdev->gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&rdev->gpu_clock_mutex);
+	return clock;
+}
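si_get_gpu_clock() above follows the usual latch-and-combine pattern for a 64-bit counter exposed as two 32-bit registers: strobe a capture register so both halves are sampled at the same instant, then read LSB and MSB and merge them, all under a mutex so concurrent callers can't interleave. A compilable sketch of just that pattern follows; reg_write()/reg_read() are hypothetical MMIO accessors, and the offsets are the ones added to sid.h in the next hunk.

#include <stdint.h>

#define RLC_CAPTURE_GPU_CLOCK_COUNT	0xC340
#define RLC_GPU_CLOCK_COUNT_LSB		0xC338
#define RLC_GPU_CLOCK_COUNT_MSB		0xC33C

/* Hypothetical MMIO accessors standing in for WREG32()/RREG32(). */
extern void reg_write(uint32_t reg, uint32_t val);
extern uint32_t reg_read(uint32_t reg);

uint64_t read_gpu_clock(void)
{
	uint64_t clock;

	/* The real driver holds rdev->gpu_clock_mutex around this sequence. */
	reg_write(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);	/* latch both halves */
	clock  = (uint64_t)reg_read(RLC_GPU_CLOCK_COUNT_LSB);
	clock |= (uint64_t)reg_read(RLC_GPU_CLOCK_COUNT_MSB) << 32;
	return clock;
}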
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7869089e8761..ef4815c27b1c 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -698,6 +698,9 @@
 #define RLC_UCODE_ADDR                                    0xC32C
 #define RLC_UCODE_DATA                                    0xC330
 
+#define RLC_GPU_CLOCK_COUNT_LSB                           0xC338
+#define RLC_GPU_CLOCK_COUNT_MSB                           0xC33C
+#define RLC_CAPTURE_GPU_CLOCK_COUNT                       0xC340
 #define RLC_MC_CNTL                                       0xC344
 #define RLC_UCODE_CNTL                                    0xC348
 
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 7bd65bdd15a8..291ecc145585 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -308,7 +308,7 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
 	/* need to attach */
 	attach = dma_buf_attach(dma_buf, dev->dev);
 	if (IS_ERR(attach))
-		return ERR_PTR(PTR_ERR(attach));
+		return ERR_CAST(attach);
 
 	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 	if (IS_ERR(sg)) {
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6d1cbdfc9b2a..b64502dfa9f4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -296,8 +296,13 @@ static int iommu_init_device(struct device *dev)
 	} else
 		dma_pdev = pci_dev_get(pdev);
 
+	/* Account for quirked devices */
 	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
 
+	/*
+	 * If it's a multifunction device that does not support our
+	 * required ACS flags, add to the same group as function 0.
+	 */
 	if (dma_pdev->multifunction &&
 	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
 		swap_pci_ref(&dma_pdev,
@@ -305,14 +310,28 @@ static int iommu_init_device(struct device *dev)
 					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
 					  0)));
 
+	/*
+	 * Devices on the root bus go through the iommu.  If that's not us,
+	 * find the next upstream device and test ACS up to the root bus.
+	 * Finding the next device may require skipping virtual buses.
+	 */
 	while (!pci_is_root_bus(dma_pdev->bus)) {
-		if (pci_acs_path_enabled(dma_pdev->bus->self,
-					 NULL, REQ_ACS_FLAGS))
+		struct pci_bus *bus = dma_pdev->bus;
+
+		while (!bus->self) {
+			if (!pci_is_root_bus(bus))
+				bus = bus->parent;
+			else
+				goto root_bus;
+		}
+
+		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
 			break;
 
-		swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
 	}
 
+root_bus:
 	group = iommu_group_get(&dma_pdev->dev);
 	pci_dev_put(dma_pdev);
 	if (!group) {
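The reworked loop above (and its twin in intel-iommu.c further down) walks from the device's bus toward the PCI root, skipping buses that have no bridge device (bus->self == NULL, e.g. virtual buses created for SR-IOV functions), and stops as soon as a bridge on the path guarantees ACS isolation; otherwise the device is folded into the upstream bridge's group. A toy, self-contained model of that walk is sketched below; the structs are illustrative, not the kernel's pci_bus/pci_dev.

#include <stdbool.h>
#include <stddef.h>

struct bus;
struct dev { struct bus *bus; bool acs_ok; };
struct bus { struct bus *parent; struct dev *self; };	/* self == NULL: virtual bus */

static bool is_root_bus(const struct bus *b)
{
	return b->parent == NULL;
}

/* Returns the device whose IOMMU group the caller should join. */
static struct dev *find_group_anchor(struct dev *d)
{
	while (!is_root_bus(d->bus)) {
		struct bus *bus = d->bus;

		while (!bus->self) {		/* skip virtual buses */
			if (is_root_bus(bus))
				return d;	/* analogue of the new root_bus label */
			bus = bus->parent;
		}

		if (bus->self->acs_ok)		/* ACS enforced from here to the root */
			break;

		d = bus->self;			/* fold into the upstream bridge's group */
	}
	return d;
}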
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 500e7f15f5c2..0a2ea317120a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1131,9 +1131,6 @@ static int __init amd_iommu_init_pci(void)
 			break;
 	}
 
-	/* Make sure ACS will be enabled */
-	pci_request_acs();
-
 	ret = amd_iommu_init_devices();
 
 	print_iommu_info();
@@ -1652,6 +1649,9 @@ static bool detect_ivrs(void)
 
 	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
 
+	/* Make sure ACS will be enabled during PCI probe */
+	pci_request_acs();
+
 	return true;
 }
 
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 45350ff5e93c..80bad32aa463 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -732,9 +732,9 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
 	spin_lock_init(&priv->pgtablelock);
 	INIT_LIST_HEAD(&priv->clients);
 
-	dom->geometry.aperture_start = 0;
-	dom->geometry.aperture_end   = ~0UL;
-	dom->geometry.force_aperture = true;
+	domain->geometry.aperture_start = 0;
+	domain->geometry.aperture_end   = ~0UL;
+	domain->geometry.force_aperture = true;
 
 	domain->priv = priv;
 	return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 7469b5346643..2297ec193eb4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2008,6 +2008,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 	if (!drhd) {
 		printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
 			pci_name(pdev));
+		free_domain_mem(domain);
 		return NULL;
 	}
 	iommu = drhd->iommu;
@@ -4124,8 +4125,13 @@ static int intel_iommu_add_device(struct device *dev)
 	} else
 		dma_pdev = pci_dev_get(pdev);
 
+	/* Account for quirked devices */
 	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
 
+	/*
+	 * If it's a multifunction device that does not support our
+	 * required ACS flags, add to the same group as function 0.
+	 */
 	if (dma_pdev->multifunction &&
 	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
 		swap_pci_ref(&dma_pdev,
@@ -4133,14 +4139,28 @@ static int intel_iommu_add_device(struct device *dev)
 					  PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
 					  0)));
 
+	/*
+	 * Devices on the root bus go through the iommu.  If that's not us,
+	 * find the next upstream device and test ACS up to the root bus.
+	 * Finding the next device may require skipping virtual buses.
+	 */
 	while (!pci_is_root_bus(dma_pdev->bus)) {
-		if (pci_acs_path_enabled(dma_pdev->bus->self,
-					 NULL, REQ_ACS_FLAGS))
+		struct pci_bus *bus = dma_pdev->bus;
+
+		while (!bus->self) {
+			if (!pci_is_root_bus(bus))
+				bus = bus->parent;
+			else
+				goto root_bus;
+		}
+
+		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
 			break;
 
-		swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
 	}
 
+root_bus:
 	group = iommu_group_get(&dma_pdev->dev);
 	pci_dev_put(dma_pdev);
 	if (!group) {
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4ba325ab6262..2a4bb36bc688 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -799,14 +799,14 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain,
 			goto out;
 		}
 	}
-	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev));
+	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
 out:
 	spin_unlock(&as->client_lock);
 }
 
 static int smmu_iommu_domain_init(struct iommu_domain *domain)
 {
-	int i, err = -ENODEV;
+	int i, err = -EAGAIN;
 	unsigned long flags;
 	struct smmu_as *as;
 	struct smmu_device *smmu = smmu_handle;
@@ -814,11 +814,14 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain)
 	/* Look for a free AS with lock held */
 	for  (i = 0; i < smmu->num_as; i++) {
 		as = &smmu->as[i];
-		if (!as->pdir_page) {
-			err = alloc_pdir(as);
-			if (!err)
-				goto found;
-		}
+
+		if (as->pdir_page)
+			continue;
+
+		err = alloc_pdir(as);
+		if (!err)
+			goto found;
+
 		if (err != -EAGAIN)
 			break;
 	}
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 5405ec644db3..baf2686aa8eb 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -16,7 +16,6 @@
 #include <linux/sched.h>
 #include "isdnloop.h"
 
-static char *revision = "$Revision: 1.11.6.7 $";
 static char *isdnloop_id = "loop0";
 
 MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
@@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1)
 static int __init
 isdnloop_init(void)
 {
-	char *p;
-	char rev[10];
-
-	if ((p = strchr(revision, ':'))) {
-		strcpy(rev, p + 1);
-		p = strchr(rev, '$');
-		*p = 0;
-	} else
-		strcpy(rev, " ??? ");
-	printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
-
 	if (isdnloop_id)
 		return (isdnloop_addcard(isdnloop_id));
 
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 0dc8abca1407..949cabb88f1c 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -2222,7 +2222,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
 	InitWin(l2);
 	l2->l2m.fsm = &l2fsm;
 	if (test_bit(FLG_LAPB, &l2->flag) ||
-	    test_bit(FLG_PTP, &l2->flag) ||
+	    test_bit(FLG_FIXED_TEI, &l2->flag) ||
 	    test_bit(FLG_LAPD_NET, &l2->flag))
 		l2->l2m.state = ST_L2_4;
 	else
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 6157cbbf4113..363975b3c925 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -224,7 +224,7 @@ void led_trigger_event(struct led_trigger *trig,
 		struct led_classdev *led_cdev;
 
 		led_cdev = list_entry(entry, struct led_classdev, trig_list);
-		led_set_brightness(led_cdev, brightness);
+		__led_set_brightness(led_cdev, brightness);
 	}
 	read_unlock(&trig->leddev_list_lock);
 }
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 53bd136f1ef0..0ade6ebfc914 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -63,7 +63,7 @@ static int lp8788_led_init_device(struct lp8788_led *led,
 	/* scale configuration */
 	addr = LP8788_ISINK_CTRL;
 	mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
-	val = cfg->scale << cfg->num;
+	val = cfg->scale << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
 	ret = lp8788_update_bits(led->lp, addr, mask, val);
 	if (ret)
 		return ret;
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index 9ee12c28059a..771ea067e680 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -247,7 +247,7 @@ static int __devinit r_tpu_probe(struct platform_device *pdev)
 
 	if (!cfg) {
 		dev_err(&pdev->dev, "missing platform data\n");
-		goto err0;
+		return -ENODEV;
 	}
 
 	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index cfff454f628b..c3bb304eca07 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -19,14 +19,13 @@
 #include <linux/mtd/map.h>
 #include <linux/mtd/partitions.h>
 #include <asm/io.h>
+#include <asm/sections.h>
 
 /****************************************************************************/
 
-extern char _ebss;
-
 struct map_info uclinux_ram_map = {
 	.name = "RAM",
-	.phys = (unsigned long)&_ebss,
+	.phys = (unsigned long)__bss_stop,
 	.size = 0,
 };
 
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 545c09ed9079..cff6f023c03a 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -996,9 +996,7 @@ static int __init cops_module_init(void)
 		printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
 			cardname);
 	cops_dev = cops_probe(-1);
-	if (IS_ERR(cops_dev))
-		return PTR_ERR(cops_dev);
-        return 0;
+	return PTR_RET(cops_dev);
 }
 
 static void __exit cops_module_exit(void)
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 0910dce3996d..b5782cdf0bca 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1243,9 +1243,7 @@ static int __init ltpc_module_init(void)
 		       "ltpc: Autoprobing is not recommended for modules\n");
 
 	dev_ltpc = ltpc_probe();
-	if (IS_ERR(dev_ltpc))
-		return PTR_ERR(dev_ltpc);
-	return 0;
+	return PTR_RET(dev_ltpc);
 }
 module_init(ltpc_module_init);
 #endif
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index f0c8bd54ce29..021d69c5d9bc 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1712,7 +1712,7 @@ e100_set_network_leds(int active)
 static void
 e100_netpoll(struct net_device* netdev)
 {
-	e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
+	e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
 }
 #endif
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 77bcd4cb4ffb..463b9ec57d80 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1278,7 +1278,7 @@ struct bnx2x {
 #define BNX2X_FW_RX_ALIGN_START	(1UL << BNX2X_RX_ALIGN_SHIFT)
 
 #define BNX2X_FW_RX_ALIGN_END					\
-	max(1UL << BNX2X_RX_ALIGN_SHIFT, 			\
+	max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT,			\
 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
 #define BNX2X_PXP_DRAM_ALIGN		(BNX2X_RX_ALIGN_SHIFT - 5)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index dd451c3dd83d..02b5a343b195 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4041,20 +4041,6 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
 	return val != 0;
 }
 
-/*
- * Reset the load status for the current engine.
- */
-static void bnx2x_clear_load_status(struct bnx2x *bp)
-{
-	u32 val;
-	u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
-		    BNX2X_PATH0_LOAD_CNT_MASK);
-	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
-	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
-}
-
 static void _print_next_block(int idx, const char *blk)
 {
 	pr_cont("%s%s", idx ? ", " : "", blk);
@@ -9384,32 +9370,24 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
 	return rc;
 }
 
-static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
-{
-	int pos;
-	u32 cap;
-	struct pci_dev *dev = bp->pdev;
-
-	pos = pci_pcie_cap(dev);
-	if (!pos)
-		return false;
-
-	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
-	if (!(cap & PCI_EXP_DEVCAP_FLR))
-		return false;
-
-	return true;
-}
-
 static int __devinit bnx2x_do_flr(struct bnx2x *bp)
 {
 	int i, pos;
 	u16 status;
 	struct pci_dev *dev = bp->pdev;
 
-	/* probe the capability first */
-	if (bnx2x_can_flr(bp))
-		return -ENOTTY;
+
+	if (CHIP_IS_E1x(bp)) {
+		BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
+		return -EINVAL;
+	}
+
+	/* Only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards supports FLR */
+	if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
+		BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
+			  bp->common.bc_ver);
+		return -EINVAL;
+	}
 
 	pos = pci_pcie_cap(dev);
 	if (!pos)
@@ -9429,12 +9407,8 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp)
 		"transaction is not cleared; proceeding with reset anyway\n");
 
 clear:
-	if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
-		BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
-			  bp->common.bc_ver);
-		return -EINVAL;
-	}
 
+	BNX2X_DEV_INFO("Initiating FLR\n");
 	bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
 
 	return 0;
@@ -9454,8 +9428,21 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
 	 * the one required, then FLR will be sufficient to clean any residue
 	 * left by previous driver
 	 */
-	if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp))
-		return bnx2x_do_flr(bp);
+	rc = bnx2x_test_firmware_version(bp, false);
+
+	if (!rc) {
+		/* fw version is good */
+		BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
+		rc = bnx2x_do_flr(bp);
+	}
+
+	if (!rc) {
+		/* FLR was performed */
+		BNX2X_DEV_INFO("FLR successful\n");
+		return 0;
+	}
+
+	BNX2X_DEV_INFO("Could not FLR\n");
 
 	/* Close the MCP request, return failure */
 	rc = bnx2x_prev_mcp_done(bp);
@@ -11427,9 +11414,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	if (!chip_is_e1x)
 		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 
-	/* Reset the load counter */
-	bnx2x_clear_load_status(bp);
-
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	dev->netdev_ops = &bnx2x_netdev_ops;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 734fd87cd990..62f754bd0dfe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2485,6 +2485,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
 		break;
 
 	default:
+		kfree(new_cmd);
 		BNX2X_ERR("Unknown command: %d\n", cmd);
 		return -EINVAL;
 	}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c60de89b6669..90a903d83d87 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
 
 	if (adapter->num_rx_qs != MAX_RX_QS)
 		dev_info(&adapter->pdev->dev,
-			"Created only %d receive queues", adapter->num_rx_qs);
+			"Created only %d receive queues\n", adapter->num_rx_qs);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 0b3bade957fd..080c89093feb 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl, ctrl_ext, eecd;
+	u32 ctrl, ctrl_ext, eecd, tctl;
 	s32 ret_val;
 
 	/*
@@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 	ew32(IMC, 0xffffffff);
 
 	ew32(RCTL, 0);
-	ew32(TCTL, E1000_TCTL_PSP);
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	ew32(TCTL, tctl);
 	e1e_flush();
 
 	usleep_range(10000, 20000);
@@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 			 * auto-negotiation in the TXCW register and disable
 			 * forced link in the Device Control register in an
 			 * attempt to auto-negotiate with our link partner.
-			 * If the partner code word is null, stop forcing
-			 * and restart auto negotiation.
 			 */
-			if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW))  {
+			if (rxcw & E1000_RXCW_C) {
 				/* Enable autoneg, and unforce link up */
 				ew32(TXCW, mac->txcw);
 				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 95b245310f17..46c3b1f9ff89 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
 	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
 }
 
+static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
+				 struct e1000_buffer *bi)
+{
+	int i;
+	struct e1000_ps_page *ps_page;
+
+	for (i = 0; i < adapter->rx_ps_pages; i++) {
+		ps_page = &bi->ps_pages[i];
+
+		if (ps_page->page) {
+			pr_info("packet dump for ps_page %d:\n", i);
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+				       16, 1, page_address(ps_page->page),
+				       PAGE_SIZE, true);
+		}
+	}
+}
+
 /*
  * e1000e_dump - Print registers, Tx-ring and Rx-ring
  */
@@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
 			(unsigned long long)buffer_info->time_stamp,
 			buffer_info->skb, next_desc);
 
-		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+		if (netif_msg_pktdata(adapter) && buffer_info->skb)
 			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
-				       16, 1, phys_to_virt(buffer_info->dma),
-				       buffer_info->length, true);
+				       16, 1, buffer_info->skb->data,
+				       buffer_info->skb->len, true);
 	}
 
 	/* Print Rx Ring Summary */
@@ -381,10 +399,8 @@ rx_ring_summary:
 					buffer_info->skb, next_desc);
 
 				if (netif_msg_pktdata(adapter))
-					print_hex_dump(KERN_INFO, "",
-						DUMP_PREFIX_ADDRESS, 16, 1,
-						phys_to_virt(buffer_info->dma),
-						adapter->rx_ps_bsize0, true);
+					e1000e_dump_ps_pages(adapter,
+							     buffer_info);
 			}
 		}
 		break;
@@ -444,12 +460,12 @@ rx_ring_summary:
 					(unsigned long long)buffer_info->dma,
 					buffer_info->skb, next_desc);
 
-				if (netif_msg_pktdata(adapter))
+				if (netif_msg_pktdata(adapter) &&
+				    buffer_info->skb)
 					print_hex_dump(KERN_INFO, "",
 						       DUMP_PREFIX_ADDRESS, 16,
 						       1,
-						       phys_to_virt
-						       (buffer_info->dma),
+						       buffer_info->skb->data,
 						       adapter->rx_buffer_len,
 						       true);
 			}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 5e84eaac48c1..ba994fb4cec6 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 	 */
 	size += NVM_WORD_SIZE_BASE_SHIFT;
 
+	/*
+	 * Check for invalid size
+	 */
+	if ((hw->mac.type == e1000_82576) && (size > 15)) {
+		pr_notice("The NVM size is not valid, defaulting to 32K\n");
+		size = 15;
+	}
+
 	nvm->word_size = 1 << size;
 	if (hw->mac.type < e1000_i210) {
 		nvm->opcode_bits        = 8;
@@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 	} else
 		nvm->type = e1000_nvm_flash_hw;
 
-	/*
-	 * Check for invalid size
-	 */
-	if ((hw->mac.type == e1000_82576) && (size > 15)) {
-		pr_notice("The NVM size is not valid, defaulting to 32K\n");
-		size = 15;
-	}
-
 	/* NVM Function Pointers */
 	switch (hw->mac.type) {
 	case e1000_82580:
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 10efcd88dca0..28394bea5253 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -156,8 +156,12 @@
 				    : (0x0E018 + ((_n) * 0x40)))
 #define E1000_TXDCTL(_n)  ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
 				    : (0x0E028 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
-#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
+#define E1000_RXCTL(_n)	  ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+				      (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n)	E1000_RXCTL(_n)
+#define E1000_TXCTL(_n)   ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+				      (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
 #define E1000_TDWBAL(_n)  ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
 				    : (0x0E038 + ((_n) * 0x40)))
 #define E1000_TDWBAH(_n)  ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index a19c84cad0e9..70591117051b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	/* When SoL/IDER sessions are active, autoneg/speed/duplex
 	 * cannot be changed */
 	if (igb_check_reset_block(hw)) {
-		dev_err(&adapter->pdev->dev, "Cannot change link "
-			"characteristics when SoL/IDER is active.\n");
+		dev_err(&adapter->pdev->dev,
+			"Cannot change link characteristics when SoL/IDER is active.\n");
 		return -EINVAL;
 	}
 
@@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 		wr32(reg, (_test[pat] & write));
 		val = rd32(reg) & mask;
 		if (val != (_test[pat] & write & mask)) {
-			dev_err(&adapter->pdev->dev, "pattern test reg %04X "
-				"failed: got 0x%08X expected 0x%08X\n",
+			dev_err(&adapter->pdev->dev,
+				"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
 				reg, val, (_test[pat] & write & mask));
 			*data = reg;
 			return 1;
@@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 	wr32(reg, write & mask);
 	val = rd32(reg);
 	if ((write & mask) != (val & mask)) {
-		dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
-			" got 0x%08X expected 0x%08X\n", reg,
+		dev_err(&adapter->pdev->dev,
+			"set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
 			(val & mask), (write & mask));
 		*data = reg;
 		return 1;
@@ -1171,8 +1171,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 	wr32(E1000_STATUS, toggle);
 	after = rd32(E1000_STATUS) & toggle;
 	if (value != after) {
-		dev_err(&adapter->pdev->dev, "failed STATUS register test "
-			"got: 0x%08X expected: 0x%08X\n", after, value);
+		dev_err(&adapter->pdev->dev,
+			"failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+			after, value);
 		*data = 1;
 		return 1;
 	}
@@ -1497,6 +1498,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 		break;
 	}
 
+	/* add a small delay to avoid loopback test failure */
+	msleep(50);
+
 	/* force 1000, set loopback */
 	igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
 
@@ -1777,16 +1781,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
 	 * sessions are active */
 	if (igb_check_reset_block(&adapter->hw)) {
 		dev_err(&adapter->pdev->dev,
-			"Cannot do PHY loopback test "
-			"when SoL/IDER is active.\n");
+			"Cannot do PHY loopback test when SoL/IDER is active.\n");
 		*data = 0;
 		goto out;
 	}
 	if ((adapter->hw.mac.type == e1000_i210)
-		|| (adapter->hw.mac.type == e1000_i210)) {
+		|| (adapter->hw.mac.type == e1000_i211)) {
 		dev_err(&adapter->pdev->dev,
-			"Loopback test not supported "
-			"on this part at this time.\n");
+			"Loopback test not supported on this part at this time.\n");
 		*data = 0;
 		goto out;
 	}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b7c2d5050572..48cc4fb1a307 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter)
 				(u64)buffer_info->time_stamp,
 				buffer_info->skb, next_desc);
 
-			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+			if (netif_msg_pktdata(adapter) && buffer_info->skb)
 				print_hex_dump(KERN_INFO, "",
 					DUMP_PREFIX_ADDRESS,
-					16, 1, phys_to_virt(buffer_info->dma),
+					16, 1, buffer_info->skb->data,
 					buffer_info->length, true);
 		}
 	}
@@ -547,18 +547,17 @@ rx_ring_summary:
 					(u64)buffer_info->dma,
 					buffer_info->skb, next_desc);
 
-				if (netif_msg_pktdata(adapter)) {
+				if (netif_msg_pktdata(adapter) &&
+				    buffer_info->dma && buffer_info->skb) {
 					print_hex_dump(KERN_INFO, "",
-						DUMP_PREFIX_ADDRESS,
-						16, 1,
-						phys_to_virt(buffer_info->dma),
-						IGB_RX_HDR_LEN, true);
+						  DUMP_PREFIX_ADDRESS,
+						  16, 1, buffer_info->skb->data,
+						  IGB_RX_HDR_LEN, true);
 					print_hex_dump(KERN_INFO, "",
 					  DUMP_PREFIX_ADDRESS,
 					  16, 1,
-					  phys_to_virt(
-					    buffer_info->page_dma +
-					    buffer_info->page_offset),
+					  page_address(buffer_info->page) +
+						      buffer_info->page_offset,
 					  PAGE_SIZE/2, true);
 				}
 			}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 50fc137501da..18bf08c9d7a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 		/* Set KX4/KX/KR support according to speed requested */
 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
-		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
 				autoc |= IXGBE_AUTOC_KX4_SUPP;
 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
 			    (hw->phy.smart_speed_active == false))
 				autoc |= IXGBE_AUTOC_KR_SUPP;
+		}
 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 			autoc |= IXGBE_AUTOC_KX_SUPP;
 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f32e70300770..5aba5ecdf1e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		/* If source MAC is equal to our own MAC and not performing
 		 * the selftest or flb disabled - drop the packet */
 		if (s_mac == priv->mac &&
-			(!(dev->features & NETIF_F_LOOPBACK) ||
-			 !priv->validate_loopback))
+		    !((dev->features & NETIF_F_LOOPBACK) ||
+		      priv->validate_loopback))
 			goto next;
 
 		/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 019d856b1334..10bba09c44ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 	ring->cons = 0xffffffff;
 	ring->last_nr_txbb = 1;
 	ring->poll_cnt = 0;
-	ring->blocked = 0;
 	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
 	memset(ring->buf, 0, ring->buf_size);
 
@@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 	ring->cons += txbbs_skipped;
 	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
 
-	/* Wakeup Tx queue if this ring stopped it */
-	if (unlikely(ring->blocked)) {
-		if ((u32) (ring->prod - ring->cons) <=
-		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
-			ring->blocked = 0;
-			netif_tx_wake_queue(ring->tx_queue);
-			priv->port_stats.wake_queue++;
-		}
+	/*
+	 * Wake up the Tx queue if this ring stopped it, and at least one
+	 * packet was completed
+	 */
+	if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
+		netif_tx_wake_queue(ring->tx_queue);
+		priv->port_stats.wake_queue++;
 	}
 }
 
@@ -592,7 +590,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 		/* every full Tx ring stops queue */
 		netif_tx_stop_queue(ring->tx_queue);
-		ring->blocked = 1;
 		priv->port_stats.queue_stopped++;
 
 		return NETDEV_TX_BUSY;
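The en_tx.c change above drops the driver-private "blocked" flag: the xmit path already stops the queue when the ring fills, so the completion path can simply wake the queue whenever it is stopped and at least one descriptor was just reclaimed, and the next xmit re-evaluates the fill level itself. A small model of that handshake, with illustrative stand-ins for netif_tx_stop_queue()/netif_tx_wake_queue(), is sketched below.

#include <stdbool.h>

struct txq { bool stopped; unsigned int prod, cons, size; };

static void queue_stop(struct txq *q) { q->stopped = true; }
static void queue_wake(struct txq *q) { q->stopped = false; }

/* Producer: back-pressure the stack when the ring is full. */
static bool xmit_one(struct txq *q)
{
	if (q->prod - q->cons >= q->size) {
		queue_stop(q);
		return false;			/* analogue of NETDEV_TX_BUSY */
	}
	q->prod++;
	return true;
}

/* Completion path: wake whenever stopped and something was freed. */
static void tx_completion(struct txq *q, unsigned int completed)
{
	q->cons += completed;
	if (q->stopped && completed > 0)	/* no fill-level check needed here */
		queue_wake(q);
}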
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 48d0e90194cb..827b72dfce99 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
 					 "on this HCA, aborting.\n");
 				return -EINVAL;
 			}
-			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
-			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
-				return -EINVAL;
 		}
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5f1ab105debc..9d27e42264e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -248,7 +248,6 @@ struct mlx4_en_tx_ring {
 	u32 doorbell_qpn;
 	void *buf;
 	u16 poll_cnt;
-	int blocked;
 	struct mlx4_en_tx_info *tx_info;
 	u8 *bounce_buf;
 	u32 last_nr_txbb;
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index 802498293528..34ee09bae36e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -81,20 +81,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev,
 	}
 
 	/*
-	 * Adjust port configuration:
-	 * If port 1 sensed nothing and port 2 is IB, set both as IB
-	 * If port 2 sensed nothing and port 1 is Eth, set both as Eth
-	 */
-	if (stype[0] == MLX4_PORT_TYPE_ETH) {
-		for (i = 1; i < dev->caps.num_ports; i++)
-			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
-	}
-	if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
-		for (i = 0; i < dev->caps.num_ports - 1; i++)
-			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
-	}
-
-	/*
 	 * If sensed nothing, remain in current configuration.
 	 */
 	for (i = 0; i < dev->caps.num_ports; i++)
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 4069edab229e..53743f7a2ca9 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -346,28 +346,15 @@ static phy_interface_t lpc_phy_interface_mode(struct device *dev)
 						   "phy-mode", NULL);
 		if (mode && !strcmp(mode, "mii"))
 			return PHY_INTERFACE_MODE_MII;
-		return PHY_INTERFACE_MODE_RMII;
 	}
-
-	/* non-DT */
-#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT
-	return PHY_INTERFACE_MODE_MII;
-#else
 	return PHY_INTERFACE_MODE_RMII;
-#endif
 }
 
 static bool use_iram_for_net(struct device *dev)
 {
 	if (dev && dev->of_node)
 		return of_property_read_bool(dev->of_node, "use-iram");
-
-	/* non-DT */
-#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET
-	return true;
-#else
 	return false;
-#endif
 }
 
 /* Receive Status information word */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 70554a1b2b02..65a8d49106a4 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx)
 		goto fail2;
 	}
 
+	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+		rc = -EINVAL;
+		goto fail3;
+	}
 	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
 
 	rc = efx_probe_filters(efx);
@@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx)
 	net_dev->irq = efx->pci_dev->irq;
 	net_dev->netdev_ops = &efx_netdev_ops;
 	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
 
 	rtnl_lock();
 
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index be8f9158a714..70755c97251a 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -30,6 +30,7 @@ extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_MAX_EVQ_SIZE 16384UL
 #define EFX_MIN_EVQ_SIZE 512UL
 
-/* The smallest [rt]xq_entries that the driver supports. Callers of
- * efx_wake_queue() assume that they can subsequently send at least one
- * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
-#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS	100
+
+/* The smallest [rt]xq_entries that the driver supports.  RX minimum
+ * is a bit arbitrary.  For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT		128U
+#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))
 
 /* Filters */
 extern int efx_probe_filters(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 10536f93b561..8cba2df82b18 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
 				     struct ethtool_ringparam *ring)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	u32 txq_entries;
 
 	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
 	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
 	    ring->tx_pending > EFX_MAX_DMAQ_SIZE)
 		return -EINVAL;
 
-	if (ring->rx_pending < EFX_MIN_RING_SIZE ||
-	    ring->tx_pending < EFX_MIN_RING_SIZE) {
+	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
 		netif_err(efx, drv, efx->net_dev,
-			  "TX and RX queues cannot be smaller than %ld\n",
-			  EFX_MIN_RING_SIZE);
+			  "RX queues cannot be smaller than %u\n",
+			  EFX_RXQ_MIN_ENT);
 		return -EINVAL;
 	}
 
-	return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+	txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+	if (txq_entries != ring->tx_pending)
+		netif_warn(efx, drv, efx->net_dev,
+			   "increasing TX queue size to minimum of %u\n",
+			   txq_entries);
+
+	return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
 }
 
 static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9b225a7769f7..18713436b443 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
 	return len;
 }
 
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+	/* Header and payload descriptor for each output segment, plus
+	 * one for every input fragment boundary within a segment
+	 */
+	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+	/* Possibly one more per segment for the alignment workaround */
+	if (EFX_WORKAROUND_5391(efx))
+		max_descs += EFX_TSO_MAX_SEGS;
+
+	/* Possibly more for PCIe page boundaries within input fragments */
+	if (PAGE_SIZE > EFX_PAGE_SIZE)
+		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+	return max_descs;
+}
+
 /*
  * Add a socket buffer to a TX queue
  *
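To make the new minimum concrete: efx_tx_max_skb_descs() above bounds the descriptors a single worst-case TSO skb can need, and EFX_TXQ_MIN_ENT in efx.h doubles that so the ring always has room for two such skbs. A small worked example with illustrative constants is shown below (MAX_SKB_FRAGS taken as the common 4 KiB-page value of 17, the alignment workaround and PCIe page-split terms assumed off), so the numbers are for illustration only.

#include <stdio.h>

#define EFX_TSO_MAX_SEGS	100	/* from efx.h above */
#define MAX_SKB_FRAGS		17	/* assumed: typical value with 4 KiB pages */

static unsigned int tx_max_skb_descs(void)
{
	/* header + payload descriptor per segment, plus one per fragment boundary */
	return EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
}

int main(void)
{
	unsigned int min_ent = 2 * tx_max_skb_descs();	/* EFX_TXQ_MIN_ENT analogue */

	printf("worst-case descriptors per TSO skb: %u\n", tx_max_skb_descs());	/* 217 */
	printf("minimum TX ring entries: %u\n", min_ent);			/* 434 */
	return 0;
}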
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index cd01ee7ecef1..b93245c11995 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
  * the necessary resources and invokes the main to init
  * the net device, register the mdio bus etc.
  */
-static int stmmac_pltfr_probe(struct platform_device *pdev)
+static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
 {
 	int ret = 0;
 	struct resource *res;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 482648fcf0b6..98934bdf6acf 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1003,6 +1003,7 @@ static int ixp4xx_nway_reset(struct net_device *dev)
 }
 
 int ixp46x_phc_index = -1;
+EXPORT_SYMBOL_GPL(ixp46x_phc_index);
 
 static int ixp4xx_get_ts_info(struct net_device *dev,
 			      struct ethtool_ts_info *info)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6cee2917eb02..4a1a5f58fa73 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device)
 	unsigned long flags;
 
 	net_device = hv_get_drvdata(device);
-	spin_lock_irqsave(&device->channel->inbound_lock, flags);
-	net_device->destroy = true;
-	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
-
-	/* Wait for all send completions */
-	wait_event(net_device->wait_drain,
-		   atomic_read(&net_device->num_outstanding_sends) == 0);
 
 	netvsc_disconnect_vsp(net_device);
 
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index e5d6146937fa..1e88a1095934 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -718,6 +718,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
 {
 	struct rndis_request *request;
 	struct rndis_halt_request *halt;
+	struct netvsc_device *nvdev = dev->net_dev;
+	struct hv_device *hdev = nvdev->dev;
+	ulong flags;
 
 	/* Attempt to do a rndis device halt */
 	request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -735,6 +738,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
 	dev->state = RNDIS_DEV_UNINITIALIZED;
 
 cleanup:
+	spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
+	nvdev->destroy = true;
+	spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
+
+	/* Wait for all send completions */
+	wait_event(nvdev->wait_drain,
+		atomic_read(&nvdev->num_outstanding_sends) == 0);
+
 	if (request)
 		put_rndis_request(dev, request);
 	return;
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index a561ae44a9ac..c6a0299aa9f9 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -158,7 +158,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
 	/* If we do not add 'RPOLC', we can't catch the receive interrupt.
 	 * It's related to the HW layout and the IR transceiver.
 	 */
-	val |= IREN | RPOLC;
+	val |= UMOD_IRDA | RPOLC;
 	UART_PUT_GCTL(port, val);
 	return ret;
 }
@@ -432,7 +432,7 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev
 	bfin_sir_stop_rx(port);
 
 	val = UART_GET_GCTL(port);
-	val &= ~(UCEN | IREN | RPOLC);
+	val &= ~(UCEN | UMOD_MASK | RPOLC);
 	UART_PUT_GCTL(port, val);
 
 #ifdef CONFIG_SIR_BFIN_DMA
@@ -518,10 +518,10 @@ static void bfin_sir_send_work(struct work_struct *work)
 	 * reset all the UART.
 	 */
 	val = UART_GET_GCTL(port);
-	val &= ~(IREN | RPOLC);
+	val &= ~(UMOD_MASK | RPOLC);
 	UART_PUT_GCTL(port, val);
 	SSYNC();
-	val |= IREN | RPOLC;
+	val |= UMOD_IRDA | RPOLC;
 	UART_PUT_GCTL(port, val);
 	SSYNC();
 	/* bfin_sir_set_speed(port, self->speed); */
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0737bd4d1669..0f0f9ce3a776 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -94,7 +94,8 @@ static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
 	int i;
 
 	for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
-		if (rcu_dereference(vlan->taps[i]) == q)
+		if (rcu_dereference_protected(vlan->taps[i],
+					      lockdep_is_held(&macvtap_lock)) == q)
 			return i;
 	}
 
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index e0cc4ef33dee..eefe49e8713c 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -101,7 +101,6 @@ err:
 		n--;
 		gpio_free(s->gpio[n]);
 	}
-	devm_kfree(&pdev->dev, s);
 	return r;
 }
 
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1c98321b56cc..162464fe86bf 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
 		goto tx_error;
 
-	rt = ip_route_output_ports(&init_net, &fl4, NULL,
+	rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
 				   opt->dst_addr.sin_addr.s_addr,
 				   opt->src_addr.sin_addr.s_addr,
 				   0, 0, IPPROTO_GRE,
@@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
 	po->chan.private = sk;
 	po->chan.ops = &pptp_chan_ops;
 
-	rt = ip_route_output_ports(&init_net, &fl4, sk,
+	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
 				   opt->dst_addr.sin_addr.s_addr,
 				   opt->src_addr.sin_addr.s_addr,
 				   0, 0,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 926d4db5cb38..3a16d4fdaa05 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun)
 	netif_tx_lock_bh(tun->dev);
 	netif_carrier_off(tun->dev);
 	tun->tfile = NULL;
-	tun->socket.file = NULL;
 	netif_tx_unlock_bh(tun->dev);
 
 	/* Drop read queue */
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 64610048ce87..7d78669000d7 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -232,6 +232,7 @@ static int usbpn_open(struct net_device *dev)
 		struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
 
 		if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
+			usb_free_urb(req);
 			usbpn_close(dev);
 			return -ENOMEM;
 		}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f4ce5957df32..4cd582a4f625 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1225,6 +1225,26 @@ static const struct usb_device_id cdc_devs[] = {
 	  .driver_info = (unsigned long) &wwan_info,
 	},
 
+	/* Dell branded MBM devices like DW5550 */
+	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+		| USB_DEVICE_ID_MATCH_VENDOR,
+	  .idVendor = 0x413c,
+	  .bInterfaceClass = USB_CLASS_COMM,
+	  .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+	  .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+	  .driver_info = (unsigned long) &wwan_info,
+	},
+
+	/* Toshiba branded MBM devices */
+	{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+		| USB_DEVICE_ID_MATCH_VENDOR,
+	  .idVendor = 0x0930,
+	  .bInterfaceClass = USB_CLASS_COMM,
+	  .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+	  .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+	  .driver_info = (unsigned long) &wwan_info,
+	},
+
 	/* Generic CDC-NCM devices */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM,
 		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index cfa91ab7acf8..60b6a9daff7e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -730,6 +730,7 @@ int ath9k_hw_init(struct ath_hw *ah)
 	case AR9300_DEVID_QCA955X:
 	case AR9300_DEVID_AR9580:
 	case AR9300_DEVID_AR9462:
+	case AR9485_DEVID_AR1111:
 		break;
 	default:
 		if (common->bus_ops->ath_bus_type == ATH_USB)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dd0c146d81dc..ce7332c64efb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -49,6 +49,7 @@
 #define AR9300_DEVID_AR9462	0x0034
 #define AR9300_DEVID_AR9330	0x0035
 #define AR9300_DEVID_QCA955X	0x0038
+#define AR9485_DEVID_AR1111	0x0037
 
 #define AR5416_AR9100_DEVID	0x000b
 
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 87b89d55e637..d455de9162ec 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -37,6 +37,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
 	{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E  AR9485 */
 	{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E  AR9580 */
 	{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E  AR9462 */
+	{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E  AR1111/AR9485 */
 	{ 0 }
 };
 
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b80352b308d5..a140165dfee0 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2719,32 +2719,37 @@ static int b43_gpio_init(struct b43_wldev *dev)
 	if (dev->dev->chip_id == 0x4301) {
 		mask |= 0x0060;
 		set |= 0x0060;
+	} else if (dev->dev->chip_id == 0x5354) {
+		/* Don't allow overriding the button GPIOs */
+		set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */
 	}
-	if (dev->dev->chip_id == 0x5354)
-		set &= 0xff02;
+
 	if (0 /* FIXME: conditional unknown */ ) {
 		b43_write16(dev, B43_MMIO_GPIO_MASK,
 			    b43_read16(dev, B43_MMIO_GPIO_MASK)
 			    | 0x0100);
-		mask |= 0x0180;
-		set |= 0x0180;
+		/* BT Coexistence Input */
+		mask |= 0x0080;
+		set |= 0x0080;
+		/* BT Coexistence Out */
+		mask |= 0x0100;
+		set |= 0x0100;
 	}
 	if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) {
+		/* PA is controlled by gpio 9, let ucode handle it */
 		b43_write16(dev, B43_MMIO_GPIO_MASK,
 			    b43_read16(dev, B43_MMIO_GPIO_MASK)
 			    | 0x0200);
 		mask |= 0x0200;
 		set |= 0x0200;
 	}
-	if (dev->dev->core_rev >= 2)
-		mask |= 0x0010;	/* FIXME: This is redundant. */
 
 	switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
 	case B43_BUS_BCMA:
 		bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL,
 				(bcma_cc_read32(&dev->dev->bdev->bus->drv_cc,
-					BCMA_CC_GPIOCTL) & mask) | set);
+					BCMA_CC_GPIOCTL) & ~mask) | set);
 		break;
 #endif
 #ifdef CONFIG_B43_SSB
@@ -2753,7 +2758,7 @@ static int b43_gpio_init(struct b43_wldev *dev)
 		if (gpiodev)
 			ssb_write32(gpiodev, B43_GPIO_CONTROL,
 				    (ssb_read32(gpiodev, B43_GPIO_CONTROL)
-				    & mask) | set);
+				    & ~mask) | set);
 		break;
 #endif
 	}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 9a4c63f927cb..7ed7d7577024 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -382,9 +382,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
 {
 	struct brcms_c_info *wlc = wlc_cm->wlc;
 	struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
-	const struct ieee80211_reg_rule *reg_rule;
 	struct txpwr_limits txpwr;
-	int ret;
 
 	brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
 
@@ -393,8 +391,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
 	);
 
 	/* set or restore gmode as required by regulatory */
-	ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule);
-	if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
+	if (ch->flags & IEEE80211_CHAN_NO_OFDM)
 		brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
 	else
 		brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 9e79d47e077f..192ad5c1fcc8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -121,7 +121,8 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = {
 		 IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN2GHZ(14, 2484,
 		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
+		 IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS |
+		 IEEE80211_CHAN_NO_OFDM)
 };
 
 static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 6fddd2785e6e..a82f46c10f5e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -707,11 +707,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
  */
 static bool rs_use_green(struct ieee80211_sta *sta)
 {
-	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-	struct iwl_rxon_context *ctx = sta_priv->ctx;
-
-	return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
-		!(ctx->ht.non_gf_sta_present);
+	/*
+	 * There's a bug somewhere in this code that causes the
+	 * scaling to get stuck because GF+SGI can't be combined
+	 * in SISO rates. Until we find that bug, disable GF, it
+	 * has only limited benefit and we still interoperate with
+	 * GF APs since we can always receive GF transmissions.
+	 */
+	return false;
 }
 
 /**
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index eb5de800ed90..1c10b542ab23 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1254,6 +1254,7 @@ static int lbs_associate(struct lbs_private *priv,
 			netif_tx_wake_all_queues(priv->dev);
 	}
 
+	kfree(cmd);
 done:
 	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
 	return ret;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 76caebaa4397..e970897f6ab5 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1314,6 +1314,7 @@ static void if_sdio_remove(struct sdio_func *func)
 		kfree(packet);
 	}
 
+	kfree(card);
 	lbs_deb_leave(LBS_DEB_SDIO);
 }
 
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 58048189bd24..fe1ea43c5149 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -571,7 +571,10 @@ static int lbs_thread(void *data)
 			netdev_info(dev, "Timeout submitting command 0x%04x\n",
 				    le16_to_cpu(cmdnode->cmdbuf->command));
 			lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
-			if (priv->reset_card)
+
+			/* Reset card, but only when it isn't in the process
+			 * of being shut down anyway. */
+			if (!dev->dismantle && priv->reset_card)
 				priv->reset_card(priv);
 		}
 		priv->cmd_timed_out = 0;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88455b1b9fe0..cb8c2aca54e4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -221,6 +221,67 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
 	mutex_unlock(&rt2x00dev->csr_mutex);
 }
 
+static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	int i, count;
+
+	rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+	if (rt2x00_get_field32(reg, WLAN_EN))
+		return 0;
+
+	rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
+	rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
+	rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
+	rt2x00_set_field32(&reg, WLAN_EN, 1);
+	rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+
+	udelay(REGISTER_BUSY_DELAY);
+
+	count = 0;
+	do {
+		/*
+		 * Check PLL_LD & XTAL_RDY.
+		 */
+		for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+			rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+			if (rt2x00_get_field32(reg, PLL_LD) &&
+			    rt2x00_get_field32(reg, XTAL_RDY))
+				break;
+			udelay(REGISTER_BUSY_DELAY);
+		}
+
+		if (i >= REGISTER_BUSY_COUNT) {
+
+			if (count >= 10)
+				return -EIO;
+
+			rt2800_register_write(rt2x00dev, 0x58, 0x018);
+			udelay(REGISTER_BUSY_DELAY);
+			rt2800_register_write(rt2x00dev, 0x58, 0x418);
+			udelay(REGISTER_BUSY_DELAY);
+			rt2800_register_write(rt2x00dev, 0x58, 0x618);
+			udelay(REGISTER_BUSY_DELAY);
+			count++;
+		} else {
+			count = 0;
+		}
+
+		rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+		rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
+		rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
+		rt2x00_set_field32(&reg, WLAN_RESET, 1);
+		rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+		udelay(10);
+		rt2x00_set_field32(&reg, WLAN_RESET, 0);
+		rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+		udelay(10);
+		rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
+	} while (count != 0);
+
+	return 0;
+}
+
 void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
 			const u8 command, const u8 token,
 			const u8 arg0, const u8 arg1)
@@ -400,6 +461,13 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
 {
 	unsigned int i;
 	u32 reg;
+	int retval;
+
+	if (rt2x00_rt(rt2x00dev, RT3290)) {
+		retval = rt2800_enable_wlan_rt3290(rt2x00dev);
+		if (retval)
+			return -EBUSY;
+	}
 
 	/*
 	 * If driver doesn't wake up firmware here,
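
rt2800_enable_wlan_rt3290() is moved into rt2800lib.c and called from rt2800_load_firmware(), so the RT3290 WLAN block is powered up before any firmware load regardless of which bus backend is in use. Its core is a bounded poll-and-retry loop: poll CMB_CTRL for PLL_LD/XTAL_RDY, and on timeout re-kick the clock logic and try again, up to ten rounds. A self-contained sketch of that control flow, with stub functions standing in for the register I/O (the stub names are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

#define POLL_TRIES	100	/* stands in for REGISTER_BUSY_COUNT */
#define RETRY_LIMIT	10

/* Stubs for illustration; the driver reads CMB_CTRL and pokes register 0x58. */
static bool hw_ready(void) { static int calls; return ++calls > 250; }
static void hw_kick(void)  { /* re-program the clock source and wait */ }

static int enable_wlan_block(void)
{
	int tries, retries;

	for (retries = 0; retries < RETRY_LIMIT; retries++) {
		for (tries = 0; tries < POLL_TRIES; tries++)
			if (hw_ready())
				return 0;	/* PLL locked, crystal ready */

		hw_kick();			/* timed out: nudge the hardware, retry */
	}

	return -1;				/* mirrors the driver's -EIO after 10 rounds */
}

int main(void)
{
	printf("enable_wlan_block() = %d\n", enable_wlan_block());
	return 0;
}
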
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 235376e9cb04..98aa426a3564 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -980,66 +980,6 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 	return rt2800_validate_eeprom(rt2x00dev);
 }
 
-static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
-{
-	u32 reg;
-	int i, count;
-
-	rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
-	if (rt2x00_get_field32(reg, WLAN_EN))
-		return 0;
-
-	rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
-	rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
-	rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
-	rt2x00_set_field32(&reg, WLAN_EN, 1);
-	rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
-
-	udelay(REGISTER_BUSY_DELAY);
-
-	count = 0;
-	do {
-		/*
-		 * Check PLL_LD & XTAL_RDY.
-		 */
-		for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
-			rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
-			if (rt2x00_get_field32(reg, PLL_LD) &&
-			    rt2x00_get_field32(reg, XTAL_RDY))
-				break;
-			udelay(REGISTER_BUSY_DELAY);
-		}
-
-		if (i >= REGISTER_BUSY_COUNT) {
-
-			if (count >= 10)
-				return -EIO;
-
-			rt2800_register_write(rt2x00dev, 0x58, 0x018);
-			udelay(REGISTER_BUSY_DELAY);
-			rt2800_register_write(rt2x00dev, 0x58, 0x418);
-			udelay(REGISTER_BUSY_DELAY);
-			rt2800_register_write(rt2x00dev, 0x58, 0x618);
-			udelay(REGISTER_BUSY_DELAY);
-			count++;
-		} else {
-			count = 0;
-		}
-
-		rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
-		rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
-		rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
-		rt2x00_set_field32(&reg, WLAN_RESET, 1);
-		rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
-		udelay(10);
-		rt2x00_set_field32(&reg, WLAN_RESET, 0);
-		rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
-		udelay(10);
-		rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
-	} while (count != 0);
-
-	return 0;
-}
 static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
 	int retval;
@@ -1063,17 +1003,6 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 		return retval;
 
 	/*
-	 * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan
-	 * clk for rt3290. That avoid the MCU fail in start phase.
-	 */
-	if (rt2x00_rt(rt2x00dev, RT3290)) {
-		retval = rt2800_enable_wlan_rt3290(rt2x00dev);
-
-		if (retval)
-			return retval;
-	}
-
-	/*
 	 * This device has multiple filters for control frames
 	 * and has a separate filter for PS Poll frames.
 	 */
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index f32259686b45..3f7bc5cadf9a 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
 
 static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
 {
-	struct ieee80211_conf conf = { .flags = 0 };
-	struct rt2x00lib_conf libconf = { .conf = &conf };
+	struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
 
 	rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
 }
diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/pinctrl-imx23.c
index 75d3eff94296..3674d877ed7c 100644
--- a/drivers/pinctrl/pinctrl-imx23.c
+++ b/drivers/pinctrl/pinctrl-imx23.c
@@ -292,7 +292,7 @@ static int __init imx23_pinctrl_init(void)
 {
 	return platform_driver_register(&imx23_pinctrl_driver);
 }
-arch_initcall(imx23_pinctrl_init);
+postcore_initcall(imx23_pinctrl_init);
 
 static void __exit imx23_pinctrl_exit(void)
 {
diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/pinctrl-imx28.c
index b973026811a2..0f5b2122b1ba 100644
--- a/drivers/pinctrl/pinctrl-imx28.c
+++ b/drivers/pinctrl/pinctrl-imx28.c
@@ -408,7 +408,7 @@ static int __init imx28_pinctrl_init(void)
 {
 	return platform_driver_register(&imx28_pinctrl_driver);
 }
-arch_initcall(imx28_pinctrl_init);
+postcore_initcall(imx28_pinctrl_init);
 
 static void __exit imx28_pinctrl_exit(void)
 {
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c
index 6f99769c6733..5f3e9d0221e1 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -766,7 +766,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1",
 DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1");
 DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1");
 DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1");
-DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2");
+DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2");
 DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1");
 DB8500_FUNC_GROUPS(usb, "usb_a_1");
 DB8500_FUNC_GROUPS(trig, "trig_b_1");
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 53b0d49a7a1c..ec6ac501b23a 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -1731,7 +1731,6 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
 	for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
 		if (!nmk_gpio_chips[i]) {
 			dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
-			devm_kfree(&pdev->dev, npct);
 			return -EPROBE_DEFER;
 		}
 		npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip;
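
These pinctrl hunks drop explicit devm_kfree() calls on error paths: memory obtained with devm_kzalloc() is released automatically by the driver core when probe fails (or when the device is later unbound), so freeing it by hand is redundant. A minimal probe sketch showing why the early returns are safe; the driver and struct names are hypothetical:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct toy_priv {
	void __iomem *base;
};

static int toy_probe(struct platform_device *pdev)
{
	struct toy_priv *priv;
	struct resource *res;

	/* Managed allocation: freed automatically on probe failure or unbind. */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOENT;	/* no devm_kfree() needed on this path */

	platform_set_drvdata(pdev, priv);
	return 0;
}
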
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index 2aae8a8978e9..7fca6ce5952b 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1217,7 +1217,6 @@ out_no_rsc_remap:
 	iounmap(spmx->gpio_virtbase);
 out_no_gpio_remap:
 	platform_set_drvdata(pdev, NULL);
-	devm_kfree(&pdev->dev, spmx);
 	return ret;
 }
 
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index a7ad8c112d91..309f5b9a70ec 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1121,10 +1121,8 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev)
 	upmx->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		ret = -ENOENT;
-		goto out_no_resource;
-	}
+	if (!res)
+		return -ENOENT;
 	upmx->phybase = res->start;
 	upmx->physize = resource_size(res);
 
@@ -1165,8 +1163,6 @@ out_no_remap:
 	platform_set_drvdata(pdev, NULL);
 out_no_memregion:
 	release_mem_region(upmx->phybase, upmx->physize);
-out_no_resource:
-	devm_kfree(&pdev->dev, upmx);
 	return ret;
 }
 
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 2ca7dd1ab3e4..cd33add118ce 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -350,6 +350,7 @@ static void cmpc_accel_idev_init_v4(struct input_dev *inputdev)
 	inputdev->close = cmpc_accel_close_v4;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int cmpc_accel_suspend_v4(struct device *dev)
 {
 	struct input_dev *inputdev;
@@ -384,6 +385,7 @@ static int cmpc_accel_resume_v4(struct device *dev)
 
 	return 0;
 }
+#endif
 
 static int cmpc_accel_add_v4(struct acpi_device *acpi)
 {
@@ -752,6 +754,7 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type)
 	return cmpc_remove_acpi_notify_device(acpi);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int cmpc_tablet_resume(struct device *dev)
 {
 	struct input_dev *inputdev = dev_get_drvdata(dev);
@@ -761,6 +764,7 @@ static int cmpc_tablet_resume(struct device *dev)
 		input_report_switch(inputdev, SW_TABLET_MODE, !val);
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume);
 
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index d2e41735a47b..7acae3f85f3b 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -440,11 +440,13 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_fujitsu_resume(struct device *dev)
 {
 	fujitsu_reset();
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume);
 
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index d9ab6f64dcec..777c7e3dda51 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -305,10 +305,12 @@ static int hdaps_probe(struct platform_device *dev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int hdaps_resume(struct device *dev)
 {
 	return hdaps_device_init();
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume);
 
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index f4d91154ad67..6b9af989632b 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -352,7 +352,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
 }
 
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int lis3lv02d_suspend(struct device *dev)
 {
 	/* make sure the device is off when we suspend */
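
The platform/x86 hunks here and below all apply the same idiom: SIMPLE_DEV_PM_OPS() only references its suspend/resume callbacks when CONFIG_PM_SLEEP is enabled, so the callbacks are now guarded by #ifdef CONFIG_PM_SLEEP (hp_accel previously used the broader CONFIG_PM) to avoid "defined but not used" warnings on kernels built without sleep support. A minimal sketch of the pattern, using a made-up driver name:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
/* Only compiled when the dev_pm_ops below will actually reference it. */
static int example_resume(struct device *dev)
{
	/* reprogram hardware state lost across suspend */
	return 0;
}
#endif

/* Expands to an empty sleep-ops block when CONFIG_PM_SLEEP is not set. */
static SIMPLE_DEV_PM_OPS(example_pm, NULL, example_resume);
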
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index f64441844317..2111dbb7e1e3 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -85,7 +85,9 @@
 #define MSI_STANDARD_EC_TOUCHPAD_ADDRESS	0xe4
 #define MSI_STANDARD_EC_TOUCHPAD_MASK		(1 << 4)
 
+#ifdef CONFIG_PM_SLEEP
 static int msi_laptop_resume(struct device *device);
+#endif
 static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume);
 
 #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS	0x2f
@@ -753,6 +755,7 @@ err_bluetooth:
 	return retval;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int msi_laptop_resume(struct device *device)
 {
 	u8 data;
@@ -773,6 +776,7 @@ static int msi_laptop_resume(struct device *device)
 
 	return 0;
 }
+#endif
 
 static int __init msi_laptop_input_setup(void)
 {
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 24480074bcf0..8e8caa767d6a 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -188,7 +188,9 @@ static const struct acpi_device_id pcc_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_pcc_hotkey_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume);
 
 static struct acpi_driver acpi_pcc_driver = {
@@ -540,6 +542,7 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc)
 
 /* kernel module interface */
 
+#ifdef CONFIG_PM_SLEEP
 static int acpi_pcc_hotkey_resume(struct device *dev)
 {
 	struct pcc_acpi *pcc;
@@ -556,6 +559,7 @@ static int acpi_pcc_hotkey_resume(struct device *dev)
 
 	return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode);
 }
+#endif
 
 static int acpi_pcc_hotkey_add(struct acpi_device *device)
 {
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 9363969ad07a..daaddec68def 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -140,7 +140,10 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
 		 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
 		 "(default: 0)");
 
+#ifdef CONFIG_PM_SLEEP
 static void sony_nc_kbd_backlight_resume(void);
+static void sony_nc_thermal_resume(void);
+#endif
 static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
 		unsigned int handle);
 static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
@@ -151,7 +154,6 @@ static void sony_nc_battery_care_cleanup(struct platform_device *pd);
 
 static int sony_nc_thermal_setup(struct platform_device *pd);
 static void sony_nc_thermal_cleanup(struct platform_device *pd);
-static void sony_nc_thermal_resume(void);
 
 static int sony_nc_lid_resume_setup(struct platform_device *pd);
 static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
@@ -1431,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
 	sony_nc_handles_cleanup(pd);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static void sony_nc_function_resume(void)
 {
 	unsigned int i, result, bitmask, arg;
@@ -1508,6 +1511,7 @@ static int sony_nc_resume(struct device *dev)
 
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume);
 
@@ -1872,6 +1876,7 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
 	}
 }
 
+#ifdef CONFIG_PM_SLEEP
 static void sony_nc_kbd_backlight_resume(void)
 {
 	int ignore = 0;
@@ -1888,6 +1893,7 @@ static void sony_nc_kbd_backlight_resume(void)
 				(kbdbl_ctl->base + 0x200) |
 				(kbdbl_ctl->timeout << 0x10), &ignore);
 }
+#endif
 
 struct battery_care_control {
 	struct device_attribute attrs[2];
@@ -2210,6 +2216,7 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd)
 	}
 }
 
+#ifdef CONFIG_PM_SLEEP
 static void sony_nc_thermal_resume(void)
 {
 	unsigned int status = sony_nc_thermal_mode_get();
@@ -2217,6 +2224,7 @@ static void sony_nc_thermal_resume(void)
 	if (status != th_handle->mode)
 		sony_nc_thermal_mode_set(th_handle->mode);
 }
+#endif
 
 /* resume on LID open */
 struct snc_lid_resume_control {
@@ -4287,6 +4295,7 @@ err_free_resources:
 	return result;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int sony_pic_suspend(struct device *dev)
 {
 	if (sony_pic_disable(to_acpi_device(dev)))
@@ -4300,6 +4309,7 @@ static int sony_pic_resume(struct device *dev)
 			spic_dev.cur_ioport, spic_dev.cur_irq);
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume);
 
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e7f73287636c..f28f36ccdcf4 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -922,6 +922,7 @@ static struct input_dev *tpacpi_inputdev;
 static struct mutex tpacpi_inputdev_send_mutex;
 static LIST_HEAD(tpacpi_all_drivers);
 
+#ifdef CONFIG_PM_SLEEP
 static int tpacpi_suspend_handler(struct device *dev)
 {
 	struct ibm_struct *ibm, *itmp;
@@ -949,6 +950,7 @@ static int tpacpi_resume_handler(struct device *dev)
 
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(tpacpi_pm,
 			 tpacpi_suspend_handler, tpacpi_resume_handler);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index c13ba5bac93f..5f1256d5e933 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1296,6 +1296,7 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
 	}
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int toshiba_acpi_suspend(struct device *device)
 {
 	struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
@@ -1317,6 +1318,7 @@ static int toshiba_acpi_resume(struct device *device)
 
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
 			 toshiba_acpi_suspend, toshiba_acpi_resume);
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 715a43cb5e3c..5e5d6317d690 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -41,7 +41,9 @@ static const struct acpi_device_id bt_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, bt_device_ids);
 
+#ifdef CONFIG_PM_SLEEP
 static int toshiba_bt_resume(struct device *dev);
+#endif
 static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume);
 
 static struct acpi_driver toshiba_bt_rfkill_driver = {
@@ -90,10 +92,12 @@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
 	toshiba_bluetooth_enable(device->handle);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int toshiba_bt_resume(struct device *dev)
 {
 	return toshiba_bluetooth_enable(to_acpi_device(dev)->handle);
 }
+#endif
 
 static int toshiba_bt_rfkill_add(struct acpi_device *device)
 {
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 849c07c13bf6..38ba39d7ca7d 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -77,10 +77,12 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event)
 	}
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int ebook_switch_resume(struct device *dev)
 {
 	return ebook_send_state(to_acpi_device(dev));
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume);
 
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index eb415bd76494..9592b936b71b 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -582,6 +582,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
 void rtc_update_irq(struct rtc_device *rtc,
 		unsigned long num, unsigned long events)
 {
+	pm_stay_awake(rtc->dev.parent);
 	schedule_work(&rtc->irqwork);
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq);
@@ -844,6 +845,7 @@ void rtc_timer_do_work(struct work_struct *work)
 
 	mutex_lock(&rtc->ops_lock);
 again:
+	pm_relax(rtc->dev.parent);
 	__rtc_read_time(rtc, &tm);
 	now = rtc_tm_to_ktime(tm);
 	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
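
The interface.c hunks pair pm_stay_awake() in rtc_update_irq() with pm_relax() in rtc_timer_do_work(), so a wakeup-capable RTC keeps the system awake from the moment an interrupt queues the work until the worker has actually run (which is also why the pm_wakeup_event() call in rtc-cmos below becomes redundant and is removed). A condensed sketch of that pairing, assuming the parent device has already been registered as a wakeup source; the device and work names are hypothetical:

#include <linux/kernel.h>
#include <linux/pm_wakeup.h>
#include <linux/workqueue.h>

/* Hypothetical device, for illustration only. */
struct toy_rtc {
	struct device *parent;		/* must be wakeup-enabled, e.g. via device_init_wakeup() */
	struct work_struct irqwork;
};

/* Interrupt side: hold a wakeup reference until the worker runs. */
static void toy_rtc_interrupt(struct toy_rtc *rtc)
{
	pm_stay_awake(rtc->parent);
	schedule_work(&rtc->irqwork);
}

/* Worker side: release the reference once the event has been handled. */
static void toy_rtc_work(struct work_struct *work)
{
	struct toy_rtc *rtc = container_of(work, struct toy_rtc, irqwork);

	pm_relax(rtc->parent);
	/* ... read the time, expire timers, signal userspace ... */
}
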
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 132333d75408..4267789ca995 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -568,7 +568,6 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
 		hpet_mask_rtc_irq_bit(RTC_AIE);
 
 		CMOS_READ(RTC_INTR_FLAGS);
-		pm_wakeup_event(cmos_rtc.dev, 0);
 	}
 	spin_unlock(&rtc_lock);
 
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 6a6f76bf6e3d..b1032931a1c4 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -242,11 +242,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
 	switch (sdias_evbuf.event_status) {
 		case EVSTATE_ALL_STORED:
 			TRACE("all stored\n");
+			break;
 		case EVSTATE_PART_STORED:
 			TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
 			break;
 		case EVSTATE_NO_DATA:
 			TRACE("no data\n");
+			/* fall through */
 		default:
 			pr_err("Error from SCLP while copying hsa. "
 			       "Event status = %x\n",
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 2374468615ed..32c26d795ed0 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -324,8 +324,16 @@ int __init register_intc_controller(struct intc_desc *desc)
 
 		res = irq_create_identity_mapping(d->domain, irq);
 		if (unlikely(res)) {
-			pr_err("can't get irq_desc for %d\n", irq);
-			continue;
+			if (res == -EEXIST) {
+				res = irq_domain_associate(d->domain, irq, irq);
+				if (unlikely(res)) {
+					pr_err("domain association failure\n");
+					continue;
+				}
+			} else {
+				pr_err("can't identity map IRQ %d\n", irq);
+				continue;
+			}
 		}
 
 		intc_irq_xlate_set(irq, vect->enum_id, d);
@@ -345,8 +353,19 @@ int __init register_intc_controller(struct intc_desc *desc)
 			 */
 			res = irq_create_identity_mapping(d->domain, irq2);
 			if (unlikely(res)) {
-				pr_err("can't get irq_desc for %d\n", irq2);
-				continue;
+				if (res == -EEXIST) {
+					res = irq_domain_associate(d->domain,
+								   irq2, irq2);
+					if (unlikely(res)) {
+						pr_err("domain association "
+						       "failure\n");
+						continue;
+					}
+				} else {
+					pr_err("can't identity map IRQ %d\n",
+					       irq);
+					continue;
+				}
 			}
 
 			vect2->enum_id = 0;
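
Both register_intc_controller() hunks handle irq_create_identity_mapping() returning -EEXIST: when a descriptor for the vector already exists, the code now attaches it to the interrupt domain with irq_domain_associate() instead of skipping the vector entirely. A condensed sketch of that fallback, written as a hypothetical helper rather than the open-coded form used twice above:

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/printk.h>

/* Hypothetical wrapper, for illustration only. */
static int identity_map_or_associate(struct irq_domain *domain, unsigned int irq)
{
	int res = irq_create_identity_mapping(domain, irq);

	if (!res)
		return 0;

	if (res == -EEXIST)	/* descriptor already allocated: just associate it */
		return irq_domain_associate(domain, irq, irq);

	pr_err("can't identity map IRQ %d\n", irq);
	return res;
}
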
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index ee0ebacf8227..89dcf155d57e 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
 	writel(FLAG_CF, &ehci_regs->configured_flag);
 
 	/* Wait until the controller is no longer halted */
-	loop = 10;
+	loop = 1000;
 	do {
 		status = readl(&ehci_regs->status);
 		if (!(status & STS_HALT))
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index e4e2fd1b5107..202bba6c997c 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -9,3 +9,6 @@ config VHOST_NET
 	  To compile this driver as a module, choose M here: the module will
 	  be called vhost_net.
 
+if STAGING
+source "drivers/vhost/Kconfig.tcm"
+endif
diff --git a/drivers/vhost/Kconfig.tcm b/drivers/vhost/Kconfig.tcm
new file mode 100644
index 000000000000..a9c6f76e3208
--- /dev/null
+++ b/drivers/vhost/Kconfig.tcm
@@ -0,0 +1,6 @@
+config TCM_VHOST
+	tristate "TCM_VHOST fabric module (EXPERIMENTAL)"
+	depends on TARGET_CORE && EVENTFD && EXPERIMENTAL && m
+	default n
+	---help---
+	Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index 72dd02050bb9..a27b053bc9ab 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -1,2 +1,4 @@
 obj-$(CONFIG_VHOST_NET) += vhost_net.o
 vhost_net-y := vhost.o net.o
+
+obj-$(CONFIG_TCM_VHOST) += tcm_vhost.o
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
new file mode 100644
index 000000000000..fb366540ed54
--- /dev/null
+++ b/drivers/vhost/tcm_vhost.c
@@ -0,0 +1,1628 @@
+/*******************************************************************************
+ * Vhost kernel TCM fabric driver for virtio SCSI initiators
+ *
+ * (C) Copyright 2010-2012 RisingTide Systems LLC.
+ * (C) Copyright 2010-2012 IBM Corp.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
+#include <linux/virtio_scsi.h>
+
+#include "vhost.c"
+#include "vhost.h"
+#include "tcm_vhost.h"
+
+struct vhost_scsi {
+	atomic_t vhost_ref_cnt;
+	struct tcm_vhost_tpg *vs_tpg;
+	struct vhost_dev dev;
+	struct vhost_virtqueue vqs[3];
+
+	struct vhost_work vs_completion_work; /* cmd completion work item */
+	struct list_head vs_completion_list;  /* cmd completion queue */
+	spinlock_t vs_completion_lock;        /* protects vs_completion_list */
+};
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
+
+static struct workqueue_struct *tcm_vhost_workqueue;
+
+/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
+static DEFINE_MUTEX(tcm_vhost_mutex);
+static LIST_HEAD(tcm_vhost_list);
+
+static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static char *tcm_vhost_get_fabric_name(void)
+{
+	return "vhost";
+}
+
+static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport = tpg->tport;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_fabric_proto_ident(se_tpg);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_fabric_proto_ident(se_tpg);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_fabric_proto_ident(se_tpg);
+	default:
+		pr_err("Unknown tport_proto_id: 0x%02x, using"
+			" SAS emulation\n", tport->tport_proto_id);
+		break;
+	}
+
+	return sas_get_fabric_proto_ident(se_tpg);
+}
+
+static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport = tpg->tport;
+
+	return &tport->tport_name[0];
+}
+
+static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	return tpg->tport_tpgt;
+}
+
+static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static u32 tcm_vhost_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport = tpg->tport;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	default:
+		pr_err("Unknown tport_proto_id: 0x%02x, using"
+			" SAS emulation\n", tport->tport_proto_id);
+		break;
+	}
+
+	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+			format_code, buf);
+}
+
+static u32 tcm_vhost_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport = tpg->tport;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	default:
+		pr_err("Unknown tport_proto_id: 0x%02x, using"
+			" SAS emulation\n", tport->tport_proto_id);
+		break;
+	}
+
+	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+			format_code);
+}
+
+static char *tcm_vhost_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport = tpg->tport;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	case SCSI_PROTOCOL_FCP:
+		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	default:
+		pr_err("Unknown tport_proto_id: 0x%02x, using"
+			" SAS emulation\n", tport->tport_proto_id);
+		break;
+	}
+
+	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+			port_nexus_ptr);
+}
+
+static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
+	struct se_portal_group *se_tpg)
+{
+	struct tcm_vhost_nacl *nacl;
+
+	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
+	if (!nacl) {
+		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
+		return NULL;
+	}
+
+	return &nacl->se_node_acl;
+}
+
+static void tcm_vhost_release_fabric_acl(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl)
+{
+	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
+			struct tcm_vhost_nacl, se_node_acl);
+	kfree(nacl);
+}
+
+static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
+{
+	return;
+}
+
+static int tcm_vhost_shutdown_session(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static void tcm_vhost_close_session(struct se_session *se_sess)
+{
+	return;
+}
+
+static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
+{
+	/* Go ahead and process the write immediately */
+	target_execute_cmd(se_cmd);
+	return 0;
+}
+
+static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
+{
+	return;
+}
+
+static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *);
+
+static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+				struct tcm_vhost_cmd, tvc_se_cmd);
+	vhost_scsi_complete_cmd(tv_cmd);
+	return 0;
+}
+
+static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
+{
+	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+				struct tcm_vhost_cmd, tvc_se_cmd);
+	vhost_scsi_complete_cmd(tv_cmd);
+	return 0;
+}
+
+static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static u16 tcm_vhost_set_fabric_sense_len(struct se_cmd *se_cmd,
+	u32 sense_length)
+{
+	return 0;
+}
+
+static u16 tcm_vhost_get_fabric_sense_len(void)
+{
+	return 0;
+}
+
+static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
+{
+	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+
+	/* TODO locking against target/backend threads? */
+	transport_generic_free_cmd(se_cmd, 1);
+
+	if (tv_cmd->tvc_sgl_count) {
+		u32 i;
+		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
+			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+
+		kfree(tv_cmd->tvc_sgl);
+	}
+
+	kfree(tv_cmd);
+}
+
+/* Dequeue a command from the completion list */
+static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
+	struct vhost_scsi *vs)
+{
+	struct tcm_vhost_cmd *tv_cmd = NULL;
+
+	spin_lock_bh(&vs->vs_completion_lock);
+	if (list_empty(&vs->vs_completion_list)) {
+		spin_unlock_bh(&vs->vs_completion_lock);
+		return NULL;
+	}
+
+	list_for_each_entry(tv_cmd, &vs->vs_completion_list,
+			    tvc_completion_list) {
+		list_del(&tv_cmd->tvc_completion_list);
+		break;
+	}
+	spin_unlock_bh(&vs->vs_completion_lock);
+	return tv_cmd;
+}
+
+/* Fill in status and signal that we are done processing this command
+ *
+ * This is scheduled in the vhost work queue so we are called with the owner
+ * process mm and can access the vring.
+ */
+static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+{
+	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+					vs_completion_work);
+	struct tcm_vhost_cmd *tv_cmd;
+
+	while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) {
+		struct virtio_scsi_cmd_resp v_rsp;
+		struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+		int ret;
+
+		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
+			tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
+
+		memset(&v_rsp, 0, sizeof(v_rsp));
+		v_rsp.resid = se_cmd->residual_count;
+		/* TODO is status_qualifier field needed? */
+		v_rsp.status = se_cmd->scsi_status;
+		v_rsp.sense_len = se_cmd->scsi_sense_length;
+		memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
+		       v_rsp.sense_len);
+		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
+		if (likely(ret == 0))
+			vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
+		else
+			pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
+		vhost_scsi_free_cmd(tv_cmd);
+	}
+
+	vhost_signal(&vs->dev, &vs->vqs[2]);
+}
+
+static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
+{
+	struct vhost_scsi *vs = tv_cmd->tvc_vhost;
+
+	pr_debug("%s tv_cmd %p\n", __func__, tv_cmd);
+
+	spin_lock_bh(&vs->vs_completion_lock);
+	list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
+	spin_unlock_bh(&vs->vs_completion_lock);
+
+	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+}
+
+static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
+	struct tcm_vhost_tpg *tv_tpg,
+	struct virtio_scsi_cmd_req *v_req,
+	u32 exp_data_len,
+	int data_direction)
+{
+	struct tcm_vhost_cmd *tv_cmd;
+	struct tcm_vhost_nexus *tv_nexus;
+	struct se_portal_group *se_tpg = &tv_tpg->se_tpg;
+	struct se_session *se_sess;
+	struct se_cmd *se_cmd;
+	int sam_task_attr;
+
+	tv_nexus = tv_tpg->tpg_nexus;
+	if (!tv_nexus) {
+		pr_err("Unable to locate active struct tcm_vhost_nexus\n");
+		return ERR_PTR(-EIO);
+	}
+	se_sess = tv_nexus->tvn_se_sess;
+
+	tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
+	if (!tv_cmd) {
+		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
+	tv_cmd->tvc_tag = v_req->tag;
+
+	se_cmd = &tv_cmd->tvc_se_cmd;
+	/*
+	 * Locate the SAM Task Attr from virtio_scsi_cmd_req
+	 */
+	sam_task_attr = v_req->task_attr;
+	/*
+	 * Initialize struct se_cmd descriptor from TCM infrastructure
+	 */
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, exp_data_len,
+				data_direction, sam_task_attr,
+				&tv_cmd->tvc_sense_buf[0]);
+
+#if 0	/* FIXME: vhost_scsi_allocate_cmd() BIDI operation */
+	if (bidi)
+		se_cmd->se_cmd_flags |= SCF_BIDI;
+#endif
+	return tv_cmd;
+}
+
+/*
+ * Map a user memory range into a scatterlist
+ *
+ * Returns the number of scatterlist entries used or -errno on error.
+ */
+static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
+	unsigned int sgl_count, void __user *ptr, size_t len, int write)
+{
+	struct scatterlist *sg = sgl;
+	unsigned int npages = 0;
+	int ret;
+
+	while (len > 0) {
+		struct page *page;
+		unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
+		unsigned int nbytes = min_t(unsigned int,
+				PAGE_SIZE - offset, len);
+
+		if (npages == sgl_count) {
+			ret = -ENOBUFS;
+			goto err;
+		}
+
+		ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
+		BUG_ON(ret == 0); /* we should either get our page or fail */
+		if (ret < 0)
+			goto err;
+
+		sg_set_page(sg, page, nbytes, offset);
+		ptr += nbytes;
+		len -= nbytes;
+		sg++;
+		npages++;
+	}
+	return npages;
+
+err:
+	/* Put pages that we hold */
+	for (sg = sgl; sg != &sgl[npages]; sg++)
+		put_page(sg_page(sg));
+	return ret;
+}
+
+static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+	struct iovec *iov, unsigned int niov, int write)
+{
+	int ret;
+	unsigned int i;
+	u32 sgl_count;
+	struct scatterlist *sg;
+
+	/*
+	 * Find out how long sglist needs to be
+	 */
+	sgl_count = 0;
+	for (i = 0; i < niov; i++) {
+		sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
+				PAGE_SIZE - 1) >> PAGE_SHIFT) -
+				((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
+	}
+	/* TODO overflow checking */
+
+	sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
+	if (!sg)
+		return -ENOMEM;
+	pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__,
+	       sg, sgl_count, IS_ERR(sg));
+	sg_init_table(sg, sgl_count);
+
+	tv_cmd->tvc_sgl = sg;
+	tv_cmd->tvc_sgl_count = sgl_count;
+
+	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
+	for (i = 0; i < niov; i++) {
+		ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
+					iov[i].iov_len, write);
+		if (ret < 0) {
+			for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
+				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+			kfree(tv_cmd->tvc_sgl);
+			tv_cmd->tvc_sgl = NULL;
+			tv_cmd->tvc_sgl_count = 0;
+			return ret;
+		}
+
+		sg += ret;
+		sgl_count -= ret;
+	}
+	return 0;
+}
+
+static void tcm_vhost_submission_work(struct work_struct *work)
+{
+	struct tcm_vhost_cmd *tv_cmd =
+		container_of(work, struct tcm_vhost_cmd, work);
+	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+	struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
+	int rc, sg_no_bidi = 0;
+	/*
+	 * Locate the struct se_lun pointer based on v_req->lun, and
+	 * attach it to struct se_cmd
+	 */
+	rc = transport_lookup_cmd_lun(&tv_cmd->tvc_se_cmd, tv_cmd->tvc_lun);
+	if (rc < 0) {
+		pr_err("Failed to look up lun: %d\n", tv_cmd->tvc_lun);
+		transport_send_check_condition_and_sense(&tv_cmd->tvc_se_cmd,
+			tv_cmd->tvc_se_cmd.scsi_sense_reason, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	}
+
+	rc = target_setup_cmd_from_cdb(se_cmd, tv_cmd->tvc_cdb);
+	if (rc == -ENOMEM) {
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	} else if (rc < 0) {
+		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+			tcm_vhost_queue_status(se_cmd);
+		else
+			transport_send_check_condition_and_sense(se_cmd,
+					se_cmd->scsi_sense_reason, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	}
+
+	if (tv_cmd->tvc_sgl_count) {
+		sg_ptr = tv_cmd->tvc_sgl;
+		/*
+		 * For BIDI commands, pass in the extra READ buffer
+		 * to transport_generic_map_mem_to_cmd() below..
+		 */
+/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
+#if 0
+		if (se_cmd->se_cmd_flags & SCF_BIDI) {
+			sg_bidi_ptr = NULL;
+			sg_no_bidi = 0;
+		}
+#endif
+	} else {
+		sg_ptr = NULL;
+	}
+
+	rc = transport_generic_map_mem_to_cmd(se_cmd, sg_ptr,
+				tv_cmd->tvc_sgl_count, sg_bidi_ptr,
+				sg_no_bidi);
+	if (rc < 0) {
+		transport_send_check_condition_and_sense(se_cmd,
+				se_cmd->scsi_sense_reason, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	}
+	transport_handle_cdb_direct(se_cmd);
+}
+
+static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
+{
+	struct vhost_virtqueue *vq = &vs->vqs[2];
+	struct virtio_scsi_cmd_req v_req;
+	struct tcm_vhost_tpg *tv_tpg;
+	struct tcm_vhost_cmd *tv_cmd;
+	u32 exp_data_len, data_first, data_num, data_direction;
+	unsigned out, in, i;
+	int head, ret;
+
+	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
+	tv_tpg = vs->vs_tpg;
+	if (unlikely(!tv_tpg)) {
+		pr_err("%s endpoint not set\n", __func__);
+		return;
+	}
+
+	mutex_lock(&vq->mutex);
+	vhost_disable_notify(&vs->dev, vq);
+
+	for (;;) {
+		head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+					ARRAY_SIZE(vq->iov), &out, &in,
+					NULL, NULL);
+		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+					head, out, in);
+		/* On error, stop handling until the next kick. */
+		if (unlikely(head < 0))
+			break;
+		/* Nothing new?  Wait for eventfd to tell us they refilled. */
+		if (head == vq->num) {
+			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+				vhost_disable_notify(&vs->dev, vq);
+				continue;
+			}
+			break;
+		}
+
+/* FIXME: BIDI operation */
+		if (out == 1 && in == 1) {
+			data_direction = DMA_NONE;
+			data_first = 0;
+			data_num = 0;
+		} else if (out == 1 && in > 1) {
+			data_direction = DMA_FROM_DEVICE;
+			data_first = out + 1;
+			data_num = in - 1;
+		} else if (out > 1 && in == 1) {
+			data_direction = DMA_TO_DEVICE;
+			data_first = 1;
+			data_num = out - 1;
+		} else {
+			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
+					out, in);
+			break;
+		}
+
+		/*
+		 * Check for a sane resp buffer so we can report errors to
+		 * the guest.
+		 */
+		if (unlikely(vq->iov[out].iov_len !=
+					sizeof(struct virtio_scsi_cmd_resp))) {
+			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+				" bytes\n", vq->iov[out].iov_len);
+			break;
+		}
+
+		if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
+			vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
+				" bytes\n", vq->iov[0].iov_len);
+			break;
+		}
+		pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
+			" len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
+		ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
+				sizeof(v_req));
+		if (unlikely(ret)) {
+			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
+			break;
+		}
+
+		exp_data_len = 0;
+		for (i = 0; i < data_num; i++)
+			exp_data_len += vq->iov[data_first + i].iov_len;
+
+		tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,
+					exp_data_len, data_direction);
+		if (IS_ERR(tv_cmd)) {
+			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
+					PTR_ERR(tv_cmd));
+			break;
+		}
+		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
+			": %d\n", tv_cmd, exp_data_len, data_direction);
+
+		tv_cmd->tvc_vhost = vs;
+
+		if (unlikely(vq->iov[out].iov_len !=
+				sizeof(struct virtio_scsi_cmd_resp))) {
+			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+				" bytes, out: %d, in: %d\n",
+				vq->iov[out].iov_len, out, in);
+			break;
+		}
+
+		tv_cmd->tvc_resp = vq->iov[out].iov_base;
+
+		/*
+		 * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
+		 * that will be used by tcm_vhost_new_cmd_map() and down into
+		 * target_setup_cmd_from_cdb()
+		 */
+		memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
+		/*
+		 * Check that the received CDB size does not exceed our
+		 * hardcoded max for tcm_vhost
+		 */
+		/* TODO what if cdb was too small for varlen cdb header? */
+		if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
+					TCM_VHOST_MAX_CDB_SIZE)) {
+			vq_err(vq, "Received SCSI CDB with command_size: %d that"
+				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+				scsi_command_size(tv_cmd->tvc_cdb),
+				TCM_VHOST_MAX_CDB_SIZE);
+			break; /* TODO */
+		}
+		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+
+		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
+			tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
+
+		if (data_direction != DMA_NONE) {
+			ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
+					&vq->iov[data_first], data_num,
+					data_direction == DMA_TO_DEVICE);
+			if (unlikely(ret)) {
+				vq_err(vq, "Failed to map iov to sgl\n");
+				break; /* TODO */
+			}
+		}
+
+		/*
+		 * Save the descriptor from vhost_get_vq_desc() to be used to
+		 * complete the virtio-scsi request in TCM callback context via
+		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
+		 */
+		tv_cmd->tvc_vq_desc = head;
+		/*
+		 * Dispatch tv_cmd descriptor for cmwq execution in process
+		 * context provided by tcm_vhost_workqueue.  This also ensures
+		 * tv_cmd is executed on the same kworker CPU as this vhost
+		 * thread to gain positive L2 cache locality effects..
+		 */
+		INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
+		queue_work(tcm_vhost_workqueue, &tv_cmd->work);
+	}
+
+	mutex_unlock(&vq->mutex);
+}
+
+static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
+{
+	pr_err("%s: The handling func for control queue.\n", __func__);
+}
+
+static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
+{
+	pr_err("%s: The handling func for event queue.\n", __func__);
+}
+
+static void vhost_scsi_handle_kick(struct vhost_work *work)
+{
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						poll.work);
+	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+	vhost_scsi_handle_vq(vs);
+}
+
+/*
+ * Called from vhost_scsi_ioctl() context to walk the list of available
+ * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ */
+static int vhost_scsi_set_endpoint(
+	struct vhost_scsi *vs,
+	struct vhost_scsi_target *t)
+{
+	struct tcm_vhost_tport *tv_tport;
+	struct tcm_vhost_tpg *tv_tpg;
+	int index;
+
+	mutex_lock(&vs->dev.mutex);
+	/* Verify that ring has been setup correctly. */
+	for (index = 0; index < vs->dev.nvqs; ++index) {
+		/* Verify that ring has been setup correctly. */
+		if (!vhost_vq_access_ok(&vs->vqs[index])) {
+			mutex_unlock(&vs->dev.mutex);
+			return -EFAULT;
+		}
+	}
+
+	if (vs->vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return -EEXIST;
+	}
+	mutex_unlock(&vs->dev.mutex);
+
+	mutex_lock(&tcm_vhost_mutex);
+	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
+		mutex_lock(&tv_tpg->tv_tpg_mutex);
+		if (!tv_tpg->tpg_nexus) {
+			mutex_unlock(&tv_tpg->tv_tpg_mutex);
+			continue;
+		}
+		if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) {
+			mutex_unlock(&tv_tpg->tv_tpg_mutex);
+			continue;
+		}
+		tv_tport = tv_tpg->tport;
+
+		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
+		    (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
+			atomic_inc(&tv_tpg->tv_tpg_vhost_count);
+			smp_mb__after_atomic_inc();
+			mutex_unlock(&tv_tpg->tv_tpg_mutex);
+			mutex_unlock(&tcm_vhost_mutex);
+
+			mutex_lock(&vs->dev.mutex);
+			vs->vs_tpg = tv_tpg;
+			atomic_inc(&vs->vhost_ref_cnt);
+			smp_mb__after_atomic_inc();
+			mutex_unlock(&vs->dev.mutex);
+			return 0;
+		}
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+	}
+	mutex_unlock(&tcm_vhost_mutex);
+	return -EINVAL;
+}
+
+static int vhost_scsi_clear_endpoint(
+	struct vhost_scsi *vs,
+	struct vhost_scsi_target *t)
+{
+	struct tcm_vhost_tport *tv_tport;
+	struct tcm_vhost_tpg *tv_tpg;
+	int index;
+
+	mutex_lock(&vs->dev.mutex);
+	/* Verify that ring has been setup correctly. */
+	for (index = 0; index < vs->dev.nvqs; ++index) {
+		if (!vhost_vq_access_ok(&vs->vqs[index])) {
+			mutex_unlock(&vs->dev.mutex);
+			return -EFAULT;
+		}
+	}
+
+	if (!vs->vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return -ENODEV;
+	}
+	tv_tpg = vs->vs_tpg;
+	tv_tport = tv_tpg->tport;
+
+	if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
+	    (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
+		mutex_unlock(&vs->dev.mutex);
+		pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
+			" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
+			tv_tport->tport_name, tv_tpg->tport_tpgt,
+			t->vhost_wwpn, t->vhost_tpgt);
+		return -EINVAL;
+	}
+	atomic_dec(&tv_tpg->tv_tpg_vhost_count);
+	vs->vs_tpg = NULL;
+	mutex_unlock(&vs->dev.mutex);
+
+	return 0;
+}
+
+static int vhost_scsi_open(struct inode *inode, struct file *f)
+{
+	struct vhost_scsi *s;
+	int r;
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
+	INIT_LIST_HEAD(&s->vs_completion_list);
+	spin_lock_init(&s->vs_completion_lock);
+
+	s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick;
+	s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick;
+	s->vqs[2].handle_kick = vhost_scsi_handle_kick;
+	r = vhost_dev_init(&s->dev, s->vqs, 3);
+	if (r < 0) {
+		kfree(s);
+		return r;
+	}
+
+	f->private_data = s;
+	return 0;
+}
+
+static int vhost_scsi_release(struct inode *inode, struct file *f)
+{
+	struct vhost_scsi *s = f->private_data;
+
+	if (s->vs_tpg && s->vs_tpg->tport) {
+		struct vhost_scsi_target backend;
+
+		memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
+				sizeof(backend.vhost_wwpn));
+		backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
+		vhost_scsi_clear_endpoint(s, &backend);
+	}
+
+	vhost_dev_cleanup(&s->dev, false);
+	kfree(s);
+	return 0;
+}
+
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+	if (features & ~VHOST_FEATURES)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&vs->dev.mutex);
+	if ((features & (1 << VHOST_F_LOG_ALL)) &&
+	    !vhost_log_access_ok(&vs->dev)) {
+		mutex_unlock(&vs->dev.mutex);
+		return -EFAULT;
+	}
+	vs->dev.acked_features = features;
+	/* TODO possibly smp_wmb() and flush vqs */
+	mutex_unlock(&vs->dev.mutex);
+	return 0;
+}
+
+static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
+				unsigned long arg)
+{
+	struct vhost_scsi *vs = f->private_data;
+	struct vhost_scsi_target backend;
+	void __user *argp = (void __user *)arg;
+	u64 __user *featurep = argp;
+	u64 features;
+	int r;
+
+	switch (ioctl) {
+	case VHOST_SCSI_SET_ENDPOINT:
+		if (copy_from_user(&backend, argp, sizeof backend))
+			return -EFAULT;
+
+		return vhost_scsi_set_endpoint(vs, &backend);
+	case VHOST_SCSI_CLEAR_ENDPOINT:
+		if (copy_from_user(&backend, argp, sizeof backend))
+			return -EFAULT;
+
+		return vhost_scsi_clear_endpoint(vs, &backend);
+	case VHOST_SCSI_GET_ABI_VERSION:
+		if (copy_from_user(&backend, argp, sizeof backend))
+			return -EFAULT;
+
+		backend.abi_version = VHOST_SCSI_ABI_VERSION;
+
+		if (copy_to_user(argp, &backend, sizeof backend))
+			return -EFAULT;
+		return 0;
+	case VHOST_GET_FEATURES:
+		features = VHOST_FEATURES;
+		if (copy_to_user(featurep, &features, sizeof features))
+			return -EFAULT;
+		return 0;
+	case VHOST_SET_FEATURES:
+		if (copy_from_user(&features, featurep, sizeof features))
+			return -EFAULT;
+		return vhost_scsi_set_features(vs, features);
+	default:
+		mutex_lock(&vs->dev.mutex);
+		r = vhost_dev_ioctl(&vs->dev, ioctl, arg);
+		mutex_unlock(&vs->dev.mutex);
+		return r;
+	}
+}
+
+static const struct file_operations vhost_scsi_fops = {
+	.owner          = THIS_MODULE,
+	.release        = vhost_scsi_release,
+	.unlocked_ioctl = vhost_scsi_ioctl,
+	/* TODO compat ioctl? */
+	.open           = vhost_scsi_open,
+	.llseek		= noop_llseek,
+};
+
+static struct miscdevice vhost_scsi_misc = {
+	MISC_DYNAMIC_MINOR,
+	"vhost-scsi",
+	&vhost_scsi_fops,
+};
+
+static int __init vhost_scsi_register(void)
+{
+	return misc_register(&vhost_scsi_misc);
+}
+
+static int vhost_scsi_deregister(void)
+{
+	return misc_deregister(&vhost_scsi_misc);
+}
+
+static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
+{
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return "SAS";
+	case SCSI_PROTOCOL_FCP:
+		return "FCP";
+	case SCSI_PROTOCOL_ISCSI:
+		return "iSCSI";
+	default:
+		break;
+	}
+
+	return "Unknown";
+}
+
+static int tcm_vhost_port_link(
+	struct se_portal_group *se_tpg,
+	struct se_lun *lun)
+{
+	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+
+	atomic_inc(&tv_tpg->tv_tpg_port_count);
+	smp_mb__after_atomic_inc();
+
+	return 0;
+}
+
+static void tcm_vhost_port_unlink(
+	struct se_portal_group *se_tpg,
+	struct se_lun *se_lun)
+{
+	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+
+	atomic_dec(&tv_tpg->tv_tpg_port_count);
+	smp_mb__after_atomic_dec();
+}
+
+static struct se_node_acl *tcm_vhost_make_nodeacl(
+	struct se_portal_group *se_tpg,
+	struct config_group *group,
+	const char *name)
+{
+	struct se_node_acl *se_nacl, *se_nacl_new;
+	struct tcm_vhost_nacl *nacl;
+	u64 wwpn = 0;
+	u32 nexus_depth;
+
+	/* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL); */
+	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
+	if (!se_nacl_new)
+		return ERR_PTR(-ENOMEM);
+
+	nexus_depth = 1;
+	/*
+	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+	 */
+	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+				name, nexus_depth);
+	if (IS_ERR(se_nacl)) {
+		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
+		return se_nacl;
+	}
+	/*
+	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
+	 */
+	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
+	nacl->iport_wwpn = wwpn;
+
+	return se_nacl;
+}
+
+static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
+{
+	struct tcm_vhost_nacl *nacl = container_of(se_acl,
+				struct tcm_vhost_nacl, se_node_acl);
+	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
+	kfree(nacl);
+}
+
+static int tcm_vhost_make_nexus(
+	struct tcm_vhost_tpg *tv_tpg,
+	const char *name)
+{
+	struct se_portal_group *se_tpg;
+	struct tcm_vhost_nexus *tv_nexus;
+
+	mutex_lock(&tv_tpg->tv_tpg_mutex);
+	if (tv_tpg->tpg_nexus) {
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+		pr_debug("tv_tpg->tpg_nexus already exists\n");
+		return -EEXIST;
+	}
+	se_tpg = &tv_tpg->se_tpg;
+
+	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
+	if (!tv_nexus) {
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
+		return -ENOMEM;
+	}
+	/*
+	 *  Initialize the struct se_session pointer
+	 */
+	tv_nexus->tvn_se_sess = transport_init_session();
+	if (IS_ERR(tv_nexus->tvn_se_sess)) {
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+		kfree(tv_nexus);
+		return -ENOMEM;
+	}
+	/*
+	 * Since we are running in 'demo mode' this call will generate a
+	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
+	 * the SCSI Initiator port name of the passed configfs group 'name'.
+	 */
+	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+				se_tpg, (unsigned char *)name);
+	if (!tv_nexus->tvn_se_sess->se_node_acl) {
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+		pr_debug("core_tpg_check_initiator_node_acl() failed"
+				" for %s\n", name);
+		transport_free_session(tv_nexus->tvn_se_sess);
+		kfree(tv_nexus);
+		return -ENOMEM;
+	}
+	/*
+	 * Now register the TCM vHost virtual I_T Nexus as active with the
+	 * call to __transport_register_session()
+	 */
+	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
+			tv_nexus->tvn_se_sess, tv_nexus);
+	tv_tpg->tpg_nexus = tv_nexus;
+
+	mutex_unlock(&tv_tpg->tv_tpg_mutex);
+	return 0;
+}
+
+static int tcm_vhost_drop_nexus(
+	struct tcm_vhost_tpg *tpg)
+{
+	struct se_session *se_sess;
+	struct tcm_vhost_nexus *tv_nexus;
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		return -ENODEV;
+	}
+
+	se_sess = tv_nexus->tvn_se_sess;
+	if (!se_sess) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		return -ENODEV;
+	}
+
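+	/*
+	 * Refuse to drop the nexus while LUN links or vhost-scsi endpoints
+	 * still reference this TPG.
+	 */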
+	if (atomic_read(&tpg->tv_tpg_port_count)) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_err("Unable to remove TCM_vHost I_T Nexus with"
+			" active TPG port count: %d\n",
+			atomic_read(&tpg->tv_tpg_port_count));
+		return -EPERM;
+	}
+
+	if (atomic_read(&tpg->tv_tpg_vhost_count)) {
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		pr_err("Unable to remove TCM_vHost I_T Nexus with"
+			" active TPG vhost count: %d\n",
+			atomic_read(&tpg->tv_tpg_vhost_count));
+		return -EPERM;
+	}
+
+	pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated"
+		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
+		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+	/*
+	 * Release the SCSI I_T Nexus to the emulated vHost Target Port
+	 */
+	transport_deregister_session(tv_nexus->tvn_se_sess);
+	tpg->tpg_nexus = NULL;
+	mutex_unlock(&tpg->tv_tpg_mutex);
+
+	kfree(tv_nexus);
+	return 0;
+}
+
+static ssize_t tcm_vhost_tpg_show_nexus(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_nexus *tv_nexus;
+	ssize_t ret;
+
+	mutex_lock(&tv_tpg->tv_tpg_mutex);
+	tv_nexus = tv_tpg->tpg_nexus;
+	if (!tv_nexus) {
+		mutex_unlock(&tv_tpg->tv_tpg_mutex);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%s\n",
+			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+	mutex_unlock(&tv_tpg->tv_tpg_mutex);
+
+	return ret;
+}
+
+static ssize_t tcm_vhost_tpg_store_nexus(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+	struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
+	unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
+	int ret;
+	/*
+	 * Shutdown the active I_T nexus if 'NULL' is passed..
+	 */
+	if (!strncmp(page, "NULL", 4)) {
+		ret = tcm_vhost_drop_nexus(tv_tpg);
+		return (!ret) ? count : ret;
+	}
+	/*
+	 * Otherwise make sure the passed virtual Initiator port WWN matches
+	 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
+	 * tcm_vhost_make_nexus().
+	 */
+	if (strlen(page) >= TCM_VHOST_NAMELEN) {
+		pr_err("Emulated initiator port name: %s, exceeds"
+				" max: %d\n", page, TCM_VHOST_NAMELEN);
+		return -EINVAL;
+	}
+	snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
+
+	ptr = strstr(i_port, "naa.");
+	if (ptr) {
+		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
+			pr_err("Passed SAS Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_vhost_dump_proto_id(tport_wwn));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "fc.");
+	if (ptr) {
+		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
+			pr_err("Passed FCP Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_vhost_dump_proto_id(tport_wwn));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[3]; /* Skip over "fc." */
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "iqn.");
+	if (ptr) {
+		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
+			pr_err("Passed iSCSI Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_vhost_dump_proto_id(tport_wwn));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	pr_err("Unable to locate prefix for emulated Initiator Port:"
+			" %s\n", i_port);
+	return -EINVAL;
+	/*
+	 * Clear any trailing newline for the NAA WWN
+	 */
+check_newline:
+	if (i_port[strlen(i_port)-1] == '\n')
+		i_port[strlen(i_port)-1] = '\0';
+
+	ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
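+/*
+ * Expose the I_T nexus as a "nexus" attribute under each TPG configfs
+ * directory.  Writing an initiator port name creates the nexus, writing
+ * "NULL" drops it, e.g. (illustrative path):
+ *
+ *	echo naa.<initiator wwpn> > \
+ *		/sys/kernel/config/target/vhost/naa.<target wwpn>/tpgt_1/nexus
+ */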
+TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
+	&tcm_vhost_tpg_nexus.attr,
+	NULL,
+};
+
+static struct se_portal_group *tcm_vhost_make_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_vhost_tport *tport = container_of(wwn,
+			struct tcm_vhost_tport, tport_wwn);
+
+	struct tcm_vhost_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
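+	/* configfs TPG group names arrive as "tpgt_<n>"; parse the numeric tag */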
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct tcm_vhost_tpg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	mutex_init(&tpg->tv_tpg_mutex);
+	INIT_LIST_HEAD(&tpg->tv_tpg_list);
+	tpg->tport = tport;
+	tpg->tport_tpgt = tpgt;
+
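+	/*
+	 * Register the TPG with TCM core as a normal (non-discovery)
+	 * portal group.
+	 */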
+	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
+				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		kfree(tpg);
+		return NULL;
+	}
+	mutex_lock(&tcm_vhost_mutex);
+	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
+	mutex_unlock(&tcm_vhost_mutex);
+
+	return &tpg->se_tpg;
+}
+
+static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
+{
+	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+				struct tcm_vhost_tpg, se_tpg);
+
+	mutex_lock(&tcm_vhost_mutex);
+	list_del(&tpg->tv_tpg_list);
+	mutex_unlock(&tcm_vhost_mutex);
+	/*
+	 * Release the virtual I_T Nexus for this vHost TPG
+	 */
+	tcm_vhost_drop_nexus(tpg);
+	/*
+	 * Deregister the se_tpg from TCM..
+	 */
+	core_tpg_deregister(se_tpg);
+	kfree(tpg);
+}
+
+static struct se_wwn *tcm_vhost_make_tport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_vhost_tport *tport;
+	char *ptr;
+	u64 wwpn = 0;
+	int off = 0;
+
+	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL); */
+
+	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
+	if (!tport) {
+		pr_err("Unable to allocate struct tcm_vhost_tport\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tport->tport_wwpn = wwpn;
+	/*
+	 * Determine the emulated Protocol Identifier and Target Port Name
+	 * based on the incoming configfs directory name.
+	 */
+	ptr = strstr(name, "naa.");
+	if (ptr) {
+		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
+		goto check_len;
+	}
+	ptr = strstr(name, "fc.");
+	if (ptr) {
+		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
+		off = 3; /* Skip over "fc." */
+		goto check_len;
+	}
+	ptr = strstr(name, "iqn.");
+	if (ptr) {
+		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
+		goto check_len;
+	}
+
+	pr_err("Unable to locate prefix for emulated Target Port:"
+			" %s\n", name);
+	kfree(tport);
+	return ERR_PTR(-EINVAL);
+
+check_len:
+	if (strlen(name) >= TCM_VHOST_NAMELEN) {
+		pr_err("Emulated %s Address: %s, exceeds"
+			" max: %d\n", tcm_vhost_dump_proto_id(tport), name,
+			TCM_VHOST_NAMELEN);
+		kfree(tport);
+		return ERR_PTR(-EINVAL);
+	}
+	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
+
+	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
+		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
+
+	return &tport->tport_wwn;
+}
+
+static void tcm_vhost_drop_tport(struct se_wwn *wwn)
+{
+	struct tcm_vhost_tport *tport = container_of(wwn,
+				struct tcm_vhost_tport, tport_wwn);
+
+	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
+		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
+		tport->tport_name);
+
+	kfree(tport);
+}
+
+static ssize_t tcm_vhost_wwn_show_attr_version(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
+		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+		utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_vhost, version);
+
+static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
+	&tcm_vhost_wwn_version.attr,
+	NULL,
+};
+
+static struct target_core_fabric_ops tcm_vhost_ops = {
+	.get_fabric_name		= tcm_vhost_get_fabric_name,
+	.get_fabric_proto_ident		= tcm_vhost_get_fabric_proto_ident,
+	.tpg_get_wwn			= tcm_vhost_get_fabric_wwn,
+	.tpg_get_tag			= tcm_vhost_get_tag,
+	.tpg_get_default_depth		= tcm_vhost_get_default_depth,
+	.tpg_get_pr_transport_id	= tcm_vhost_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= tcm_vhost_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= tcm_vhost_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= tcm_vhost_check_true,
+	.tpg_check_demo_mode_cache	= tcm_vhost_check_true,
+	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
+	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
+	.tpg_alloc_fabric_acl		= tcm_vhost_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= tcm_vhost_release_fabric_acl,
+	.tpg_get_inst_index		= tcm_vhost_tpg_get_inst_index,
+	.release_cmd			= tcm_vhost_release_cmd,
+	.shutdown_session		= tcm_vhost_shutdown_session,
+	.close_session			= tcm_vhost_close_session,
+	.sess_get_index			= tcm_vhost_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= tcm_vhost_write_pending,
+	.write_pending_status		= tcm_vhost_write_pending_status,
+	.set_default_node_attributes	= tcm_vhost_set_default_node_attrs,
+	.get_task_tag			= tcm_vhost_get_task_tag,
+	.get_cmd_state			= tcm_vhost_get_cmd_state,
+	.queue_data_in			= tcm_vhost_queue_data_in,
+	.queue_status			= tcm_vhost_queue_status,
+	.queue_tm_rsp			= tcm_vhost_queue_tm_rsp,
+	.get_fabric_sense_len		= tcm_vhost_get_fabric_sense_len,
+	.set_fabric_sense_len		= tcm_vhost_set_fabric_sense_len,
+	/*
+	 * Setup callers for generic logic in target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn		= tcm_vhost_make_tport,
+	.fabric_drop_wwn		= tcm_vhost_drop_tport,
+	.fabric_make_tpg		= tcm_vhost_make_tpg,
+	.fabric_drop_tpg		= tcm_vhost_drop_tpg,
+	.fabric_post_link		= tcm_vhost_port_link,
+	.fabric_pre_unlink		= tcm_vhost_port_unlink,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_make_nodeacl		= tcm_vhost_make_nodeacl,
+	.fabric_drop_nodeacl		= tcm_vhost_drop_nodeacl,
+};
+
+static int tcm_vhost_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric;
+	int ret;
+
+	pr_debug("TCM_VHOST fabric module %s on %s/%s"
+		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+		utsname()->machine);
+	/*
+	 * Register the top level struct config_item_type with TCM core
+	 */
+	fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
+	if (IS_ERR(fabric)) {
+		pr_err("target_fabric_configfs_init() failed\n");
+		return PTR_ERR(fabric);
+	}
+	/*
+	 * Setup fabric->tf_ops from our local tcm_vhost_ops
+	 */
+	fabric->tf_ops = tcm_vhost_ops;
+	/*
+	 * Setup default attribute lists for various fabric->tf_cit_tmpl
+	 */
+	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	/*
+	 * Register the fabric for use within TCM
+	 */
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		pr_err("target_fabric_configfs_register() failed"
+				" for TCM_VHOST\n");
+		return ret;
+	}
+	/*
+	 * Setup our local pointer to *fabric
+	 */
+	tcm_vhost_fabric_configfs = fabric;
+	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
+	return 0;
+}
+
+static void tcm_vhost_deregister_configfs(void)
+{
+	if (!tcm_vhost_fabric_configfs)
+		return;
+
+	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
+	tcm_vhost_fabric_configfs = NULL;
+	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
+}
+
+static int __init tcm_vhost_init(void)
+{
+	int ret = -ENOMEM;
+
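+	/*
+	 * Workqueue used to dispatch queued commands to
+	 * tcm_vhost_submission_work().
+	 */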
+	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
+	if (!tcm_vhost_workqueue)
+		goto out;
+
+	ret = vhost_scsi_register();
+	if (ret < 0)
+		goto out_destroy_workqueue;
+
+	ret = tcm_vhost_register_configfs();
+	if (ret < 0)
+		goto out_vhost_scsi_deregister;
+
+	return 0;
+
+out_vhost_scsi_deregister:
+	vhost_scsi_deregister();
+out_destroy_workqueue:
+	destroy_workqueue(tcm_vhost_workqueue);
+out:
+	return ret;
+}
+
+static void tcm_vhost_exit(void)
+{
+	tcm_vhost_deregister_configfs();
+	vhost_scsi_deregister();
+	destroy_workqueue(tcm_vhost_workqueue);
+}
+
+MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_vhost_init);
+module_exit(tcm_vhost_exit);
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
new file mode 100644
index 000000000000..c983ed21e413
--- /dev/null
+++ b/drivers/vhost/tcm_vhost.h
@@ -0,0 +1,101 @@
+#define TCM_VHOST_VERSION  "v0.1"
+#define TCM_VHOST_NAMELEN 256
+#define TCM_VHOST_MAX_CDB_SIZE 32
+
+struct tcm_vhost_cmd {
+	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
+	int tvc_vq_desc;
+	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
+	u64 tvc_tag;
+	/* The number of scatterlists associated with this cmd */
+	u32 tvc_sgl_count;
+	/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+	u32 tvc_lun;
+	/* Pointer to the SGL formatted memory from virtio-scsi */
+	struct scatterlist *tvc_sgl;
+	/* Pointer to response */
+	struct virtio_scsi_cmd_resp __user *tvc_resp;
+	/* Pointer to vhost_scsi for our device */
+	struct vhost_scsi *tvc_vhost;
+	/* The TCM I/O descriptor that is accessed via container_of() */
+	struct se_cmd tvc_se_cmd;
+	/* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+	struct work_struct work;
+	/* Copy of the incoming SCSI command descriptor block (CDB) */
+	unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+	/* Sense buffer that will be mapped into outgoing status */
+	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
+	/* Completed commands list, serviced from vhost worker thread */
+	struct list_head tvc_completion_list;
+};
+
+struct tcm_vhost_nexus {
+	/* Pointer to TCM session for I_T Nexus */
+	struct se_session *tvn_se_sess;
+};
+
+struct tcm_vhost_nacl {
+	/* Binary World Wide unique Port Name for Vhost Initiator port */
+	u64 iport_wwpn;
+	/* ASCII formatted WWPN for Sas Initiator port */
+	char iport_name[TCM_VHOST_NAMELEN];
+	/* Returned by tcm_vhost_make_nodeacl() */
+	struct se_node_acl se_node_acl;
+};
+
+struct tcm_vhost_tpg {
+	/* Vhost port target portal group tag for TCM */
+	u16 tport_tpgt;
+	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
+	atomic_t tv_tpg_port_count;
+	/* Used for vhost_scsi device reference to tpg_nexus */
+	atomic_t tv_tpg_vhost_count;
+	/* list for tcm_vhost_list */
+	struct list_head tv_tpg_list;
+	/* Used to protect access for tpg_nexus */
+	struct mutex tv_tpg_mutex;
+	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
+	struct tcm_vhost_nexus *tpg_nexus;
+	/* Pointer back to tcm_vhost_tport */
+	struct tcm_vhost_tport *tport;
+	/* Returned by tcm_vhost_make_tpg() */
+	struct se_portal_group se_tpg;
+};
+
+struct tcm_vhost_tport {
+	/* SCSI protocol the tport is providing */
+	u8 tport_proto_id;
+	/* Binary World Wide unique Port Name for Vhost Target port */
+	u64 tport_wwpn;
+	/* ASCII formatted WWPN for Vhost Target port */
+	char tport_name[TCM_VHOST_NAMELEN];
+	/* Returned by tcm_vhost_make_tport() */
+	struct se_wwn tport_wwn;
+};
+
+/*
+ * As per request from MST, keep TCM_VHOST related ioctl defines out of
+ * linux/vhost.h (user-space) for now..
+ */
+
+#include <linux/vhost.h>
+
+/*
+ * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
+ *
+ * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
+ *            RFC-v2 vhost-scsi userspace.  Add GET_ABI_VERSION ioctl usage
+ */
+
+#define VHOST_SCSI_ABI_VERSION	0
+
+struct vhost_scsi_target {
+	int abi_version;
+	unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
+	unsigned short vhost_tpgt;
+};
+
+/* VHOST_SCSI specific defines */
+#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
+#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
+#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)
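+
+/*
+ * VHOST_SCSI_SET_ENDPOINT binds the vhost-scsi device to the TPG named by
+ * vhost_wwpn + vhost_tpgt, VHOST_SCSI_CLEAR_ENDPOINT releases that binding,
+ * and VHOST_SCSI_GET_ABI_VERSION reports VHOST_SCSI_ABI_VERSION so userspace
+ * can verify compatibility before configuring an endpoint.
+ */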
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 181fa8158a8b..858c9714b2f3 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -37,7 +37,6 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
      */
 
 struct zorro_bus {
-	struct list_head devices;	/* list of devices on this bus */
 	struct device dev;
 };
 
@@ -136,7 +135,6 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
 	if (!bus)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&bus->devices);
 	bus->dev.parent = &pdev->dev;
 	dev_set_name(&bus->dev, "zorro");
 	error = device_register(&bus->dev);