author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-07-27 23:14:08 +0200
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-07-27 23:14:08 +0200
commit	f6248dd88dba3aeb19351410a027d92eee7ceb72 (patch)
tree	9dfe33ca030186ffd5007784d0647a8306d41be9 /drivers
parent	880a66275ef4d1e08e5d4dcf4cec768de18c68ef (diff)
parent	02cb489be7ad07e74ab40baa908d2e20460ebdb2 (diff)
download	linux-f6248dd88dba3aeb19351410a027d92eee7ceb72.tar.gz
Merge branches 'acpi-pm' and 'acpi-numa'
* acpi-pm:
  ACPI / PM / EC: Flush all EC work in acpi_freeze_sync()
  ACPI / EC: Add parameter to force disable the GPE on suspend

* acpi-numa:
  ACPI: NUMA: Fix typo in the full name of SRAT
  ACPI: NUMA: add missing include in acpi_numa.h
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/nfit/core.c | 10
-rw-r--r--  drivers/acpi/numa.c | 2
-rw-r--r--  drivers/android/binder.c | 17
-rw-r--r--  drivers/atm/zatm.c | 2
-rw-r--r--  drivers/base/power/domain.c | 8
-rw-r--r--  drivers/base/regmap/regmap-w1.c | 4
-rw-r--r--  drivers/block/nbd.c | 2
-rw-r--r--  drivers/clocksource/timer-of.c | 12
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 21
-rw-r--r--  drivers/dax/device-dax.h | 2
-rw-r--r--  drivers/dax/device.c | 33
-rw-r--r--  drivers/dax/pmem.c | 12
-rw-r--r--  drivers/dma-buf/dma-fence.c | 17
-rw-r--r--  drivers/dma-buf/sync_debug.c | 2
-rw-r--r--  drivers/dma-buf/sync_file.c | 8
-rw-r--r--  drivers/fsi/fsi-core.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 3
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 41
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 66
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 3
-rw-r--r--  drivers/hid/hid-multitouch.c | 16
-rw-r--r--  drivers/hv/channel.c | 2
-rw-r--r--  drivers/hwmon/applesmc.c | 13
-rw-r--r--  drivers/ide/ide-timings.c | 18
-rw-r--r--  drivers/infiniband/core/addr.c | 46
-rw-r--r--  drivers/infiniband/core/cma.c | 34
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c | 11
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 36
-rw-r--r--  drivers/infiniband/core/verbs.c | 51
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h | 9
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 119
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h | 3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 29
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 16
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h | 3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 9
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.h | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 86
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 60
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c | 60
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 19
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/cm.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 40
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 38
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 4
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 16
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 15
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h | 4
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 52
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 20
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 32
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 11
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 6
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 10
-rw-r--r--  drivers/irqchip/irq-digicolor.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-realview.c | 2
-rw-r--r--  drivers/irqchip/irq-mips-cpu.c | 2
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 2
-rw-r--r--  drivers/isdn/divert/isdn_divert.c | 25
-rw-r--r--  drivers/isdn/hardware/avm/c4.c | 2
-rw-r--r--  drivers/isdn/hardware/eicon/divasmain.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/avmfritz.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcmulti.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcpci.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/netjet.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/w6692.c | 2
-rw-r--r--  drivers/isdn/hisax/config.c | 2
-rw-r--r--  drivers/isdn/hisax/hfc4s8s_l1.c | 2
-rw-r--r--  drivers/isdn/hisax/hisax_fcpcipnp.c | 2
-rw-r--r--  drivers/md/bitmap.c | 3
-rw-r--r--  drivers/md/md.h | 4
-rw-r--r--  drivers/md/raid5-ppl.c | 2
-rw-r--r--  drivers/md/raid5.c | 4
-rw-r--r--  drivers/mux/Kconfig | 19
-rw-r--r--  drivers/mux/mux-core.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 70
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 299
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/alloc.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c | 4
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 10
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 4
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 49
-rw-r--r--  drivers/net/phy/mdio-mux.c | 4
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 30
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 28
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c | 6
-rw-r--r--  drivers/net/usb/smsc95xx.c | 1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2
-rw-r--r--  drivers/nvdimm/core.c | 7
-rw-r--r--  drivers/nvme/host/core.c | 2
-rw-r--r--  drivers/nvme/host/pci.c | 43
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 16
-rw-r--r--  drivers/nvme/target/configfs.c | 30
-rw-r--r--  drivers/nvme/target/core.c | 5
-rw-r--r--  drivers/nvme/target/fc.c | 8
-rw-r--r--  drivers/nvme/target/nvmet.h | 2
-rw-r--r--  drivers/nvmem/rockchip-efuse.c | 2
-rw-r--r--  drivers/scsi/cxlflash/main.c | 11
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 10
-rw-r--r--  drivers/scsi/isci/request.c | 14
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi.h | 17
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 419
-rw-r--r--  drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h | 210
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r--  drivers/scsi/sg.c | 5
-rw-r--r--  drivers/scsi/virtio_scsi.c | 1
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c | 17
-rw-r--r--  drivers/spmi/spmi.c | 12
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_common.c | 3
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 19
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_cmd.c | 2
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_intf.c | 1
-rw-r--r--  drivers/staging/sm750fb/ddk750_chip.c | 2
-rw-r--r--  drivers/staging/sm750fb/sm750.c | 24
-rw-r--r--  drivers/staging/speakup/main.c | 2
-rw-r--r--  drivers/staging/speakup/spk_priv.h | 2
-rw-r--r--  drivers/staging/speakup/spk_ttyio.c | 22
-rw-r--r--  drivers/staging/vboxvideo/Kconfig | 12
-rw-r--r--  drivers/staging/vboxvideo/Makefile | 7
-rw-r--r--  drivers/staging/vboxvideo/TODO | 9
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_base.c | 246
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_ch_setup.h | 66
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_channels.h | 53
-rw-r--r--  drivers/staging/vboxvideo/hgsmi_defs.h | 92
-rw-r--r--  drivers/staging/vboxvideo/modesetting.c | 142
-rw-r--r--  drivers/staging/vboxvideo/vbox_drv.c | 286
-rw-r--r--  drivers/staging/vboxvideo/vbox_drv.h | 296
-rw-r--r--  drivers/staging/vboxvideo/vbox_err.h | 50
-rw-r--r--  drivers/staging/vboxvideo/vbox_fb.c | 412
-rw-r--r--  drivers/staging/vboxvideo/vbox_hgsmi.c | 115
-rw-r--r--  drivers/staging/vboxvideo/vbox_irq.c | 197
-rw-r--r--  drivers/staging/vboxvideo/vbox_main.c | 534
-rw-r--r--  drivers/staging/vboxvideo/vbox_mode.c | 877
-rw-r--r--  drivers/staging/vboxvideo/vbox_prime.c | 74
-rw-r--r--  drivers/staging/vboxvideo/vbox_ttm.c | 472
-rw-r--r--  drivers/staging/vboxvideo/vboxvideo.h | 491
-rw-r--r--  drivers/staging/vboxvideo/vboxvideo_guest.h | 95
-rw-r--r--  drivers/staging/vboxvideo/vboxvideo_vbe.h | 84
-rw-r--r--  drivers/staging/vboxvideo/vbva_base.c | 233
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 10
-rw-r--r--  drivers/thunderbolt/switch.c | 3
-rw-r--r--  drivers/tty/pty.c | 85
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 24
-rw-r--r--  drivers/tty/serial/imx.c | 27
-rw-r--r--  drivers/tty/serial/sh-sci.c | 12
-rw-r--r--  drivers/tty/serial/st-asc.c | 1
-rw-r--r--  drivers/usb/class/cdc-acm.c | 3
-rw-r--r--  drivers/usb/dwc2/gadget.c | 3
-rw-r--r--  drivers/usb/dwc3/core.c | 6
-rw-r--r--  drivers/usb/dwc3/dwc3-omap.c | 18
-rw-r--r--  drivers/usb/dwc3/gadget.c | 8
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_uac1.c | 20
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 25
-rw-r--r--  drivers/usb/gadget/udc/Kconfig | 5
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 14
-rw-r--r--  drivers/usb/gadget/udc/snps_udc_plat.c | 6
-rw-r--r--  drivers/usb/host/pci-quirks.c | 54
-rw-r--r--  drivers/usb/host/pci-quirks.h | 2
-rw-r--r--  drivers/usb/host/xhci-hub.c | 14
-rw-r--r--  drivers/usb/host/xhci-pci.c | 6
-rw-r--r--  drivers/usb/host/xhci-ring.c | 11
-rw-r--r--  drivers/usb/host/xhci.c | 10
-rw-r--r--  drivers/usb/host/xhci.h | 1
-rw-r--r--  drivers/usb/renesas_usbhs/common.c | 4
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 31
-rw-r--r--  drivers/usb/storage/isd200.c | 5
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.h | 1
-rw-r--r--  drivers/w1/masters/omap_hdq.c | 3
-rw-r--r--  drivers/w1/w1.c | 4
-rw-r--r--  drivers/xen/balloon.c | 3
-rw-r--r--  drivers/xen/grant-table.c | 9
-rw-r--r--  drivers/xen/xen-balloon.c | 22
235 files changed, 7254 insertions(+), 1075 deletions(-)
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b75b734ee73a..19182d091587 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3160,6 +3160,8 @@ static struct acpi_driver acpi_nfit_driver = {
 
 static __init int nfit_init(void)
 {
+	int ret;
+
 	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
@@ -3187,8 +3189,14 @@ static __init int nfit_init(void)
 		return -ENOMEM;
 
 	nfit_mce_register();
+	ret = acpi_bus_register_driver(&acpi_nfit_driver);
+	if (ret) {
+		nfit_mce_unregister();
+		destroy_workqueue(nfit_wq);
+	}
+
+	return ret;
 
-	return acpi_bus_register_driver(&acpi_nfit_driver);
 }
 
 static __exit void nfit_exit(void)
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index edb0c79f7c64..917f1cc0fda4 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -443,7 +443,7 @@ int __init acpi_numa_init(void)
 	 * So go over all cpu entries in SRAT to get apicid to node mapping.
 	 */
 
-	/* SRAT: Static Resource Affinity Table */
+	/* SRAT: System Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		struct acpi_subtable_proc srat_proc[3];
 
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index aae4d8d4be36..f7665c31feca 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2200,8 +2200,12 @@ static void binder_transaction(struct binder_proc *proc,
 	list_add_tail(&t->work.entry, target_list);
 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
 	list_add_tail(&tcomplete->entry, &thread->todo);
-	if (target_wait)
-		wake_up_interruptible(target_wait);
+	if (target_wait) {
+		if (reply || !(t->flags & TF_ONE_WAY))
+			wake_up_interruptible_sync(target_wait);
+		else
+			wake_up_interruptible(target_wait);
+	}
 	return;
 
 err_translate_failed:
@@ -3247,10 +3251,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
 			proc->pid, current->pid, cmd, arg);*/
 
-	if (unlikely(current->mm != proc->vma_vm_mm)) {
-		pr_err("current mm mismatch proc mm\n");
-		return -EINVAL;
-	}
 	trace_binder_ioctl(cmd, arg);
 
 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -3464,9 +3464,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
 	if (proc == NULL)
 		return -ENOMEM;
-	get_task_struct(current);
-	proc->tsk = current;
-	proc->vma_vm_mm = current->mm;
+	get_task_struct(current->group_leader);
+	proc->tsk = current->group_leader;
 	INIT_LIST_HEAD(&proc->todo);
 	init_waitqueue_head(&proc->wait);
 	proc->default_priority = task_nice(current);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 292dec18ffb8..07bdd51b3b9a 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1613,7 +1613,7 @@ static int zatm_init_one(struct pci_dev *pci_dev,
 
 	ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
 	if (ret < 0)
-		goto out_disable;
+		goto out_release;
 
 	zatm_dev->pci_dev = pci_dev;
 	dev->dev_data = zatm_dev;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 3b8210ebb50e..60303aa28587 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1222,8 +1222,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
 
 	spin_unlock_irq(&dev->power.lock);
 
-	dev_pm_domain_set(dev, &genpd->domain);
-
 	return gpd_data;
 
  err_free:
@@ -1237,8 +1235,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
 static void genpd_free_dev_data(struct device *dev,
 				struct generic_pm_domain_data *gpd_data)
 {
-	dev_pm_domain_set(dev, NULL);
-
 	spin_lock_irq(&dev->power.lock);
 
 	dev->power.subsys_data->domain_data = NULL;
@@ -1275,6 +1271,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	if (ret)
 		goto out;
 
+	dev_pm_domain_set(dev, &genpd->domain);
+
 	genpd->device_count++;
 	genpd->max_off_time_changed = true;
 
@@ -1336,6 +1334,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
 	if (genpd->detach_dev)
 		genpd->detach_dev(genpd, dev);
 
+	dev_pm_domain_set(dev, NULL);
+
 	list_del_init(&pdd->list_node);
 
 	genpd_unlock(genpd);
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 5f04e7bf063e..e6c64b0be5b2 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -1,7 +1,7 @@
 /*
  * Register map access API - W1 (1-Wire) support
  *
- * Copyright (C) 2017 OAO Radioavionica
+ * Copyright (c) 2017 Radioavionica Corporation
  * Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -11,7 +11,7 @@
 
 #include <linux/regmap.h>
 #include <linux/module.h>
-#include "../../w1/w1.h"
+#include <linux/w1.h>
 
 #include "internal.h"
 
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index dea7d85134ee..87a0a29f6e7e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -626,7 +626,6 @@ static void recv_work(struct work_struct *work)
 	struct nbd_device *nbd = args->nbd;
 	struct nbd_config *config = nbd->config;
 	struct nbd_cmd *cmd;
-	int ret = 0;
 
 	while (1) {
 		cmd = nbd_read_stat(nbd, args->index);
@@ -636,7 +635,6 @@ static void recv_work(struct work_struct *work)
 			mutex_lock(&nsock->tx_lock);
 			nbd_mark_nsock_dead(nbd, nsock, 1);
 			mutex_unlock(&nsock->tx_lock);
-			ret = PTR_ERR(cmd);
 			break;
 		}
 
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index f6e7491c873c..d509b500a7b5 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -41,8 +41,16 @@ static __init int timer_irq_init(struct device_node *np,
 	struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
 	struct clock_event_device *clkevt = &to->clkevt;
 
-	of_irq->irq = of_irq->name ? of_irq_get_byname(np, of_irq->name):
-		irq_of_parse_and_map(np, of_irq->index);
+	if (of_irq->name) {
+		of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
+		if (ret < 0) {
+			pr_err("Failed to get interrupt %s for %s\n",
+			       of_irq->name, np->full_name);
+			return ret;
+		}
+	} else	{
+		of_irq->irq = irq_of_parse_and_map(np, of_irq->index);
+	}
 	if (!of_irq->irq) {
 		pr_err("Failed to map interrupt for %s\n", np->full_name);
 		return -EINVAL;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b7fb8b7c980d..6cd503525638 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -225,6 +225,9 @@ struct global_params {
  * @vid:		Stores VID limits for this CPU
  * @pid:		Stores PID parameters for this CPU
  * @last_sample_time:	Last Sample time
+ * @aperf_mperf_shift:	Number of clock cycles after aperf, merf is incremented
+ *			This shift is a multiplier to mperf delta to
+ *			calculate CPU busy.
  * @prev_aperf:		Last APERF value read from APERF MSR
  * @prev_mperf:		Last MPERF value read from MPERF MSR
  * @prev_tsc:		Last timestamp counter (TSC) value
@@ -259,6 +262,7 @@ struct cpudata {
 
 	u64	last_update;
 	u64	last_sample_time;
+	u64	aperf_mperf_shift;
 	u64	prev_aperf;
 	u64	prev_mperf;
 	u64	prev_tsc;
@@ -321,6 +325,7 @@ struct pstate_funcs {
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	int (*get_scaling)(void);
+	int (*get_aperf_mperf_shift)(void);
 	u64 (*get_val)(struct cpudata*, int pstate);
 	void (*get_vid)(struct cpudata *);
 	void (*update_util)(struct update_util_data *data, u64 time,
@@ -1486,6 +1491,11 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	return val;
 }
 
+static int knl_get_aperf_mperf_shift(void)
+{
+	return 10;
+}
+
 static int knl_get_turbo_pstate(void)
 {
 	u64 value;
@@ -1543,6 +1553,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
 	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
 
+	if (pstate_funcs.get_aperf_mperf_shift)
+		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
 
@@ -1616,7 +1629,8 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 	int32_t busy_frac, boost;
 	int target, avg_pstate;
 
-	busy_frac = div_fp(sample->mperf, sample->tsc);
+	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
+			   sample->tsc);
 
 	boost = cpu->iowait_boost;
 	cpu->iowait_boost >>= 1;
@@ -1675,7 +1689,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
 		perf_scaled = mul_fp(perf_scaled, sample_ratio);
 	} else {
-		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+		sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift),
+				      cpu->sample.tsc);
 		if (sample_ratio < int_tofp(1))
 			perf_scaled = 0;
 	}
@@ -1807,6 +1822,7 @@ static const struct pstate_funcs knl_funcs = {
 	.get_max_physical = core_get_max_pstate_physical,
 	.get_min = core_get_min_pstate,
 	.get_turbo = knl_get_turbo_pstate,
+	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
 	.get_scaling = core_get_scaling,
 	.get_val = core_get_val,
 	.update_util = intel_pstate_update_util_pid,
@@ -2403,6 +2419,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
 	pstate_funcs.get_val   = funcs->get_val;
 	pstate_funcs.get_vid   = funcs->get_vid;
 	pstate_funcs.update_util = funcs->update_util;
+	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
 
 	intel_pstate_use_acpi_profile();
 }
diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h
index fdcd9769ffde..688b051750bd 100644
--- a/drivers/dax/device-dax.h
+++ b/drivers/dax/device-dax.h
@@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(struct device *parent,
 		int region_id, struct resource *res, unsigned int align,
 		void *addr, unsigned long flags);
 struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
-		struct resource *res, int count);
+		int id, struct resource *res, int count);
 #endif /* __DEVICE_DAX_H__ */
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 12943d19bfc4..e9f3b3e4bbf4 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -529,7 +529,8 @@ static void dev_dax_release(struct device *dev)
 	struct dax_region *dax_region = dev_dax->region;
 	struct dax_device *dax_dev = dev_dax->dax_dev;
 
-	ida_simple_remove(&dax_region->ida, dev_dax->id);
+	if (dev_dax->id >= 0)
+		ida_simple_remove(&dax_region->ida, dev_dax->id);
 	dax_region_put(dax_region);
 	put_dax(dax_dev);
 	kfree(dev_dax);
@@ -559,7 +560,7 @@ static void unregister_dev_dax(void *dev)
 }
 
 struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
-		struct resource *res, int count)
+		int id, struct resource *res, int count)
 {
 	struct device *parent = dax_region->dev;
 	struct dax_device *dax_dev;
@@ -567,7 +568,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	struct inode *inode;
 	struct device *dev;
 	struct cdev *cdev;
-	int rc = 0, i;
+	int rc, i;
+
+	if (!count)
+		return ERR_PTR(-EINVAL);
 
 	dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
 	if (!dev_dax)
@@ -587,10 +591,16 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	if (i < count)
 		goto err_id;
 
-	dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
-	if (dev_dax->id < 0) {
-		rc = dev_dax->id;
-		goto err_id;
+	if (id < 0) {
+		id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+		dev_dax->id = id;
+		if (id < 0) {
+			rc = id;
+			goto err_id;
+		}
+	} else {
+		/* region provider owns @id lifetime */
+		dev_dax->id = -1;
 	}
 
 	/*
@@ -598,8 +608,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	 * device outside of mmap of the resulting character device.
 	 */
 	dax_dev = alloc_dax(dev_dax, NULL, NULL);
-	if (!dax_dev)
+	if (!dax_dev) {
+		rc = -ENOMEM;
 		goto err_dax;
+	}
 
 	/* from here on we're committed to teardown via dax_dev_release() */
 	dev = &dev_dax->dev;
@@ -620,7 +632,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	dev->parent = parent;
 	dev->groups = dax_attribute_groups;
 	dev->release = dev_dax_release;
-	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
+	dev_set_name(dev, "dax%d.%d", dax_region->id, id);
 
 	rc = cdev_device_add(cdev, dev);
 	if (rc) {
@@ -636,7 +648,8 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	return dev_dax;
 
  err_dax:
-	ida_simple_remove(&dax_region->ida, dev_dax->id);
+	if (dev_dax->id >= 0)
+		ida_simple_remove(&dax_region->ida, dev_dax->id);
  err_id:
 	kfree(dev_dax);
 
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9f2a0b4fd801..8d8c852ba8f2 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *data)
 
 static int dax_pmem_probe(struct device *dev)
 {
-	int rc;
 	void *addr;
 	struct resource res;
+	int rc, id, region_id;
 	struct nd_pfn_sb *pfn_sb;
 	struct dev_dax *dev_dax;
 	struct dax_pmem *dax_pmem;
-	struct nd_region *nd_region;
 	struct nd_namespace_io *nsio;
 	struct dax_region *dax_region;
 	struct nd_namespace_common *ndns;
@@ -123,14 +122,17 @@ static int dax_pmem_probe(struct device *dev)
 	/* adjust the dax_region resource to the start of data */
 	res.start += le64_to_cpu(pfn_sb->dataoff);
 
-	nd_region = to_nd_region(dev->parent);
-	dax_region = alloc_dax_region(dev, nd_region->id, &res,
+	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
+	if (rc != 2)
+		return -EINVAL;
+
+	dax_region = alloc_dax_region(dev, region_id, &res,
 			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
 	if (!dax_region)
 		return -ENOMEM;
 
 	/* TODO: support for subdividing a dax region... */
-	dev_dax = devm_create_dev_dax(dax_region, &res, 1);
+	dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);
 
 	/* child dev_dax instances now own the lifetime of the dax_region */
 	dax_region_put(dax_region);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 57da14c15987..56e0a0e1b600 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 	if (WARN_ON(!fence))
 		return -EINVAL;
 
-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
-
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		ret = -EINVAL;
 
@@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 		 * we might have raced with the unlocked dma_fence_signal,
 		 * still run through all callbacks
 		 */
-	} else
+	} else {
+		fence->timestamp = ktime_get();
+		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 		trace_dma_fence_signaled(fence);
+	}
 
 	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
 		list_del_init(&cur->node);
@@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence)
 	if (!fence)
 		return -EINVAL;
 
-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
-
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return -EINVAL;
 
+	fence->timestamp = ktime_get();
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 	trace_dma_fence_signaled(fence);
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 82a6e7f6d37f..59a3b2f8ee91 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -84,7 +84,7 @@ static void sync_print_fence(struct seq_file *s,
 		   show ? "_" : "",
 		   sync_status_str(status));
 
-	if (status) {
+	if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
 		struct timespec64 ts64 =
 			ktime_to_timespec64(fence->timestamp);
 
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 545e2c5c4815..d7e219d2669d 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -391,7 +391,13 @@ static void sync_fill_fence_info(struct dma_fence *fence,
 		sizeof(info->driver_name));
 
 	info->status = dma_fence_get_status(fence);
-	info->timestamp_ns = ktime_to_ns(fence->timestamp);
+	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+		cpu_relax();
+	info->timestamp_ns =
+		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+		ktime_to_ns(fence->timestamp) :
+		ktime_set(0, 0);
 }
 
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index a485864cb512..06432d84cbf8 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -532,7 +532,7 @@ static inline uint32_t fsi_smode_sid(int x)
 	return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
 }
 
-static const uint32_t fsi_slave_smode(int id)
+static uint32_t fsi_slave_smode(int id)
 {
 	return FSI_SMODE_WSC | FSI_SMODE_ECRC
 		| fsi_smode_sid(id)
@@ -883,17 +883,16 @@ struct bus_type fsi_bus_type = {
 };
 EXPORT_SYMBOL_GPL(fsi_bus_type);
 
-static int fsi_init(void)
+static int __init fsi_init(void)
 {
 	return bus_register(&fsi_bus_type);
 }
+postcore_initcall(fsi_init);
 
 static void fsi_exit(void)
 {
 	bus_unregister(&fsi_bus_type);
 }
-
-module_init(fsi_init);
 module_exit(fsi_exit);
 module_param(discard_errors, int, 0664);
 MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5f8ada1d872b..37971d9402e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -101,7 +101,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 	if (adev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = 0xFF00,
-			.num_mec = adev->gfx.mec.num_mec,
 			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
 			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
 		};
@@ -122,7 +121,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 
 		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
 		 * nbits is not compile time constant */
-		last_valid_bit = adev->gfx.mec.num_mec
+		last_valid_bit = 1 /* only first MEC can have compute queues */
 				* adev->gfx.mec.num_pipe_per_mec
 				* adev->gfx.mec.num_queue_per_pipe;
 		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 88187bfc5ea3..3f95f7cb4019 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -226,10 +226,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
 	kfd->shared_resources = *gpu_resources;
 
-	/* We only use the first MEC */
-	if (kfd->shared_resources.num_mec > 1)
-		kfd->shared_resources.num_mec = 1;
-
 	/* calculate max size of mqds needed for queues */
 	size = max_num_of_queues_per_device *
 			kfd->device_info->mqd_size_aligned;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 955aa304ff48..602769ced3bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -77,13 +77,6 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
 	return false;
 }
 
-unsigned int get_mec_num(struct device_queue_manager *dqm)
-{
-	BUG_ON(!dqm || !dqm->dev);
-
-	return dqm->dev->shared_resources.num_mec;
-}
-
 unsigned int get_queues_num(struct device_queue_manager *dqm)
 {
 	BUG_ON(!dqm || !dqm->dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 66b9615bc3c1..faf820a06400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,7 +180,6 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
 void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd);
-unsigned int get_mec_num(struct device_queue_manager *dqm);
 unsigned int get_queues_num(struct device_queue_manager *dqm);
 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 91ef1484b3bb..36f376677a53 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -63,9 +63,6 @@ struct kgd2kfd_shared_resources {
 	/* Bit n == 1 means VMID n is available for KFD. */
 	unsigned int compute_vmid_bitmap;
 
-	/* number of mec available from the hardware */
-	uint32_t num_mec;
-
 	/* number of pipes per mec */
 	uint32_t num_pipe_per_mec;
 
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index bfd237c15e76..ae5f06895562 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
 			return false;
 		}
 
+		/*
+		 * ignore out-of-order messages or messages that are part of a
+		 * failed transaction
+		 */
+		if (!recv_hdr.somt && !msg->have_somt)
+			return false;
+
 		/* get length contained in this portion */
 		msg->curchunk_len = recv_hdr.msg_len;
 		msg->curchunk_hdrlen = hdrlen;
@@ -2164,7 +2171,7 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
 
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 {
 	int len;
 	u8 replyblock[32];
@@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 			       replyblock, len);
 	if (ret != len) {
 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
-		return;
+		return false;
 	}
 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
 	if (!ret) {
 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
-		return;
+		return false;
 	}
 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
 
@@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
 				    replyblock, len);
 		if (ret != len) {
-			DRM_DEBUG_KMS("failed to read a chunk\n");
+			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+				      len, ret);
+			return false;
 		}
+
 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
-		if (ret == false)
+		if (!ret) {
 			DRM_DEBUG_KMS("failed to build sideband msg\n");
+			return false;
+		}
+
 		curreply += len;
 		replylen -= len;
 	}
+	return true;
 }
 
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
 
-	drm_dp_get_one_sb_msg(mgr, false);
+	if (!drm_dp_get_one_sb_msg(mgr, false)) {
+		memset(&mgr->down_rep_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->down_rep_recv.have_eomt) {
 		struct drm_dp_sideband_msg_tx *txmsg;
@@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
-	drm_dp_get_one_sb_msg(mgr, true);
+
+	if (!drm_dp_get_one_sb_msg(mgr, true)) {
+		memset(&mgr->up_req_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->up_req_recv.have_eomt) {
 		struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
 		}
 
-		drm_dp_put_mst_branch_device(mstb);
+		if (mstb)
+			drm_dp_put_mst_branch_device(mstb);
+
 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 	}
 	return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 49546222c6d3..6276bb834b4f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -54,7 +54,7 @@ static const uint32_t ipu_plane_formats[] = {
 	DRM_FORMAT_RGBA8888,
 	DRM_FORMAT_RGBX8888,
 	DRM_FORMAT_BGRA8888,
-	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_BGRX8888,
 	DRM_FORMAT_UYVY,
 	DRM_FORMAT_VYUY,
 	DRM_FORMAT_YUYV,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 636031a30e17..8aca20209cb8 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 
 	/* port@1 is the output port */
 	ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		return ret;
 
 	imxpd->dev = dev;
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 699fe7f9b8bf..a2ab6dcdf4a2 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -184,7 +184,6 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
 	if (rdev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = 0xFF00,
-			.num_mec = 1,
 			.num_pipe_per_mec = 4,
 			.num_queue_per_pipe = 8
 		};
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 403bbd5f99a9..a12cc7ea99b6 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -520,6 +520,34 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
 		     SCALER_DISPSTATX_EMPTY);
 }
 
+static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+
+	if (crtc->state->event) {
+		unsigned long flags;
+
+		crtc->state->event->pipe = drm_crtc_index(crtc);
+
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		vc4_crtc->event = crtc->state->event;
+		crtc->state->event = NULL;
+
+		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+			  vc4_state->mm.start);
+
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	} else {
+		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+			  vc4_state->mm.start);
+	}
+}
+
 static void vc4_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -530,6 +558,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
 
 	require_hvs_enabled(dev);
 
+	/* Enable vblank irq handling before crtc is started otherwise
+	 * drm_crtc_get_vblank() fails in vc4_crtc_update_dlist().
+	 */
+	drm_crtc_vblank_on(crtc);
+	vc4_crtc_update_dlist(crtc);
+
 	/* Turn on the scaler, which will wait for vstart to start
 	 * compositing.
 	 */
@@ -541,9 +575,6 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
 	/* Turn on the pixel valve, which will emit the vstart signal. */
 	CRTC_WRITE(PV_V_CONTROL,
 		   CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
-
-	/* Enable vblank irq handling after crtc is started. */
-	drm_crtc_vblank_on(crtc);
 }
 
 static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -598,7 +629,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
 	struct drm_plane *plane;
 	bool debug_dump_regs = false;
@@ -620,25 +650,15 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
 	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-	if (crtc->state->event) {
-		unsigned long flags;
-
-		crtc->state->event->pipe = drm_crtc_index(crtc);
-
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
-		spin_lock_irqsave(&dev->event_lock, flags);
-		vc4_crtc->event = crtc->state->event;
-		crtc->state->event = NULL;
-
-		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-			  vc4_state->mm.start);
-
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-	} else {
-		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-			  vc4_state->mm.start);
-	}
+	/* Only update DISPLIST if the CRTC was already running and is not
+	 * being disabled.
+	 * vc4_crtc_enable() takes care of updating the dlist just after
+	 * re-enabling VBLANK interrupts and before enabling the engine.
+	 * If the CRTC is being disabled, there's no point in updating this
+	 * information.
+	 */
+	if (crtc->state->active && old_state->active)
+		vc4_crtc_update_dlist(crtc);
 
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 41b39464ded8..501e16a9227d 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2732,6 +2732,9 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
 				     hidpp_battery_props,
 				     sizeof(hidpp_battery_props),
 				     GFP_KERNEL);
+	if (!battery_props)
+		return -ENOMEM;
+
 	num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
 
 	if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f3e35e7a189d..aff20f4b6d97 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -620,16 +620,6 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 	return 0;
 }
 
-static int mt_touch_input_mapped(struct hid_device *hdev, struct hid_input *hi,
-		struct hid_field *field, struct hid_usage *usage,
-		unsigned long **bit, int *max)
-{
-	if (usage->type == EV_KEY || usage->type == EV_ABS)
-		set_bit(usage->type, hi->input->evbit);
-
-	return -1;
-}
-
 static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
 {
 	__s32 quirks = td->mtclass.quirks;
@@ -969,8 +959,10 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
 		return 0;
 
 	if (field->application == HID_DG_TOUCHSCREEN ||
-	    field->application == HID_DG_TOUCHPAD)
-		return mt_touch_input_mapped(hdev, hi, field, usage, bit, max);
+	    field->application == HID_DG_TOUCHPAD) {
+		/* We own these mappings, tell hid-input to ignore them */
+		return -1;
+	}
 
 	/* let hid-core decide for the others */
 	return 0;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index e9bf0bb87ac4..e57cc40cb768 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -606,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 		get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
 
 out:
+	/* re-enable tasklet for use on re-open */
+	tasklet_enable(&channel->callback_event);
 	return ret;
 }
 
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0af7fd311979..76c34f4fde13 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -566,6 +566,8 @@ static int applesmc_init_smcreg_try(void)
 	if (ret)
 		return ret;
 	s->fan_count = tmp[0];
+	if (s->fan_count > 10)
+		s->fan_count = 10;
 
 	ret = applesmc_get_lower_bound(&s->temp_begin, "T");
 	if (ret)
@@ -811,7 +813,8 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
 	char newkey[5];
 	u8 buffer[2];
 
-	sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+	scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)],
+		  to_index(attr));
 
 	ret = applesmc_read_key(newkey, buffer, 2);
 	speed = ((buffer[0] << 8 | buffer[1]) >> 2);
@@ -834,7 +837,8 @@ static ssize_t applesmc_store_fan_speed(struct device *dev,
 	if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
 		return -EINVAL;		/* Bigger than a 14-bit value */
 
-	sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+	scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)],
+		  to_index(attr));
 
 	buffer[0] = (speed >> 6) & 0xff;
 	buffer[1] = (speed << 2) & 0xff;
@@ -903,7 +907,7 @@ static ssize_t applesmc_show_fan_position(struct device *dev,
 	char newkey[5];
 	u8 buffer[17];
 
-	sprintf(newkey, FAN_ID_FMT, to_index(attr));
+	scnprintf(newkey, sizeof(newkey), FAN_ID_FMT, to_index(attr));
 
 	ret = applesmc_read_key(newkey, buffer, 16);
 	buffer[16] = 0;
@@ -1116,7 +1120,8 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
 		}
 		for (i = 0; i < num; i++) {
 			node = &grp->nodes[i];
-			sprintf(node->name, grp->format, i + 1);
+			scnprintf(node->name, sizeof(node->name), grp->format,
+				  i + 1);
 			node->sda.index = (grp->option << 16) | (i & 0xffff);
 			node->sda.dev_attr.show = grp->show;
 			node->sda.dev_attr.store = grp->store;
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
index 0e05f75934c9..1858e3ce3993 100644
--- a/drivers/ide/ide-timings.c
+++ b/drivers/ide/ide-timings.c
@@ -104,19 +104,19 @@ u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
 EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
 
 #define ENOUGH(v, unit)		(((v) - 1) / (unit) + 1)
-#define EZ(v, unit)		((v) ? ENOUGH(v, unit) : 0)
+#define EZ(v, unit)		((v) ? ENOUGH((v) * 1000, unit) : 0)
 
 static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
 				int T, int UT)
 {
-	q->setup   = EZ(t->setup   * 1000,  T);
-	q->act8b   = EZ(t->act8b   * 1000,  T);
-	q->rec8b   = EZ(t->rec8b   * 1000,  T);
-	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
-	q->active  = EZ(t->active  * 1000,  T);
-	q->recover = EZ(t->recover * 1000,  T);
-	q->cycle   = EZ(t->cycle   * 1000,  T);
-	q->udma    = EZ(t->udma    * 1000, UT);
+	q->setup   = EZ(t->setup,   T);
+	q->act8b   = EZ(t->act8b,   T);
+	q->rec8b   = EZ(t->rec8b,   T);
+	q->cyc8b   = EZ(t->cyc8b,   T);
+	q->active  = EZ(t->active,  T);
+	q->recover = EZ(t->recover, T);
+	q->cycle   = EZ(t->cycle,   T);
+	q->udma    = EZ(t->udma,    UT);
 }
 
 void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a6cb379a4ebc..01236cef7bfb 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -268,6 +268,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 			return ret;
 
 		ret = rdma_copy_addr(dev_addr, dev, NULL);
+		dev_addr->bound_dev_if = dev->ifindex;
 		if (vlan_id)
 			*vlan_id = rdma_vlan_dev_vlan_id(dev);
 		dev_put(dev);
@@ -280,6 +281,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 					  &((const struct sockaddr_in6 *)addr)->sin6_addr,
 					  dev, 1)) {
 				ret = rdma_copy_addr(dev_addr, dev, NULL);
+				dev_addr->bound_dev_if = dev->ifindex;
 				if (vlan_id)
 					*vlan_id = rdma_vlan_dev_vlan_id(dev);
 				break;
@@ -405,10 +407,10 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 	fl4.saddr = src_ip;
 	fl4.flowi4_oif = addr->bound_dev_if;
 	rt = ip_route_output_key(addr->net, &fl4);
-	if (IS_ERR(rt)) {
-		ret = PTR_ERR(rt);
-		goto out;
-	}
+	ret = PTR_ERR_OR_ZERO(rt);
+	if (ret)
+		return ret;
+
 	src_in->sin_family = AF_INET;
 	src_in->sin_addr.s_addr = fl4.saddr;
 
@@ -423,8 +425,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 
 	*prt = rt;
 	return 0;
-out:
-	return ret;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -509,6 +509,11 @@ static int addr_resolve(struct sockaddr *src_in,
 	struct dst_entry *dst;
 	int ret;
 
+	if (!addr->net) {
+		pr_warn_ratelimited("%s: missing namespace\n", __func__);
+		return -EINVAL;
+	}
+
 	if (src_in->sa_family == AF_INET) {
 		struct rtable *rt = NULL;
 		const struct sockaddr_in *dst_in4 =
@@ -522,8 +527,12 @@ static int addr_resolve(struct sockaddr *src_in,
 		if (resolve_neigh)
 			ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
 
-		ndev = rt->dst.dev;
-		dev_hold(ndev);
+		if (addr->bound_dev_if) {
+			ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+		} else {
+			ndev = rt->dst.dev;
+			dev_hold(ndev);
+		}
 
 		ip_rt_put(rt);
 	} else {
@@ -539,14 +548,27 @@ static int addr_resolve(struct sockaddr *src_in,
 		if (resolve_neigh)
 			ret = addr_resolve_neigh(dst, dst_in, addr, seq);
 
-		ndev = dst->dev;
-		dev_hold(ndev);
+		if (addr->bound_dev_if) {
+			ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+		} else {
+			ndev = dst->dev;
+			dev_hold(ndev);
+		}
 
 		dst_release(dst);
 	}
 
-	addr->bound_dev_if = ndev->ifindex;
-	addr->net = dev_net(ndev);
+	if (ndev->flags & IFF_LOOPBACK) {
+		ret = rdma_translate_ip(dst_in, addr, NULL);
+		/*
+		 * Put the loopback device and get the translated
+		 * device instead.
+		 */
+		dev_put(ndev);
+		ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+	} else {
+		addr->bound_dev_if = ndev->ifindex;
+	}
 	dev_put(ndev);
 
 	return ret;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 31bb82d8ecd7..0eb393237ba2 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -623,22 +623,11 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
 	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
 		return ret;
 
-	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
 		ndev = dev_get_by_index(&init_net, bound_if_index);
-		if (ndev && ndev->flags & IFF_LOOPBACK) {
-			pr_info("detected loopback device\n");
-			dev_put(ndev);
-
-			if (!device->get_netdev)
-				return -EOPNOTSUPP;
-
-			ndev = device->get_netdev(device, port);
-			if (!ndev)
-				return -ENODEV;
-		}
-	} else {
+	else
 		gid_type = IB_GID_TYPE_IB;
-	}
+
 
 	ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
 					 ndev, NULL);
@@ -1044,6 +1033,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
 						 qp_attr_mask);
+		qp_attr->port_num = id_priv->id.port_num;
+		*qp_attr_mask |= IB_QP_PORT;
 	} else
 		ret = -ENOSYS;
 
@@ -2569,21 +2560,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 			goto err2;
 		}
 
-		if (ndev->flags & IFF_LOOPBACK) {
-			dev_put(ndev);
-			if (!id_priv->id.device->get_netdev) {
-				ret = -EOPNOTSUPP;
-				goto err2;
-			}
-
-			ndev = id_priv->id.device->get_netdev(id_priv->id.device,
-							      id_priv->id.port_num);
-			if (!ndev) {
-				ret = -ENODEV;
-				goto err2;
-			}
-		}
-
 		supported_gids = roce_gid_type_mask_support(id_priv->id.device,
 							    id_priv->id.port_num);
 		gid_type = cma_route_gid_type(addr->dev_addr.network,
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index db958d3207ef..94a9eefb3cfc 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -42,6 +42,8 @@
 #include <rdma/ib_cache.h>
 #include <rdma/ib_addr.h>
 
+static struct workqueue_struct *gid_cache_wq;
+
 enum gid_op_type {
 	GID_DEL = 0,
 	GID_ADD
@@ -560,7 +562,7 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
 	}
 	INIT_WORK(&ndev_work->work, netdevice_event_work_handler);
 
-	queue_work(ib_wq, &ndev_work->work);
+	queue_work(gid_cache_wq, &ndev_work->work);
 
 	return NOTIFY_DONE;
 }
@@ -693,7 +695,7 @@ static int addr_event(struct notifier_block *this, unsigned long event,
 	dev_hold(ndev);
 	work->gid_attr.ndev   = ndev;
 
-	queue_work(ib_wq, &work->work);
+	queue_work(gid_cache_wq, &work->work);
 
 	return NOTIFY_DONE;
 }
@@ -740,6 +742,10 @@ static struct notifier_block nb_inet6addr = {
 
 int __init roce_gid_mgmt_init(void)
 {
+	gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
+	if (!gid_cache_wq)
+		return -ENOMEM;
+
 	register_inetaddr_notifier(&nb_inetaddr);
 	if (IS_ENABLED(CONFIG_IPV6))
 		register_inet6addr_notifier(&nb_inet6addr);
@@ -764,4 +770,5 @@ void __exit roce_gid_mgmt_cleanup(void)
 	 * ib-core is removed, all physical devices have been removed,
 	 * so no issue with remaining hardware contexts.
 	 */
+	destroy_workqueue(gid_cache_wq);
 }
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8ba9bfb073d1..2c98533a0203 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1296,7 +1296,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	struct ib_uobject		*uobj;
 	struct ib_cq               	*cq;
 	struct ib_ucq_object        	*obj;
-	struct ib_uverbs_event_queue	*ev_queue;
 	int                        	 ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1313,7 +1312,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	 */
 	uverbs_uobject_get(uobj);
 	cq      = uobj->object;
-	ev_queue = cq->cq_context;
 	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);
 
 	memset(&resp, 0, sizeof(resp));
@@ -1935,7 +1933,8 @@ static int modify_qp(struct ib_uverbs_file *file,
 		goto out;
 	}
 
-	if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+	if ((cmd->base.attr_mask & IB_QP_PORT) &&
+	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
 		ret = -EINVAL;
 		goto release_qp;
 	}
@@ -2005,28 +2004,13 @@ static int modify_qp(struct ib_uverbs_file *file,
 	rdma_ah_set_port_num(&attr->alt_ah_attr,
 			     cmd->base.alt_dest.port_num);
 
-	if (qp->real_qp == qp) {
-		if (cmd->base.attr_mask & IB_QP_AV) {
-			ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
-			if (ret)
-				goto release_qp;
-		}
-		ret = ib_security_modify_qp(qp,
-					    attr,
-					    modify_qp_mask(qp->qp_type,
-							   cmd->base.attr_mask),
-					    udata);
-	} else {
-		ret = ib_security_modify_qp(qp,
-					    attr,
-					    modify_qp_mask(qp->qp_type,
-							   cmd->base.attr_mask),
-					    NULL);
-	}
+	ret = ib_modify_qp_with_udata(qp, attr,
+				      modify_qp_mask(qp->qp_type,
+						     cmd->base.attr_mask),
+				      udata);
 
 release_qp:
 	uobj_put_obj_read(qp);
-
 out:
 	kfree(attr);
 
@@ -2103,7 +2087,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	struct ib_uverbs_destroy_qp      cmd;
 	struct ib_uverbs_destroy_qp_resp resp;
 	struct ib_uobject		*uobj;
-	struct ib_qp               	*qp;
 	struct ib_uqp_object        	*obj;
 	int                        	 ret = -EINVAL;
 
@@ -2117,7 +2100,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	qp  = uobj->object;
 	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
@@ -3019,7 +3001,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
 {
 	struct ib_uverbs_ex_destroy_wq	cmd = {};
 	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
-	struct ib_wq			*wq;
 	struct ib_uobject		*uobj;
 	struct ib_uwq_object		*obj;
 	size_t required_cmd_sz;
@@ -3053,7 +3034,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	wq = uobj->object;
 	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
@@ -3743,10 +3723,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	struct ib_uverbs_destroy_srq      cmd;
 	struct ib_uverbs_destroy_srq_resp resp;
 	struct ib_uobject		 *uobj;
-	struct ib_srq               	 *srq;
 	struct ib_uevent_object        	 *obj;
 	int                         	  ret = -EINVAL;
-	enum ib_srq_type		  srq_type;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
@@ -3756,9 +3734,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	srq = uobj->object;
 	obj = container_of(uobj, struct ib_uevent_object, uobject);
-	srq_type = srq->srq_type;
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
 	 * needs the uobject memory to create the response.
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c973a83c898b..fb98ed67d5bc 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -452,6 +452,19 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
 }
 EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
 
+/*
+ * This function creates an AH from the incoming packet.
+ * The incoming packet carries the dgid of the receiving node on which
+ * this code executes, and its sgid carries the GID of the sender.
+ *
+ * When resolving the MAC address of the destination, the received dgid
+ * is therefore used as the sgid and the received sgid as the dgid,
+ * because the sgid holds the GID of the destination to respond to.
+ *
+ * This is why, in the call to rdma_addr_find_l2_eth_by_grh(), the dgid
+ * and sgid arguments appear swapped relative to the parameter order.
+ */
 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 		       const struct ib_wc *wc, const struct ib_grh *grh,
 		       struct rdma_ah_attr *ah_attr)
@@ -507,11 +520,6 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 		}
 
 		resolved_dev = dev_get_by_index(&init_net, if_index);
-		if (resolved_dev->flags & IFF_LOOPBACK) {
-			dev_put(resolved_dev);
-			resolved_dev = idev;
-			dev_hold(resolved_dev);
-		}
 		rcu_read_lock();
 		if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
 								   resolved_dev))
@@ -887,6 +895,7 @@ static const struct {
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = {
 		[IB_QPS_RESET] = { .valid = 1 },
+		[IB_QPS_ERR] =   { .valid = 1 },
 		[IB_QPS_INIT]  = {
 			.valid = 1,
 			.req_param = {
@@ -1268,20 +1277,36 @@ out:
 }
 EXPORT_SYMBOL(ib_resolve_eth_dmac);
 
-int ib_modify_qp(struct ib_qp *qp,
-		 struct ib_qp_attr *qp_attr,
-		 int qp_attr_mask)
+/**
+ * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
+ * @qp: The QP to modify.
+ * @attr: On input, specifies the QP attributes to modify.  On output,
+ *   the current values of selected QP attributes are returned.
+ * @attr_mask: A bit-mask used to specify which attributes of the QP
+ *   are being modified.
+ * @udata: pointer to the user's input/output buffer information
+ *
+ * Returns 0 on success, otherwise an appropriate error code.
+ */
+int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
+			    int attr_mask, struct ib_udata *udata)
 {
+	int ret;
 
-	if (qp_attr_mask & IB_QP_AV) {
-		int ret;
-
-		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+	if (attr_mask & IB_QP_AV) {
+		ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
 		if (ret)
 			return ret;
 	}
+	return ib_security_modify_qp(qp, attr, attr_mask, udata);
+}
+EXPORT_SYMBOL(ib_modify_qp_with_udata);
 
-	return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
+int ib_modify_qp(struct ib_qp *qp,
+		 struct ib_qp_attr *qp_attr,
+		 int qp_attr_mask)
+{
+	return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
 }
 EXPORT_SYMBOL(ib_modify_qp);
 
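Note on the consolidation above: in-kernel consumers keep calling ib_modify_qp() unchanged, while uverbs forwards its user buffer through the new helper. A minimal sketch of the two entry points, assuming a qp and (for the uverbs case) a udata already in scope:

	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_RTS,
	};
	int ret;

	/* kernel consumer: the wrapper passes udata == NULL internally */
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);

	/* uverbs path: the user's input/output buffer is forwarded */
	ret = ib_modify_qp_with_udata(qp, &attr, IB_QP_STATE, udata);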
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 08772836fded..85527532c49d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -51,6 +51,8 @@
 #define BNXT_RE_PAGE_SIZE_8M		BIT(23)
 #define BNXT_RE_PAGE_SIZE_1G		BIT(30)
 
+#define BNXT_RE_MAX_MR_SIZE		BIT(30)
+
 #define BNXT_RE_MAX_QPC_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_MRW_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_SRQC_COUNT		(64 * 1024)
@@ -60,6 +62,13 @@
 
 #define BNXT_RE_RQ_WQE_THRESHOLD	32
 
+/*
+ * Setting the default ack delay value to 16, which means
+ * the default timeout is approx. 260 ms (4 usec * 2^timeout)
+ */
+
+#define BNXT_RE_DEFAULT_ACK_DELAY	16
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;
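For reference, the IBTA local ACK timeout is 4.096 usec * 2^timeout, so the value 16 chosen above works out to 4096 ns << 16 = ~268 ms, which is the "approx. 260 ms" quoted in the comment. A sketch of the arithmetic (the helper name is illustrative, not part of the patch):

static inline u64 ack_timeout_ns(u8 timeout)
{
	/* 4.096 usec = 4096 ns; 4096 << 16 = 268435456 ns ~= 268 ms */
	return 4096ULL << timeout;
}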
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index c7bd68311d0c..f0e01b3ac711 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -145,10 +145,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
 	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
 			    (u8 *)&ib_attr->sys_image_guid);
-	ib_attr->max_mr_size = ~0ull;
-	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
-				 BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
-				 BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;
+	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
+	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
 
 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -174,9 +172,11 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_mr = dev_attr->max_mr;
 	ib_attr->max_pd = dev_attr->max_pd;
 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->atomic_cap = IB_ATOMIC_HCA;
-	ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
+	if (dev_attr->is_atomic) {
+		ib_attr->atomic_cap = IB_ATOMIC_HCA;
+		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+	}
 
 	ib_attr->max_ee_rd_atom = 0;
 	ib_attr->max_res_rd_atom = 0;
@@ -201,7 +201,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
 
 	ib_attr->max_pkeys = 1;
-	ib_attr->local_ca_ack_delay = 0;
+	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
 	return 0;
 }
 
@@ -390,15 +390,17 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
 			return -EINVAL;
 		ctx->refcnt--;
 		if (!ctx->refcnt) {
-			rc = bnxt_qplib_del_sgid
-					(sgid_tbl,
-					 &sgid_tbl->tbl[ctx->idx], true);
-			if (rc)
+			rc = bnxt_qplib_del_sgid(sgid_tbl,
+						 &sgid_tbl->tbl[ctx->idx],
+						 true);
+			if (rc) {
 				dev_err(rdev_to_dev(rdev),
 					"Failed to remove GID: %#x", rc);
-			ctx_tbl = sgid_tbl->ctx;
-			ctx_tbl[ctx->idx] = NULL;
-			kfree(ctx);
+			} else {
+				ctx_tbl = sgid_tbl->ctx;
+				ctx_tbl[ctx->idx] = NULL;
+				kfree(ctx);
+			}
 		}
 	} else {
 		return -EINVAL;
@@ -588,10 +590,10 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
 
 	/* Create a fence MW only for kernel consumers */
 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
-	if (!mw) {
+	if (IS_ERR(mw)) {
 		dev_err(rdev_to_dev(rdev),
 			"Failed to create fence-MW for PD: %p\n", pd);
-		rc = -EINVAL;
+		rc = PTR_ERR(mw);
 		goto fail;
 	}
 	fence->mw = mw;
@@ -612,30 +614,13 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
 	int rc;
 
 	bnxt_re_destroy_fence_mr(pd);
-	if (ib_pd->uobject && pd->dpi.dbr) {
-		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
-		struct bnxt_re_ucontext *ucntx;
 
-		/* Free DPI only if this is the first PD allocated by the
-		 * application and mark the context dpi as NULL
-		 */
-		ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
-
-		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
-					    &rdev->qplib_res.dpi_tbl,
-					    &pd->dpi);
+	if (pd->qplib_pd.id) {
+		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+					   &rdev->qplib_res.pd_tbl,
+					   &pd->qplib_pd);
 		if (rc)
-			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
-			/* Don't fail, continue*/
-		ucntx->dpi = NULL;
-	}
-
-	rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
-				   &rdev->qplib_res.pd_tbl,
-				   &pd->qplib_pd);
-	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
-		return rc;
+			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
 	}
 
 	kfree(pd);
@@ -667,23 +652,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
 	if (udata) {
 		struct bnxt_re_pd_resp resp;
 
-		if (!ucntx->dpi) {
+		if (!ucntx->dpi.dbr) {
 			/* Allocate DPI in alloc_pd to avoid failures in
 			 * ibv_devinfo and related applications when DPIs
 			 * are depleted.
 			 */
 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
-						 &pd->dpi, ucntx)) {
+						 &ucntx->dpi, ucntx)) {
 				rc = -ENOMEM;
 				goto dbfail;
 			}
-			ucntx->dpi = &pd->dpi;
 		}
 
 		resp.pdid = pd->qplib_pd.id;
 		/* Still allow mapping this DBR to the new user PD. */
-		resp.dpi = ucntx->dpi->dpi;
-		resp.dbr = (u64)ucntx->dpi->umdbr;
+		resp.dpi = ucntx->dpi.dpi;
+		resp.dbr = (u64)ucntx->dpi.umdbr;
 
 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
 		if (rc) {
@@ -960,7 +944,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		qplib_qp->rq.nmap = umem->nmap;
 	}
 
-	qplib_qp->dpi = cntx->dpi;
+	qplib_qp->dpi = &cntx->dpi;
 	return 0;
 rqfail:
 	ib_umem_release(qp->sumem);
@@ -1530,13 +1514,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
-		qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
+		/* Cap the max_rd_atomic to device max */
+		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+						   dev_attr->max_qp_rd_atom);
 	}
 	if (qp_attr_mask & IB_QP_SQ_PSN) {
 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
 	}
 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		if (qp_attr->max_dest_rd_atomic >
+		    dev_attr->max_qp_init_rd_atom) {
+			dev_err(rdev_to_dev(rdev),
+				"max_dest_rd_atomic requested%d is > dev_max%d",
+				qp_attr->max_dest_rd_atomic,
+				dev_attr->max_qp_init_rd_atom);
+			return -EINVAL;
+		}
+
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
@@ -2403,7 +2398,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 		}
 		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
 		cq->qplib_cq.nmap = cq->umem->nmap;
-		cq->qplib_cq.dpi = uctx->dpi;
+		cq->qplib_cq.dpi = &uctx->dpi;
 	} else {
 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
@@ -2905,6 +2900,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
 	budget = min_t(u32, num_entries, cq->max_cql);
+	num_entries = budget;
 	if (!cq->cql) {
 		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
 		goto exit;
@@ -3031,6 +3027,11 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
 		type = DBR_DBR_TYPE_CQ_ARMSE;
 
+	/* Poll to see if there are missed events */
+	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
+		return 1;
+
 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
 
 	return 0;
@@ -3245,6 +3246,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	struct scatterlist *sg;
 	int entry;
 
+	if (length > BNXT_RE_MAX_MR_SIZE) {
+		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported: %ld\n",
+			length, BNXT_RE_MAX_MR_SIZE);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
@@ -3388,8 +3395,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
 						   struct bnxt_re_ucontext,
 						   ib_uctx);
+
+	struct bnxt_re_dev *rdev = uctx->rdev;
+	int rc = 0;
+
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
+
+	if (uctx->dpi.dbr) {
+		/* Free the DPI allocated to this context, if any, and
+		 * clear the context's dbr pointer
+		 */
+		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+					    &rdev->qplib_res.dpi_tbl,
+					    &uctx->dpi);
+		if (rc)
+			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
+		/* Don't fail, continue */
+		uctx->dpi.dbr = NULL;
+	}
+
 	kfree(uctx);
 	return 0;
 }
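The missed-events poll added to bnxt_re_req_notify_cq() above implements the IB_CQ_REPORT_MISSED_EVENTS contract: a return value > 0 warns the consumer that completions may have arrived between its last poll and the re-arm. The canonical consumer loop looks roughly like this (my_cq and my_handle are illustrative names):

	struct ib_wc wc;

	do {
		while (ib_poll_cq(my_cq, 1, &wc) > 0)
			my_handle(&wc);
	} while (ib_req_notify_cq(my_cq, IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS) > 0);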
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 6c160f6a5398..a0bb7e33d7ca 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -59,7 +59,6 @@ struct bnxt_re_pd {
 	struct bnxt_re_dev	*rdev;
 	struct ib_pd		ib_pd;
 	struct bnxt_qplib_pd	qplib_pd;
-	struct bnxt_qplib_dpi	dpi;
 	struct bnxt_re_fence_data fence;
 };
 
@@ -127,7 +126,7 @@ struct bnxt_re_mw {
 struct bnxt_re_ucontext {
 	struct bnxt_re_dev	*rdev;
 	struct ib_ucontext	ib_uctx;
-	struct bnxt_qplib_dpi	*dpi;
+	struct bnxt_qplib_dpi	dpi;
 	void			*shpg;
 	spinlock_t		sh_lock;	/* protect shpg */
 };
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 1fce5e73216b..ceae2d92fb08 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -333,6 +333,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
 	req.update_period_ms = cpu_to_le32(1000);
 	req.stats_dma_addr = cpu_to_le64(dma_map);
+	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
 	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index f05500bcdcf1..9af1514e5944 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1128,6 +1128,11 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 		}
 		/* Each SGE entry = 1 WQE size16 */
 		wqe_size16 = wqe->num_sge;
+		/* HW requires the wqe size to have room for at least one
+		 * SGE even if none was supplied by the ULP
+		 */
+		if (!wqe->num_sge)
+			wqe_size16++;
 	}
 
 	/* Specifics */
@@ -1364,6 +1369,11 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 	rqe->flags = wqe->flags;
 	rqe->wqe_size = wqe->num_sge +
 			((offsetof(typeof(*rqe), data) + 15) >> 4);
+	/* HW requires the wqe size to have room for at least one SGE
+	 * even if none was supplied by the ULP
+	 */
+	if (!wqe->num_sge)
+		rqe->wqe_size++;
 
 	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
 	rqe->wr_id[0] = cpu_to_le32(sw_prod);
@@ -1885,6 +1895,25 @@ flush_rq:
 	return rc;
 }
 
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
+{
+	struct cq_base *hw_cqe, **hw_cqe_ptr;
+	unsigned long flags;
+	u32 sw_cons, raw_cons;
+	bool rc = true;
+
+	spin_lock_irqsave(&cq->hwq.lock, flags);
+	raw_cons = cq->hwq.cons;
+	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
+	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
+
+	/* Check the valid bit; if the CQE is valid, return false */
+	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
+	spin_unlock_irqrestore(&cq->hwq.lock, flags);
+	return rc;
+}
+
 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
 						struct cq_res_raweth_qp1 *hwcqe,
 						struct bnxt_qplib_cqe **pcqe,
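bnxt_qplib_is_cq_empty() above peeks at the consumer index without advancing it and relies on the usual phase-bit convention: the expected polarity of the CQE valid bit flips on every wrap of the ring, so an entry left over from the previous pass never compares as valid. The generic shape, with hypothetical names rather than the driver's CQE_CMP_VALID() macro:

#define MY_TOGGLE_BIT	0x1	/* hypothetical valid/toggle flag */

static bool entry_is_valid(u32 flags, u32 raw_cons, u32 ring_size)
{
	/* ring_size is a power of two; raw_cons & ring_size is the pass */
	return !!(flags & MY_TOGGLE_BIT) == !!(raw_cons & ring_size);
}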
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 36b7b7db0e3f..19176e06c98a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -449,6 +449,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		       int num, struct bnxt_qplib_qp **qp);
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index fde18cf0e406..ef91ab786dd4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -51,6 +51,19 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
 						     0, 0, 0, 0, 0, 0, 0, 0 } };
 
 /* Device */
+
+static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+{
+	int rc;
+	u16 pcie_ctl2;
+
+	rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
+				       &pcie_ctl2);
+	if (rc)
+		return false;
+	return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
+
 int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 			    struct bnxt_qplib_dev_attr *attr)
 {
@@ -81,6 +94,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 
 	/* Extract the context from the side buffer */
 	attr->max_qp = le32_to_cpu(sb->max_qp);
+	/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
+	attr->max_qp += 1;
 	attr->max_qp_rd_atom =
 		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
 		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -129,6 +144,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
 	}
 
+	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
 bail:
 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
 	return rc;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index a543f959098b..2ce7e2a32cf0 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -42,6 +42,8 @@
 
 #define BNXT_QPLIB_RESERVED_QP_WRS	128
 
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ      0x0040
+
 struct bnxt_qplib_dev_attr {
 	char				fw_ver[32];
 	u16				max_sgid;
@@ -70,6 +72,7 @@ struct bnxt_qplib_dev_attr {
 	u32				max_inline_data;
 	u32				l2_db_size;
 	u8				tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
+	bool				is_atomic;
 };
 
 struct bnxt_qplib_pd {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 29d30744d6c9..0cd0c1fa27d4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -718,7 +718,7 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 	struct iwch_mr *mhp;
 	u32 mmid;
 	u32 stag = 0;
-	int ret = 0;
+	int ret = -ENOMEM;
 
 	if (mr_type != IB_MR_TYPE_MEM_REG ||
 	    max_num_sg > T3_MAX_FASTREG_DEPTH)
@@ -731,10 +731,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 		goto err;
 
 	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
-	if (!mhp->pages) {
-		ret = -ENOMEM;
+	if (!mhp->pages)
 		goto pl_err;
-	}
 
 	mhp->rhp = rhp;
 	ret = iwch_alloc_pbl(mhp, max_num_sg);
@@ -751,7 +749,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 	mhp->attr.state = 1;
 	mmid = (stag) >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+	ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+	if (ret)
 		goto err3;
 
 	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e16fcaf6b5a3..be07da1997e6 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -963,6 +963,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		goto err3;
 
 	if (ucontext) {
+		ret = -ENOMEM;
 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
 		if (!mm)
 			goto err4;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bfc77596acbe..cb7fc0d35d1d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -569,7 +569,7 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 {
 	if (wr->num_sge > 1)
 		return -EINVAL;
-	if (wr->num_sge) {
+	if (wr->num_sge && wr->sg_list[0].length) {
 		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
 		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
 							>> 32));
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2ba00b89df6a..94b54850ec75 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12847,7 +12847,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
 	/* clear from the handled mask of the general interrupt */
 	m = isrc / 64;
 	n = isrc % 64;
-	dd->gi_mask[m] &= ~((u64)1 << n);
+	if (likely(m < CCE_NUM_INT_CSRS)) {
+		dd->gi_mask[m] &= ~((u64)1 << n);
+	} else {
+		dd_dev_err(dd, "remap interrupt err\n");
+		return;
+	}
 
 	/* direct the chip source to the given MSI-X interrupt */
 	m = isrc / 8;
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 650305cc0373..1a7af9f60c13 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -647,18 +647,17 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
 		   qp->pid);
 }
 
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
-		    gfp_t gfp)
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
 {
 	struct hfi1_qp_priv *priv;
 
-	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
+	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
 	if (!priv)
 		return ERR_PTR(-ENOMEM);
 
 	priv->owner = qp;
 
-	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
+	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
 				   rdi->dparms.node);
 	if (!priv->s_ahg) {
 		kfree(priv);
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 1eb9cd7b8c19..6fe542b6a927 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -123,8 +123,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp);
 /*
  * Functions provided by hfi1 driver for rdmavt to use
  */
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
-		    gfp_t gfp);
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 unsigned free_all_qps(struct rvt_dev_info *rdi);
 void notify_qp_reset(struct rvt_qp *qp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 37d5d29597a4..23fad6d96944 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -228,14 +228,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_RDMA_READ:
 				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
-				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
-					      atomic_wr(wr)->rkey);
+				set_raddr_seg(wqe,  rdma_wr(wr)->remote_addr,
+					       rdma_wr(wr)->rkey);
 				break;
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
 				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
-				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
-					      atomic_wr(wr)->rkey);
+				set_raddr_seg(wqe,  rdma_wr(wr)->remote_addr,
+					      rdma_wr(wr)->rkey);
 				break;
 			case IB_WR_SEND:
 			case IB_WR_SEND_WITH_INV:
@@ -661,9 +661,11 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	union ib_gid dgid;
 	u64 subnet_prefix;
 	int attr_mask = 0;
-	int i;
+	int i, j;
 	int ret;
+	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
 	u8 phy_port;
+	u8 port = 0;
 	u8 sl;
 
 	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
@@ -709,11 +711,27 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	attr.rnr_retry		= 7;
 	attr.timeout		= 0x12;
 	attr.path_mtu		= IB_MTU_256;
+	attr.ah_attr.type	= RDMA_AH_ATTR_TYPE_ROCE;
 	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
 	rdma_ah_set_static_rate(&attr.ah_attr, 3);
 
 	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
+				(i % HNS_ROCE_MAX_PORTS);
+		sl = i / HNS_ROCE_MAX_PORTS;
+
+		for (j = 0; j < caps->num_ports; j++) {
+			if (hr_dev->iboe.phy_port[j] == phy_port) {
+				queue_en[i] = 1;
+				port = j;
+				break;
+			}
+		}
+
+		if (!queue_en[i])
+			continue;
+
 		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
 		if (IS_ERR(free_mr->mr_free_qp[i])) {
 			dev_err(dev, "Create loop qp failed!\n");
@@ -721,15 +739,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 		}
 		hr_qp = free_mr->mr_free_qp[i];
 
-		sl = i / caps->num_ports;
-
-		if (caps->num_ports == HNS_ROCE_MAX_PORTS)
-			phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
-				(i % caps->num_ports);
-		else
-			phy_port = i % caps->num_ports;
-
-		hr_qp->port		= phy_port + 1;
+		hr_qp->port		= port;
 		hr_qp->phy_port		= phy_port;
 		hr_qp->ibqp.qp_type	= IB_QPT_RC;
 		hr_qp->ibqp.device	= &hr_dev->ib_dev;
@@ -739,23 +749,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 		hr_qp->ibqp.recv_cq	= cq;
 		hr_qp->ibqp.send_cq	= cq;
 
-		rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1);
-		rdma_ah_set_sl(&attr.ah_attr, phy_port + 1);
-		attr.port_num		= phy_port + 1;
+		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
+		rdma_ah_set_sl(&attr.ah_attr, sl);
+		attr.port_num		= port + 1;
 
 		attr.dest_qp_num	= hr_qp->qpn;
 		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
-		       hr_dev->dev_addr[phy_port],
+		       hr_dev->dev_addr[port],
 		       MAC_ADDR_OCTET_NUM);
 
 		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
-		memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3);
-		memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3);
+		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
+		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
 		dgid.raw[11] = 0xff;
 		dgid.raw[12] = 0xfe;
 		dgid.raw[8] ^= 2;
 		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
-		attr_mask |= IB_QP_PORT;
 
 		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
 					    IB_QPS_RESET, IB_QPS_INIT);
@@ -812,6 +821,9 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 		hr_qp = free_mr->mr_free_qp[i];
+		if (!hr_qp)
+			continue;
+
 		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
 		if (ret)
 			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
@@ -963,7 +975,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
 	int i;
 	int ret;
-	int ne;
+	int ne = 0;
 
 	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
 	hr_mr = (struct hns_roce_mr *)mr_work->mr;
@@ -976,6 +988,10 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 		hr_qp = free_mr->mr_free_qp[i];
+		if (!hr_qp)
+			continue;
+		ne++;
+
 		ret = hns_roce_v1_send_lp_wqe(hr_qp);
 		if (ret) {
 			dev_err(dev,
@@ -985,7 +1001,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 		}
 	}
 
-	ne = HNS_ROCE_V1_RESV_QP;
 	do {
 		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
 		if (ret < 0) {
@@ -995,7 +1010,8 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 			goto free_work;
 		}
 		ne -= ret;
-		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
+			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
 	} while (ne && time_before_eq(jiffies, end));
 
 	if (ne != 0)
@@ -2181,7 +2197,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
 		}
 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
 		++wq->tail;
-		} else {
+	} else {
 		/* RQ corresponds to CQE */
 		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
 		opcode = roce_get_field(cqe->cqe_byte_4,
@@ -3533,10 +3549,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
 					old_cnt = roce_get_field(old_send,
 					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
 					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
-					if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+					if (cur_cnt - old_cnt >
+					    SDB_ST_CMP_VAL) {
 						success_flags = 1;
-					else {
-					    send_ptr = roce_get_field(old_send,
+					} else {
+						send_ptr =
+							roce_get_field(old_send,
 					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
 					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
 					    roce_get_field(sdb_retry_cnt,
@@ -3641,6 +3659,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	struct hns_roce_dev *hr_dev;
 	struct hns_roce_qp *hr_qp;
 	struct device *dev;
+	unsigned long qpn;
 	int ret;
 
 	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
@@ -3648,8 +3667,9 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	dev = &hr_dev->pdev->dev;
 	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 	hr_qp = qp_work_entry->qp;
+	qpn = hr_qp->qpn;
 
-	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
+	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
 
 	qp_work_entry->sche_cnt++;
 
@@ -3660,7 +3680,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 					 &qp_work_entry->db_wait_stage);
 	if (ret) {
 		dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
-			hr_qp->qpn);
+			qpn);
 		return;
 	}
 
@@ -3674,7 +3694,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
 				    IB_QPS_RESET);
 	if (ret) {
-		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
+		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
 		return;
 	}
 
@@ -3683,14 +3703,14 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 
 	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
 		/* RC QP, release QPN */
-		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+		hns_roce_release_range_qp(hr_dev, qpn, 1);
 		kfree(hr_qp);
 	} else
 		kfree(hr_to_hr_sqp(hr_qp));
 
 	kfree(qp_work_entry);
 
-	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
+	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
 }
 
 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
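The dgid assembled for the reserved loopback QPs earlier in this file is the standard modified EUI-64 link-local GID: the fe80::/64 prefix, the first three MAC octets, 0xff 0xfe, the remaining three octets, with the universal/local bit inverted. A self-contained sketch of the same construction (the helper name is illustrative):

static void mac_to_link_local_gid(const u8 *mac, u8 gid[16])
{
	__be64 prefix = cpu_to_be64(0xfe80000000000000ULL);

	memcpy(gid, &prefix, sizeof(prefix));
	memcpy(&gid[8], mac, 3);
	gid[11] = 0xff;
	gid[12] = 0xfe;
	memcpy(&gid[13], mac + 3, 3);
	gid[8] ^= 2;		/* invert the universal/local bit */
}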
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c3b41f95e70a..d9777b662eba 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -125,8 +125,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		return -ENODEV;
 	}
 
-	spin_lock_bh(&hr_dev->iboe.lock);
-
 	switch (event) {
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
@@ -144,7 +142,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		break;
 	}
 
-	spin_unlock_bh(&hr_dev->iboe.lock);
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index da2eb5a281fa..9b1566468744 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -527,6 +527,7 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
 int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
 void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
 
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
 void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
 void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
 void i40iw_rem_devusecount(struct i40iw_device *iwdev);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 6ae98aa7f74e..5a2fa743676c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -3487,7 +3487,8 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
 	if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
 	     (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
 	     (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
-	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
+	      iwdev->reset)) {
 		issue_close = 1;
 		iwqp->cm_id = NULL;
 		if (!iwqp->flush_issued) {
@@ -4265,6 +4266,8 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
 		cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
 		attr.qp_state = IB_QPS_ERR;
 		i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+		if (iwdev->reset)
+			i40iw_cm_disconn(cm_node->iwqp);
 		i40iw_rem_ref_cm_node(cm_node);
 	}
 }
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index a027e2072477..9ec1ae9a82c9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1970,6 +1970,8 @@ static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
 		ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
 	}
 
+	cqp->process_cqp_sds = i40iw_update_sds_noccq;
+
 	return ret_code;
 }
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index e0f47cc2effc..ae8463ff59a7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -243,6 +243,8 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
 	if (free_hwcqp)
 		dev->cqp_ops->cqp_destroy(dev->cqp);
 
+	i40iw_cleanup_pending_cqp_op(iwdev);
+
 	i40iw_free_dma_mem(dev->hw, &cqp->sq);
 	kfree(cqp->scratch_array);
 	iwdev->cqp.scratch_array = NULL;
@@ -274,13 +276,12 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
 /**
  * i40iw_destroy_aeq - destroy aeq
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Issue a destroy aeq request and
  * free the resources associated with the aeq
  * The function is called during driver unload
  */
-static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
 {
 	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -288,7 +289,7 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
 
 	if (!iwdev->msix_shared)
 		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
-	if (reset)
+	if (iwdev->reset)
 		goto exit;
 
 	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
@@ -304,19 +305,17 @@ exit:
  * i40iw_destroy_ceq - destroy ceq
  * @iwdev: iwarp device
  * @iwceq: ceq to be destroyed
- * @reset: true if called before reset
  *
  * Issue a destroy ceq request and
  * free the resources associated with the ceq
  */
 static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
-			      struct i40iw_ceq *iwceq,
-			      bool reset)
+			      struct i40iw_ceq *iwceq)
 {
 	enum i40iw_status_code status;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 
-	if (reset)
+	if (iwdev->reset)
 		goto exit;
 
 	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
@@ -335,12 +334,11 @@ exit:
 /**
  * i40iw_dele_ceqs - destroy all ceq's
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Go through all of the device ceq's and for each ceq
  * disable the ceq interrupt and destroy the ceq
  */
-static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
+static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
 {
 	u32 i = 0;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -349,32 +347,31 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
 
 	if (iwdev->msix_shared) {
 		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
-		i40iw_destroy_ceq(iwdev, iwceq, reset);
+		i40iw_destroy_ceq(iwdev, iwceq);
 		iwceq++;
 		i++;
 	}
 
 	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
 		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
-		i40iw_destroy_ceq(iwdev, iwceq, reset);
+		i40iw_destroy_ceq(iwdev, iwceq);
 	}
 }
 
 /**
  * i40iw_destroy_ccq - destroy control cq
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Issue destroy ccq request and
  * free the resources associated with the ccq
  */
-static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
 {
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	struct i40iw_ccq *ccq = &iwdev->ccq;
 	enum i40iw_status_code status = 0;
 
-	if (!reset)
+	if (!iwdev->reset)
 		status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
 	if (status)
 		i40iw_pr_err("ccq destroy failed %d\n", status);
@@ -810,7 +807,7 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
 		iwceq->msix_idx = msix_vec->idx;
 		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
 		if (status) {
-			i40iw_destroy_ceq(iwdev, iwceq, false);
+			i40iw_destroy_ceq(iwdev, iwceq);
 			break;
 		}
 		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
@@ -912,7 +909,7 @@ static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
 
 	status = i40iw_configure_aeq_vector(iwdev);
 	if (status) {
-		i40iw_destroy_aeq(iwdev, false);
+		i40iw_destroy_aeq(iwdev);
 		return status;
 	}
 
@@ -1442,12 +1439,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
 /**
  * i40iw_deinit_device - clean up the device resources
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
  * destroy the device queues and free the pble and the hmc objects
  */
-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+static void i40iw_deinit_device(struct i40iw_device *iwdev)
 {
 	struct i40e_info *ldev = iwdev->ldev;
 
@@ -1464,7 +1460,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
 		i40iw_destroy_rdma_device(iwdev->iwibdev);
 		/* fallthrough */
 	case IP_ADDR_REGISTERED:
-		if (!reset)
+		if (!iwdev->reset)
 			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
 		/* fallthrough */
 	case INET_NOTIFIER:
@@ -1474,26 +1470,26 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
 			unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
 		}
 		/* fallthrough */
+	case PBLE_CHUNK_MEM:
+		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+		/* fallthrough */
 	case CEQ_CREATED:
-		i40iw_dele_ceqs(iwdev, reset);
+		i40iw_dele_ceqs(iwdev);
 		/* fallthrough */
 	case AEQ_CREATED:
-		i40iw_destroy_aeq(iwdev, reset);
+		i40iw_destroy_aeq(iwdev);
 		/* fallthrough */
 	case IEQ_CREATED:
-		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
 		/* fallthrough */
 	case ILQ_CREATED:
-		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
 		/* fallthrough */
 	case CCQ_CREATED:
-		i40iw_destroy_ccq(iwdev, reset);
-		/* fallthrough */
-	case PBLE_CHUNK_MEM:
-		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+		i40iw_destroy_ccq(iwdev);
 		/* fallthrough */
 	case HMC_OBJS_CREATED:
-		i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
 		/* fallthrough */
 	case CQP_CREATED:
 		i40iw_destroy_cqp(iwdev, true);
@@ -1670,6 +1666,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
 		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
 		if (status)
 			break;
+		iwdev->init_state = PBLE_CHUNK_MEM;
 		iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
 		i40iw_register_notifiers();
 		iwdev->init_state = INET_NOTIFIER;
@@ -1693,7 +1690,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
 	} while (0);
 
 	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
-	i40iw_deinit_device(iwdev, false);
+	i40iw_deinit_device(iwdev);
 	return -ERESTART;
 }
 
@@ -1774,9 +1771,12 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
 	iwdev = &hdl->device;
 	iwdev->closing = true;
 
+	if (reset)
+		iwdev->reset = true;
+
 	i40iw_cm_disconnect_all(iwdev);
 	destroy_workqueue(iwdev->virtchnl_wq);
-	i40iw_deinit_device(iwdev, reset);
+	i40iw_deinit_device(iwdev);
 }
 
 /**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index db41ab40da9c..71050c5d29a0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -408,6 +408,9 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
 	set_64bit_val(wqe, 0, info->paddr);
 	set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
 	set_64bit_val(wqe, 16, header[0]);
+
+	/* Ensure all data is written before writing valid bit */
+	wmb();
 	set_64bit_val(wqe, 24, header[1]);
 
 	i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
@@ -1411,10 +1414,10 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
 
 	if (!list_empty(rxlist)) {
 		tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
-		plist = &tmpbuf->list;
 		while ((struct list_head *)tmpbuf != rxlist) {
 			if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
 				break;
+			plist = &tmpbuf->list;
 			tmpbuf = (struct i40iw_puda_buf *)plist->next;
 		}
 		/* Insert buf before tmpbuf */
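The wmb() added to i40iw_puda_send() above is the usual descriptor-publication barrier: every payload word of the WQE must be visible to the device before the word that carries the valid bit, or the hardware may fetch a half-written descriptor. In generic form (the field names here are illustrative):

	set_64bit_val(wqe, 0, paddr);		/* payload words */
	set_64bit_val(wqe, 8, len_field);
	wmb();					/* publish payload first */
	set_64bit_val(wqe, 24, hdr_with_valid);	/* then the valid bit */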
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 56d986924a4c..e311ec559f4e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -337,6 +337,7 @@ struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait
  */
 void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
 {
+	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
 	unsigned long flags;
 
 	if (cqp_request->dynamic) {
@@ -350,6 +351,7 @@ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp
 		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
 		spin_unlock_irqrestore(&cqp->req_lock, flags);
 	}
+	wake_up(&iwdev->close_wq);
 }
 
 /**
@@ -365,6 +367,56 @@ void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
 }
 
 /**
+ * i40iw_free_pending_cqp_request - free pending cqp request objects
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
+					   struct i40iw_cqp_request *cqp_request)
+{
+	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
+
+	if (cqp_request->waiting) {
+		cqp_request->compl_info.error = true;
+		cqp_request->request_done = true;
+		wake_up(&cqp_request->waitq);
+	}
+	i40iw_put_cqp_request(cqp, cqp_request);
+	wait_event_timeout(iwdev->close_wq,
+			   !atomic_read(&cqp_request->refcount),
+			   1000);
+}
+
+/**
+ * i40iw_cleanup_pending_cqp_op - clean up cqp requests with no completions
+ * @iwdev: iwarp device
+ */
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
+{
+	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+	struct i40iw_cqp *cqp = &iwdev->cqp;
+	struct i40iw_cqp_request *cqp_request = NULL;
+	struct cqp_commands_info *pcmdinfo = NULL;
+	u32 i, pending_work, wqe_idx;
+
+	pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
+	wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
+	for (i = 0; i < pending_work; i++) {
+		cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
+		if (cqp_request)
+			i40iw_free_pending_cqp_request(cqp, cqp_request);
+		wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
+	}
+
+	while (!list_empty(&dev->cqp_cmd_head)) {
+		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
+		cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
+		if (cqp_request)
+			i40iw_free_pending_cqp_request(cqp, cqp_request);
+	}
+}
+
+/**
  * i40iw_free_qp - callback after destroy cqp completes
  * @cqp_request: cqp request for destroy qp
  * @num: not used
@@ -546,8 +598,12 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
 	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
 	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
-	if (status)
-		i40iw_pr_err("CQP-OP Destroy QP fail");
+	if (!status)
+		return;
+
+	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+	i40iw_rem_devusecount(iwdev);
 }
 
 /**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4dbe61ec7a77..02d871db7ca5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -426,9 +426,13 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
 			     struct i40iw_qp *iwqp,
 			     u32 qp_num)
 {
+	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+
 	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
 	if (qp_num)
 		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+	if (iwpbl->pbl_allocated)
+		i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
 	kfree(iwqp->kqp.wrid_mem);
@@ -483,7 +487,7 @@ static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
 			       struct i40iw_qp *iwqp,
 			       struct i40iw_qp_init_info *init_info)
 {
-	struct i40iw_pbl *iwpbl = iwqp->iwpbl;
+	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
 	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
 
 	iwqp->page = qpmr->sq_page;
@@ -688,19 +692,22 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 			ucontext = to_ucontext(ibpd->uobject->context);
 
 			if (req.user_wqe_buffers) {
+				struct i40iw_pbl *iwpbl;
+
 				spin_lock_irqsave(
 				    &ucontext->qp_reg_mem_list_lock, flags);
-				iwqp->iwpbl = i40iw_get_pbl(
+				iwpbl = i40iw_get_pbl(
 				    (unsigned long)req.user_wqe_buffers,
 				    &ucontext->qp_reg_mem_list);
 				spin_unlock_irqrestore(
 				    &ucontext->qp_reg_mem_list_lock, flags);
 
-				if (!iwqp->iwpbl) {
+				if (!iwpbl) {
 					err_code = -ENODATA;
 					i40iw_pr_err("no pbl info\n");
 					goto error;
 				}
+				memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
 			}
 		}
 		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
@@ -1161,8 +1168,10 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 		memset(&req, 0, sizeof(req));
 		iwcq->user_mode = true;
 		ucontext = to_ucontext(context);
-		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
+		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
+			err_code = -EFAULT;
 			goto cq_free_resources;
+		}
 
 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
 		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
@@ -2063,7 +2072,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
 			ucontext = to_ucontext(ibpd->uobject->context);
 			i40iw_del_memlist(iwmr, ucontext);
 		}
-		if (iwpbl->pbl_allocated)
+		if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
 			i40iw_free_pble(iwdev->pble_rsrc, palloc);
 		kfree(iwmr);
 		return 0;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 07c3fec77de6..9067443cd311 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -170,7 +170,7 @@ struct i40iw_qp {
 	struct i40iw_qp_kmode kqp;
 	struct i40iw_dma_mem host_ctx;
 	struct timer_list terminate_timer;
-	struct i40iw_pbl *iwpbl;
+	struct i40iw_pbl iwpbl;
 	struct i40iw_dma_mem q2_ctx_mem;
 	struct i40iw_dma_mem ietf_mem;
 	struct completion sq_drained;
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 1e6c526450d9..fedaf8260105 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -323,6 +323,9 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 			mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
 			mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
 		sl_cm_id = get_local_comm_id(mad);
+		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
+		if (id)
+			goto cont;
 		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
 		if (IS_ERR(id)) {
 			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
@@ -343,6 +346,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 		return -EINVAL;
 	}
 
+cont:
 	set_local_comm_id(mad, id->pv_cm_id);
 
 	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
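The mlx4 CM hunk above turns an unconditional id_map_alloc() into lookup-or-create, so a retransmitted REQ/REP/SIDR_REQ reuses the existing mapping instead of installing a duplicate. Stripped of the MAD plumbing, the shape is simply (a sketch, not the full error handling):

	id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	if (!id)
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
	if (IS_ERR(id))
		return PTR_ERR(id);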
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 4f5a143fc0a7..ff931c580557 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	int err;
 
 	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
-			     PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+			     PAGE_SIZE * 2, &buf->buf);
 
 	if (err)
 		goto out;
@@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	if (err)
 		goto err_buf;
 
-	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
+	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
 	if (err)
 		goto err_mtt;
 
@@ -219,7 +219,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 
 		uar = &to_mucontext(context)->uar;
 	} else {
-		err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
+		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
 		if (err)
 			goto err_cq;
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75b2f7d4cd95..d1b43cbbfea7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1155,7 +1155,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 			 * call to mlx4_ib_vma_close.
 			 */
 			put_task_struct(owning_process);
-			msleep(1);
+			usleep_range(1000, 2000);
 			owning_process = get_pid_task(ibcontext->tgid,
 						      PIDTYPE_PID);
 			if (!owning_process ||
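The msleep(1) to usleep_range(1000, 2000) conversions here, and in mcg.c and nes_hw.c below, follow Documentation/timers/timers-howto.txt: an msleep() of a few milliseconds rounds up to jiffies and can sleep far longer than requested (up to roughly 20 ms at HZ=100), whereas usleep_range() is hrtimer-backed:

	msleep(1);			/* may sleep up to ~20 ms at HZ=100 */
	usleep_range(1000, 2000);	/* wakes between 1 and 2 ms */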
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 3405e947dc1e..b73f89700ef9 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
 		if (!count)
 			break;
 
-		msleep(1);
+		usleep_range(1000, 2000);
 	} while (time_after(end, jiffies));
 
 	flush_workqueue(ctx->mcg_wq);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c2b9cbf4da05..9db82e67e959 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -185,7 +185,6 @@ enum mlx4_ib_qp_flags {
 	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
 	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
 	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
-	MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
 
 	/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
 	MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 996e9058e515..75c0e6c5dd56 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -634,8 +634,8 @@ static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
 
 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
-			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
-			    gfp_t gfp)
+			    struct ib_udata *udata, int sqpn,
+			    struct mlx4_ib_qp **caller_qp)
 {
 	int qpn;
 	int err;
@@ -691,14 +691,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
 		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
 				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
-			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
+			sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
 			if (!sqp)
 				return -ENOMEM;
 			qp = &sqp->qp;
 			qp->pri.vid = 0xFFFF;
 			qp->alt.vid = 0xFFFF;
 		} else {
-			qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
+			qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
 			if (!qp)
 				return -ENOMEM;
 			qp->pri.vid = 0xFFFF;
@@ -780,7 +780,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err;
 
 		if (qp_has_rq(init_attr)) {
-			err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
+			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
 			if (err)
 				goto err;
 
@@ -788,7 +788,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 
 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
-				   &qp->buf, gfp)) {
+				   &qp->buf)) {
 			memcpy(&init_attr->cap, &backup_cap,
 			       sizeof(backup_cap));
 			err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
@@ -797,7 +797,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 				goto err_db;
 
 			if (mlx4_buf_alloc(dev->dev, qp->buf_size,
-					   PAGE_SIZE * 2, &qp->buf, gfp)) {
+					   PAGE_SIZE * 2, &qp->buf)) {
 				err = -ENOMEM;
 				goto err_db;
 			}
@@ -808,20 +808,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_buf;
 
-		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
+		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
 		if (err)
 			goto err_mtt;
 
 		qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
-					gfp | __GFP_NOWARN);
+					GFP_KERNEL | __GFP_NOWARN);
 		if (!qp->sq.wrid)
 			qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
-						gfp, PAGE_KERNEL);
+						GFP_KERNEL, PAGE_KERNEL);
 		qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
-					gfp | __GFP_NOWARN);
+					GFP_KERNEL | __GFP_NOWARN);
 		if (!qp->rq.wrid)
 			qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
-						gfp, PAGE_KERNEL);
+						GFP_KERNEL, PAGE_KERNEL);
 		if (!qp->sq.wrid || !qp->rq.wrid) {
 			err = -ENOMEM;
 			goto err_wrid;
@@ -859,7 +859,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
 		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
 
-	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
+	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
 	if (err)
 		goto err_qpn;
 
@@ -1127,10 +1127,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	int err;
 	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
 	u16 xrcdn = 0;
-	gfp_t gfp;
 
-	gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
-		GFP_NOIO : GFP_KERNEL;
 	/*
 	 * We only support LSO, vendor flag1, and multicast loopback blocking,
 	 * and only for kernel UD QPs.
@@ -1140,8 +1137,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 					MLX4_IB_SRIOV_TUNNEL_QP |
 					MLX4_IB_SRIOV_SQP |
 					MLX4_IB_QP_NETIF |
-					MLX4_IB_QP_CREATE_ROCE_V2_GSI |
-					MLX4_IB_QP_CREATE_USE_GFP_NOIO))
+					MLX4_IB_QP_CREATE_ROCE_V2_GSI))
 		return ERR_PTR(-EINVAL);
 
 	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
@@ -1154,7 +1150,6 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 			return ERR_PTR(-EINVAL);
 
 		if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
-						 MLX4_IB_QP_CREATE_USE_GFP_NOIO |
 						 MLX4_IB_QP_CREATE_ROCE_V2_GSI  |
 						 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
 		     init_attr->qp_type != IB_QPT_UD) ||
@@ -1179,7 +1174,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_RAW_PACKET:
-		qp = kzalloc(sizeof *qp, gfp);
+		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 		qp->pri.vid = 0xFFFF;
@@ -1188,7 +1183,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	case IB_QPT_UD:
 	{
 		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
-				       udata, 0, &qp, gfp);
+				       udata, 0, &qp);
 		if (err) {
 			kfree(qp);
 			return ERR_PTR(err);
@@ -1217,8 +1212,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 		}
 
 		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
-				       sqpn,
-				       &qp, gfp);
+				       sqpn, &qp);
 		if (err)
 			return ERR_PTR(err);
 
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index e32dd58937a8..0facaf5f6d23 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -135,14 +135,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 	} else {
-		err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
+		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
 		if (err)
 			goto err_srq;
 
 		*srq->db.db = 0;
 
-		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
-				   GFP_KERNEL)) {
+		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
+				   &srq->buf)) {
 			err = -ENOMEM;
 			goto err_db;
 		}
@@ -167,7 +167,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		if (err)
 			goto err_buf;
 
-		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
+		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
 		if (err)
 			goto err_mtt;
 
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 763bb5b36144..2c40a2e989d2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -582,6 +582,15 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 	}
 }
 
+static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+	if (!mlx5_debugfs_root)
+		return;
+
+	debugfs_remove_recursive(dev->cache.root);
+	dev->cache.root = NULL;
+}
+
 static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
@@ -600,38 +609,34 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 		sprintf(ent->name, "%d", ent->order);
 		ent->dir = debugfs_create_dir(ent->name,  cache->root);
 		if (!ent->dir)
-			return -ENOMEM;
+			goto err;
 
 		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
 						 &size_fops);
 		if (!ent->fsize)
-			return -ENOMEM;
+			goto err;
 
 		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
 						  &limit_fops);
 		if (!ent->flimit)
-			return -ENOMEM;
+			goto err;
 
 		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
 					       &ent->cur);
 		if (!ent->fcur)
-			return -ENOMEM;
+			goto err;
 
 		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
 						&ent->miss);
 		if (!ent->fmiss)
-			return -ENOMEM;
+			goto err;
 	}
 
 	return 0;
-}
-
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
-{
-	if (!mlx5_debugfs_root)
-		return;
+err:
+	mlx5_mr_cache_debugfs_cleanup(dev);
 
-	debugfs_remove_recursive(dev->cache.root);
+	return -ENOMEM;
 }
 
 static void delay_time_func(unsigned long ctx)
@@ -692,6 +697,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	if (err)
 		mlx5_ib_warn(dev, "cache debugfs failure\n");
 
+	/*
+	 * We don't want to fail the driver if debugfs fails to initialize,
+	 * so we do not forward the error to the user.
+	 */
+
 	return 0;
 }
 
@@ -825,7 +835,7 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 			    access_flags, 0);
 	err = PTR_ERR_OR_ZERO(*umem);
 	if (err < 0) {
-		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
+		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
 		return err;
 	}
 
@@ -1779,7 +1789,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 	mr->ndescs = sg_nents;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
-		if (unlikely(i > mr->max_descs))
+		if (unlikely(i >= mr->max_descs))
 			break;
 		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
 		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
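
The mr.c hunk above converts the per-allocation "return -ENOMEM" exits into a
single "goto err" that funnels through mlx5_mr_cache_debugfs_cleanup(), which
is now defined first and is idempotent. A minimal sketch of the same
unwinding pattern, with hypothetical foo_* names and fops:

	struct foo_cache {
		struct dentry *root;
		struct dentry *fsize;
	};

	/* Idempotent: safe on a partially built (or never built) hierarchy. */
	static void foo_debugfs_cleanup(struct foo_cache *cache)
	{
		debugfs_remove_recursive(cache->root);	/* NULL-safe */
		cache->root = NULL;
	}

	static int foo_debugfs_init(struct foo_cache *cache)
	{
		cache->root = debugfs_create_dir("foo", NULL);
		if (!cache->root)
			goto err;

		cache->fsize = debugfs_create_file("size", 0600, cache->root,
						   cache, &foo_size_fops);
		if (!cache->fsize)
			goto err;

		return 0;
	err:
		foo_debugfs_cleanup(cache);	/* tears down whatever exists */
		return -ENOMEM;
	}

Because the cleanup helper tolerates a half-built tree, the error path needs
no bookkeeping about how far initialization got.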
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8f9d8b4ad583..b0adf65e4bdb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -551,7 +551,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 			if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
 			    || (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
 				int_cnt++;
-			msleep(1);
+			usleep_range(1000, 2000);
 		}
 		if (int_cnt > 1) {
 			spin_lock_irqsave(&nesadapter->phy_lock, flags);
@@ -592,7 +592,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 						break;
 					}
 				}
-				msleep(1);
+				usleep_range(1000, 2000);
 			}
 		}
 	}
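
The two nes_hw.c changes above swap msleep(1) for usleep_range(1000, 2000):
msleep() rounds up to whole jiffies, so on a HZ=100 kernel "1 ms" can become
10-20 ms, while usleep_range() is hrtimer-based and gives the scheduler a
window for timer coalescing. The resulting short-poll pattern, sketched with
a hypothetical FOO_READY status bit:

	static int foo_poll_ready(void __iomem *reg)
	{
		int i;

		for (i = 0; i < 100; i++) {
			if (readl(reg) & FOO_READY)
				return 0;
			usleep_range(1000, 2000);	/* really 1-2 ms */
		}
		return -ETIMEDOUT;
	}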
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 2f30bda8457a..27d5e8d9f08d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -744,7 +744,8 @@ err:
 	if (is_uctx_pd) {
 		ocrdma_release_ucontext_pd(uctx);
 	} else {
-		status = _ocrdma_dealloc_pd(dev, pd);
+		if (_ocrdma_dealloc_pd(dev, pd))
+			pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
 	}
 exit:
 	return ERR_PTR(status);
@@ -1901,6 +1902,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
 		goto err;
 
 	if (udata == NULL) {
+		status = -ENOMEM;
 		srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
 			    GFP_KERNEL);
 		if (srq->rqe_wr_id_tbl == NULL)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 548e4d1e998f..2ae71b8f1ba8 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -53,6 +53,14 @@
 
 #define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
 
+static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
+					size_t len)
+{
+	size_t min_len = min_t(size_t, len, udata->outlen);
+
+	return ib_copy_to_udata(udata, src, min_len);
+}
+
 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 {
 	if (index > QEDR_ROCE_PKEY_TABLE_LEN)
@@ -378,7 +386,7 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
 	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
 	uresp.max_cqes = QEDR_MAX_CQES;
 
-	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
 		goto err;
 
@@ -499,7 +507,7 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 
 		uresp.pd_id = pd_id;
 
-		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 		if (rc) {
 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
 			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
@@ -729,7 +737,7 @@ static int qedr_copy_cq_uresp(struct qedr_dev *dev,
 	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
 	uresp.icid = cq->icid;
 
-	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
 		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
 
@@ -1238,7 +1246,7 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
 	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
 	uresp.qp_id = qp->qp_id;
 
-	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
 		DP_ERR(dev,
 		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 5984981e7dd4..a343e3b5d4cb 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -104,10 +104,9 @@ const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
 
 };
 
-static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
-			 gfp_t gfp)
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
 {
-	unsigned long page = get_zeroed_page(gfp);
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
 
 	/*
 	 * Free the page if someone raced with us installing it.
@@ -126,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
  */
 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
-		  enum ib_qp_type type, u8 port, gfp_t gfp)
+		  enum ib_qp_type type, u8 port)
 {
 	u32 i, offset, max_scan, qpn;
 	struct rvt_qpn_map *map;
@@ -160,7 +159,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 	max_scan = qpt->nmaps - !offset;
 	for (i = 0;;) {
 		if (unlikely(!map->page)) {
-			get_map_page(qpt, map, gfp);
+			get_map_page(qpt, map);
 			if (unlikely(!map->page))
 				break;
 		}
@@ -317,16 +316,16 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
 	return ib_mtu_enum_to_int(pmtu);
 }
 
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
 {
 	struct qib_qp_priv *priv;
 
-	priv = kzalloc(sizeof(*priv), gfp);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return ERR_PTR(-ENOMEM);
 	priv->owner = qp;
 
-	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
 	if (!priv->s_hdr) {
 		kfree(priv);
 		return ERR_PTR(-ENOMEM);
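
get_map_page() keeps its "free the page if someone raced with us installing
it" behaviour; only the gfp plumbing is gone. One common shape for that
race-tolerant install is a compare-and-swap into the empty slot (the real
driver may serialize differently; foo_* is hypothetical):

	static void foo_install_page(unsigned long *slot)
	{
		unsigned long page = get_zeroed_page(GFP_KERNEL);

		if (!page)
			return;
		/* Publish only if the slot is still empty; lose gracefully. */
		if (cmpxchg(slot, 0UL, page) != 0UL)
			free_page(page);
	}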
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index da0db5485ddc..a52fc67b40d7 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -274,11 +274,11 @@ int qib_get_counters(struct qib_pportdata *ppd,
  * Functions provided by qib driver for rdmavt to use
  */
 unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp);
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qib_notify_qp_reset(struct rvt_qp *qp);
 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
-		  enum ib_qp_type type, u8 port, gfp_t gfp);
+		  enum ib_qp_type type, u8 port);
 void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
 #ifdef CONFIG_DEBUG_FS
 
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 727e81cc2c8f..8876ee7bc326 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -118,10 +118,9 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
 EXPORT_SYMBOL(ib_rvt_state_ops);
 
 static void get_map_page(struct rvt_qpn_table *qpt,
-			 struct rvt_qpn_map *map,
-			 gfp_t gfp)
+			 struct rvt_qpn_map *map)
 {
-	unsigned long page = get_zeroed_page(gfp);
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
 
 	/*
 	 * Free the page if someone raced with us installing it.
@@ -173,7 +172,7 @@ static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
 		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
 	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
 		if (!map->page) {
-			get_map_page(qpt, map, GFP_KERNEL);
+			get_map_page(qpt, map);
 			if (!map->page) {
 				ret = -ENOMEM;
 				break;
@@ -342,14 +341,14 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
  * Return: The queue pair number
  */
 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
-		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
+		     enum ib_qp_type type, u8 port_num)
 {
 	u32 i, offset, max_scan, qpn;
 	struct rvt_qpn_map *map;
 	u32 ret;
 
 	if (rdi->driver_f.alloc_qpn)
-		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);
+		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
 
 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
 		unsigned n;
@@ -374,7 +373,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 	max_scan = qpt->nmaps - !offset;
 	for (i = 0;;) {
 		if (unlikely(!map->page)) {
-			get_map_page(qpt, map, gfp);
+			get_map_page(qpt, map);
 			if (unlikely(!map->page))
 				break;
 		}
@@ -672,7 +671,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 	struct ib_qp *ret = ERR_PTR(-ENOMEM);
 	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
 	void *priv = NULL;
-	gfp_t gfp;
 	size_t sqsize;
 
 	if (!rdi)
@@ -680,18 +678,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 
 	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
 	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
-	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
+	    init_attr->create_flags)
 		return ERR_PTR(-EINVAL);
 
-	/* GFP_NOIO is applicable to RC QP's only */
-
-	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
-	    init_attr->qp_type != IB_QPT_RC)
-		return ERR_PTR(-EINVAL);
-
-	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
-						GFP_NOIO : GFP_KERNEL;
-
 	/* Check receive queue parameters if no SRQ is specified. */
 	if (!init_attr->srq) {
 		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
@@ -719,14 +708,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		sz = sizeof(struct rvt_sge) *
 			init_attr->cap.max_send_sge +
 			sizeof(struct rvt_swqe);
-		if (gfp == GFP_NOIO)
-			swq = __vmalloc(
-				sqsize * sz,
-				gfp | __GFP_ZERO, PAGE_KERNEL);
-		else
-			swq = vzalloc_node(
-				sqsize * sz,
-				rdi->dparms.node);
+		swq = vzalloc_node(sqsize * sz, rdi->dparms.node);
 		if (!swq)
 			return ERR_PTR(-ENOMEM);
 
@@ -741,7 +723,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		} else if (init_attr->cap.max_recv_sge > 1)
 			sg_list_sz = sizeof(*qp->r_sg_list) *
 				(init_attr->cap.max_recv_sge - 1);
-		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
+		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
+				  rdi->dparms.node);
 		if (!qp)
 			goto bail_swq;
 
@@ -751,7 +734,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 				kzalloc_node(
 					sizeof(*qp->s_ack_queue) *
 					 rvt_max_atomic(rdi),
-					gfp,
+					GFP_KERNEL,
 					rdi->dparms.node);
 			if (!qp->s_ack_queue)
 				goto bail_qp;
@@ -766,7 +749,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		 * Driver needs to set up its private QP structure and do any
 		 * initialization that is needed.
 		 */
-		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
+		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
 		if (IS_ERR(priv)) {
 			ret = priv;
 			goto bail_qp;
@@ -786,11 +769,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 				qp->r_rq.wq = vmalloc_user(
 						sizeof(struct rvt_rwq) +
 						qp->r_rq.size * sz);
-			else if (gfp == GFP_NOIO)
-				qp->r_rq.wq = __vmalloc(
-						sizeof(struct rvt_rwq) +
-						qp->r_rq.size * sz,
-						gfp | __GFP_ZERO, PAGE_KERNEL);
 			else
 				qp->r_rq.wq = vzalloc_node(
 						sizeof(struct rvt_rwq) +
@@ -824,7 +802,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 
 		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
 				init_attr->qp_type,
-				init_attr->port_num, gfp);
+				init_attr->port_num);
 		if (err < 0) {
 			ret = ERR_PTR(err);
 			goto bail_rq_wq;
@@ -1280,9 +1258,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	if (attr_mask & IB_QP_TIMEOUT) {
 		qp->timeout = attr->timeout;
-		qp->timeout_jiffies =
-			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
-				1000UL);
+		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
 	}
 
 	if (attr_mask & IB_QP_QKEY)
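
The rvt_modify_qp() hunk folds the open-coded conversion into
rvt_timeout_to_jiffies(). The IBTA local ACK timeout field encodes
4.096 us * 2^timeout, which is what the removed expression computed; a
plausible shape for the helper (the clamp is an assumption, not shown in
this diff):

	static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
	{
		/* IBTA: local ACK timeout = 4.096 us * 2^timeout */
		return usecs_to_jiffies((4096UL * (1UL << (timeout & 0x1f))) /
					1000UL);
	}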
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c3a140ed4df2..08f3f90d2912 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -441,6 +441,8 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
 	if (unlikely(qp->need_req_skb &&
 		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
 		rxe_run_task(&qp->req.task, 1);
+
+	rxe_drop_ref(qp);
 }
 
 int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
@@ -473,6 +475,7 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
 		return -EAGAIN;
 	}
 
+	rxe_add_ref(pkt->qp);
 	atomic_inc(&pkt->qp->skb_out);
 	kfree_skb(skb);
 
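The rxe_net.c change pins the QP for as long as its skb is in flight: a
reference is taken in rxe_send() before the packet leaves the driver and
dropped in the skb destructor, so the QP cannot disappear underneath
rxe_skb_tx_dtor(). The same shape with hypothetical foo_* helpers:

	static void foo_tx_done(struct foo_pkt *pkt)	/* async completion */
	{
		atomic_dec(&pkt->qp->skb_out);
		foo_qp_put(pkt->qp);	/* drop the ref taken at send time */
	}

	static int foo_send(struct foo_pkt *pkt)
	{
		foo_qp_get(pkt->qp);	/* pin across the async path */
		atomic_inc(&pkt->qp->skb_out);
		return foo_hw_queue(pkt);	/* later calls foo_tx_done() */
	}
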
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index be944d5aa9af..a958ee918a49 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1219,6 +1219,9 @@ void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
 		kfree_skb(skb);
 	}
 
+	if (notify)
+		return;
+
 	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
 		advance_consumer(qp->rq.queue);
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 073e66783f1d..af90a7d42b96 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -914,6 +914,9 @@ static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 	spin_unlock_irqrestore(&rq->producer_lock, flags);
 
+	if (qp->resp.state == QP_STATE_ERROR)
+		rxe_run_task(&qp->resp.task, 1);
+
 err1:
 	return err;
 }
@@ -1240,6 +1243,8 @@ int rxe_register_device(struct rxe_dev *rxe)
 	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
 			    rxe->ndev->dev_addr);
 	dev->dev.dma_ops = &dma_virt_ops;
+	dma_coerce_mask_and_coherent(&dev->dev,
+				     dma_get_required_mask(dev->dev.parent));
 
 	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
 	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7cbcfdac6529..f87d104837dc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -39,6 +39,7 @@
 #include <linux/vmalloc.h>
 #include <linux/moduleparam.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
 
 #include "ipoib.h"
 
@@ -954,7 +955,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
 			break;
 		}
 		spin_unlock_irq(&priv->lock);
-		msleep(1);
+		usleep_range(1000, 2000);
 		ipoib_drain_cq(dev);
 		spin_lock_irq(&priv->lock);
 	}
@@ -1047,9 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 		.sq_sig_type		= IB_SIGNAL_ALL_WR,
 		.qp_type		= IB_QPT_RC,
 		.qp_context		= tx,
-		.create_flags		= IB_QP_CREATE_USE_GFP_NOIO
+		.create_flags		= 0
 	};
-
 	struct ib_qp *tx_qp;
 
 	if (dev->features & NETIF_F_SG)
@@ -1057,10 +1057,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 			min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
 
 	tx_qp = ib_create_qp(priv->pd, &attr);
-	if (PTR_ERR(tx_qp) == -EINVAL) {
-		attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
-		tx_qp = ib_create_qp(priv->pd, &attr);
-	}
 	tx->max_send_sge = attr.cap.max_send_sge;
 	return tx_qp;
 }
@@ -1131,10 +1127,11 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 			    struct sa_path_rec *pathrec)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+	unsigned int noio_flag;
 	int ret;
 
-	p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
-			       GFP_NOIO, PAGE_KERNEL);
+	noio_flag = memalloc_noio_save();
+	p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
 	if (!p->tx_ring) {
 		ret = -ENOMEM;
 		goto err_tx;
@@ -1142,9 +1139,10 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
 
 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
+	memalloc_noio_restore(noio_flag);
 	if (IS_ERR(p->qp)) {
 		ret = PTR_ERR(p->qp);
-		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
+		ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
 		goto err_qp;
 	}
 
@@ -1206,7 +1204,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 				goto timeout;
 			}
 
-			msleep(1);
+			usleep_range(1000, 2000);
 		}
 	}
 
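Rather than thread GFP_NOIO through every allocator (the flag and fallback
path deleted above), ipoib now brackets the whole allocation sequence with
memalloc_noio_save()/memalloc_noio_restore() from <linux/sched/mm.h>: every
allocation inside the section, including those buried in ib_create_qp(),
implicitly becomes NOIO. A minimal sketch of the scoping pattern:

	static int foo_alloc_tx(struct foo_tx *tx)	/* foo_* hypothetical */
	{
		unsigned int noio_flag;
		int ret = 0;

		noio_flag = memalloc_noio_save();	/* allocs below: NOIO */
		tx->ring = vzalloc(tx->entries * sizeof(*tx->ring));
		if (!tx->ring)
			ret = -ENOMEM;
		memalloc_noio_restore(noio_flag);	/* restore task flags */
		return ret;
	}
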
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index efe7402f4885..57a9655e844d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -770,7 +770,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
 
 		ipoib_drain_cq(dev);
 
-		msleep(1);
+		usleep_range(1000, 2000);
 	}
 
 	ipoib_dbg(priv, "All sends and receives done.\n");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6e86eeee370e..4ce315c92b48 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -233,6 +233,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
 static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
+	int ret = 0;
 
 	/* dev->mtu > 2K ==> connected mode */
 	if (ipoib_cm_admin_enabled(dev)) {
@@ -256,9 +257,34 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 		ipoib_dbg(priv, "MTU must be smaller than the underlying "
 				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
 
-	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
+	new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
 
-	return 0;
+	if (priv->rn_ops->ndo_change_mtu) {
+		bool carrier_status = netif_carrier_ok(dev);
+
+		netif_carrier_off(dev);
+
+		/* notify the lower layer of the real MTU */
+		ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
+
+		if (carrier_status)
+			netif_carrier_on(dev);
+	} else {
+		dev->mtu = new_mtu;
+	}
+
+	return ret;
+}
+
+static void ipoib_get_stats(struct net_device *dev,
+			    struct rtnl_link_stats64 *stats)
+{
+	struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+	if (priv->rn_ops->ndo_get_stats64)
+		priv->rn_ops->ndo_get_stats64(dev, stats);
+	else
+		netdev_stats_to_stats64(stats, &dev->stats);
 }
 
 /* Called with an RCU read lock taken */
@@ -1808,6 +1834,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
 	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
 	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
 	.ndo_set_mac_address	 = ipoib_set_mac,
+	.ndo_get_stats64	 = ipoib_get_stats,
 };
 
 static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -2212,6 +2239,7 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto register_failed;
 	}
 
+	result = -ENOMEM;
 	if (ipoib_cm_add_mode_attr(priv->dev))
 		goto sysfs_failed;
 	if (ipoib_add_pkey_attr(priv->dev))
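
ipoib_change_mtu() and the new ipoib_get_stats() both delegate to the
rdma_netdev ops when the lower layer provides them and fall back to the
generic behaviour otherwise, with traffic quiesced around the MTU change.
The delegation shape, with hypothetical foo_*/lower_ops names:

	static int foo_change_mtu(struct net_device *dev, int new_mtu)
	{
		struct foo_priv *priv = netdev_priv(dev);
		bool carrier = netif_carrier_ok(dev);
		int ret;

		if (!priv->lower_ops->ndo_change_mtu) {
			dev->mtu = new_mtu;	/* no offload path */
			return 0;
		}

		/* Quiesce while the lower device reprograms its MTU. */
		netif_carrier_off(dev);
		ret = priv->lower_ops->ndo_change_mtu(dev, new_mtu);
		if (carrier)
			netif_carrier_on(dev);
		return ret;
	}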
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5a887efb4bdf..37b33d708c2d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -83,6 +83,7 @@ static struct scsi_host_template iscsi_iser_sht;
 static struct iscsi_transport iscsi_iser_transport;
 static struct scsi_transport_template *iscsi_iser_scsi_transport;
 static struct workqueue_struct *release_wq;
+static DEFINE_MUTEX(unbind_iser_conn_mutex);
 struct iser_global ig;
 
 int iser_debug_level = 0;
@@ -550,12 +551,14 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 	 */
 	if (iser_conn) {
 		mutex_lock(&iser_conn->state_mutex);
+		mutex_lock(&unbind_iser_conn_mutex);
 		iser_conn_terminate(iser_conn);
 		iscsi_conn_stop(cls_conn, flag);
 
 		/* unbind */
 		iser_conn->iscsi_conn = NULL;
 		conn->dd_data = NULL;
+		mutex_unlock(&unbind_iser_conn_mutex);
 
 		complete(&iser_conn->stop_completion);
 		mutex_unlock(&iser_conn->state_mutex);
@@ -977,13 +980,21 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
 	struct iser_conn *iser_conn;
 	struct ib_device *ib_dev;
 
+	mutex_lock(&unbind_iser_conn_mutex);
+
 	session = starget_to_session(scsi_target(sdev))->dd_data;
 	iser_conn = session->leadconn->dd_data;
+	if (!iser_conn) {
+		mutex_unlock(&unbind_iser_conn_mutex);
+		return -ENOTCONN;
+	}
 	ib_dev = iser_conn->ib_conn.device->ib_device;
 
 	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
 		blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
 
+	mutex_unlock(&unbind_iser_conn_mutex);
+
 	return 0;
 }
 
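iscsi_iser_slave_alloc() can race with connection teardown, so the unbind of
the iser connection and every reader of leadconn->dd_data now serialize on
one mutex, and readers re-check for NULL under the lock. In outline (foo_*
names hypothetical):

	static DEFINE_MUTEX(foo_unbind_mutex);

	static void foo_unbind(struct foo_conn *conn)	/* teardown side */
	{
		mutex_lock(&foo_unbind_mutex);
		conn->dd_data = NULL;
		mutex_unlock(&foo_unbind_mutex);
	}

	static int foo_use(struct foo_conn *conn)	/* reader side */
	{
		int ret = 0;

		mutex_lock(&foo_unbind_mutex);
		if (!conn->dd_data)
			ret = -ENOTCONN;	/* raced with teardown */
		else
			foo_touch(conn->dd_data);	/* safe under lock */
		mutex_unlock(&foo_unbind_mutex);
		return ret;
	}
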
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 12ed62ce9ff7..2a07692007bd 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -137,8 +137,10 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 
 	if (unsol_sz < edtl) {
 		hdr->flags     |= ISER_WSV;
-		hdr->write_stag = cpu_to_be32(mem_reg->rkey);
-		hdr->write_va   = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+		if (buf_out->data_len > imm_sz) {
+			hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+			hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+		}
 
 		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
 			 "VA:%#llX + unsol:%d\n",
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index c538a38c91ce..26a004e97ae0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -708,8 +708,14 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
 	unsigned short sg_tablesize, sup_sg_tablesize;
 
 	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
-	sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
-				 device->ib_device->attrs.max_fast_reg_page_list_len);
+	if (device->ib_device->attrs.device_cap_flags &
+			IB_DEVICE_MEM_MGT_EXTENSIONS)
+		sup_sg_tablesize =
+			min_t(uint,
+			      ISCSI_ISER_MAX_SG_TABLESIZE,
+			      device->ib_device->attrs.max_fast_reg_page_list_len);
+	else
+		sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
 
 	iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
 }
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index dad85e74c37c..3aae015469a5 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -71,7 +71,7 @@ static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base,
 static int __init digicolor_of_init(struct device_node *node,
 				struct device_node *parent)
 {
-	static void __iomem *reg_base;
+	void __iomem *reg_base;
 	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	struct regmap *ucregs;
 	int ret;
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index 54c296401525..18d58d2b4ffe 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -43,7 +43,7 @@ static const struct of_device_id syscon_pldset_of_match[] = {
 static int __init
 realview_gic_of_init(struct device_node *node, struct device_node *parent)
 {
-	static struct regmap *map;
+	struct regmap *map;
 	struct device_node *np;
 	const struct of_device_id *gic_id;
 	u32 pld1_ctrl;
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 0a8ed1c05518..14461cbfab2f 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -154,7 +154,7 @@ asmlinkage void __weak plat_irq_dispatch(void)
 static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
 			     irq_hw_number_t hw)
 {
-	static struct irq_chip *chip;
+	struct irq_chip *chip;
 
 	if (hw < 2 && cpu_has_mipsmt) {
 		/* Software interrupts are used for MT/CMT IPI */
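
Three irqchip fixes in this series (digicolor, gic-realview, mips-cpu) drop a
stray "static" from function-local variables. A static local is a single copy
shared by every call and every CPU, which is wrong for per-invocation state
in an init or map callback; dropping the keyword restores ordinary automatic
storage:

	static int foo_intc_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
	{
		struct irq_chip *chip;	/* per call, as intended */

		/* "static struct irq_chip *chip;" would silently share one
		 * pointer between concurrent mappings.
		 */
		chip = foo_pick_chip(hw);	/* hypothetical selector */
		irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
		return 0;
	}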
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 832ebf4062f7..6ab1d3afec02 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -950,7 +950,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
 					       &gic_irq_domain_ops, NULL);
 	if (!gic_irq_domain)
 		panic("Failed to add GIC IRQ domain");
-	gic_irq_domain->name = "mips-gic-irq";
 
 	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
 						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
@@ -959,7 +958,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
 	if (!gic_ipi_domain)
 		panic("Failed to add GIC IPI domain");
 
-	gic_ipi_domain->name = "mips-gic-ipi";
 	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
 
 	if (node &&
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 060d357f107f..6f423bc49d0d 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -485,18 +485,19 @@ static int isdn_divert_icall(isdn_ctrl *ic)
 				cs->deflect_dest[0] = '\0';
 				retval = 4; /* only proceed */
 			}
-			sprintf(cs->info, "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
-				cs->akt_state,
-				cs->divert_id,
-				divert_if.drv_to_name(cs->ics.driver),
-				(ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
-				cs->ics.parm.setup.phone,
-				cs->ics.parm.setup.eazmsn,
-				cs->ics.parm.setup.si1,
-				cs->ics.parm.setup.si2,
-				cs->ics.parm.setup.screen,
-				dv->rule.waittime,
-				cs->deflect_dest);
+			snprintf(cs->info, sizeof(cs->info),
+				 "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
+				 cs->akt_state,
+				 cs->divert_id,
+				 divert_if.drv_to_name(cs->ics.driver),
+				 (ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
+				 cs->ics.parm.setup.phone,
+				 cs->ics.parm.setup.eazmsn,
+				 cs->ics.parm.setup.si1,
+				 cs->ics.parm.setup.si2,
+				 cs->ics.parm.setup.screen,
+				 dv->rule.waittime,
+				 cs->deflect_dest);
 			if ((dv->rule.action == DEFLECT_REPORT) ||
 			    (dv->rule.action == DEFLECT_REJECT)) {
 				put_info_buffer(cs->info);
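
sprintf() into the fixed cs->info buffer overflows silently once the
formatted text outgrows it; snprintf() truncates at the buffer size instead
(bnx2x_get_strings() below gets the same treatment). The bounded form:

	static void foo_format_info(char *buf, size_t len, int state,
				    unsigned long id)
	{
		/* writes at most len bytes, including the trailing NUL */
		snprintf(buf, len, "%d 0x%lx\n", state, id);
	}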
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 40c7e2cf423b..034cabac699d 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -42,7 +42,7 @@ static char *revision = "$Revision: 1.1.2.2 $";
 
 static bool suppress_pollack;
 
-static struct pci_device_id c4_pci_tbl[] = {
+static const struct pci_device_id c4_pci_tbl[] = {
 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 },
 	{ }			/* Terminating entry */
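
A long run of ISDN drivers below gets the same one-line change: the PCI ID
table becomes const. The table is only ever read by the PCI core, so
constifying it lets the compiler place it in rodata. The idiom, with
illustrative IDs:

	static const struct pci_device_id foo_pci_tbl[] = {
		{ PCI_VDEVICE(FOO, 0x1234) },	/* PCI_VENDOR_ID_FOO assumed */
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, foo_pci_tbl);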
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 8b7ad4f1ab01..b2023e08dcd2 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -110,7 +110,7 @@ typedef struct _diva_os_thread_dpc {
 /*
   This table should be sorted by PCI device ID
 */
-static struct pci_device_id divas_pci_tbl[] = {
+static const struct pci_device_id divas_pci_tbl[] = {
 	/* Diva Server BRI-2M PCI 0xE010 */
 	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA),
 	  CARDTYPE_MAESTRA_PCI },
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index e3fa1cd64470..dce6632daae1 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -1142,7 +1142,7 @@ fritz_remove_pci(struct pci_dev *pdev)
 			pr_info("%s: drvdata already removed\n", __func__);
 }
 
-static struct pci_device_id fcpci_ids[] = {
+static const struct pci_device_id fcpci_ids[] = {
 	{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID,
 	  0, 0, (unsigned long) "Fritz!Card PCI"},
 	{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index aea0c9616ea5..3cf07b8ced1c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5348,7 +5348,7 @@ static const struct hm_map hfcm_map[] = {
 
 #undef H
 #define H(x)	((unsigned long)&hfcm_map[x])
-static struct pci_device_id hfmultipci_ids[] = {
+static const struct pci_device_id hfmultipci_ids[] = {
 
 	/* Cards with HFC-4S Chip */
 	{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 5dc246d71c16..d2e401a8090e 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2161,7 +2161,7 @@ static const struct _hfc_map hfc_map[] =
 	{},
 };
 
-static struct pci_device_id hfc_ids[] =
+static const struct pci_device_id hfc_ids[] =
 {
 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
 	  (unsigned long) &hfc_map[0] },
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index afde4edef9ae..6a6d848bd18e 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1137,7 +1137,7 @@ static void nj_remove(struct pci_dev *pdev)
 /* We cannot select cards with PCI_SUB... IDs, since there are cards with
  * SUB IDs set to PCI_ANY_ID, so we need to match all and reject the
  * known other cards which do not work with this driver - see probe function */
-static struct pci_device_id nj_pci_ids[] = {
+static const struct pci_device_id nj_pci_ids[] = {
 	{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ }
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 3052c836b89f..d80072fef434 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1398,7 +1398,7 @@ w6692_remove_pci(struct pci_dev *pdev)
 			pr_notice("%s: drvdata already removed\n", __func__);
 }
 
-static struct pci_device_id w6692_ids[] = {
+static const struct pci_device_id w6692_ids[] = {
 	{ PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, (ulong)&w6692_map[0]},
 	{ PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692,
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index c7d68675b028..7108bdb8742e 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1909,7 +1909,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
 
-static struct pci_device_id hisax_pci_tbl[] __used = {
+static const struct pci_device_id hisax_pci_tbl[] __used = {
 #ifdef CONFIG_HISAX_FRITZPCI
 	{PCI_VDEVICE(AVM,      PCI_DEVICE_ID_AVM_A1)			},
 #endif
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 90f051ce0259..9090cc1e1f29 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -86,7 +86,7 @@ typedef struct {
 	char *device_name;
 } hfc4s8s_param;
 
-static struct pci_device_id hfc4s8s_ids[] = {
+static const struct pci_device_id hfc4s8s_ids[] = {
 	{.vendor = PCI_VENDOR_ID_CCD,
 	 .device = PCI_DEVICE_ID_4S,
 	 .subvendor = 0x1397,
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 5a9f39ed1d5d..e4f7573ba9bf 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -52,7 +52,7 @@ module_param(debug, int, 0);
 MODULE_AUTHOR("Kai Germaschewski <kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>");
 MODULE_DESCRIPTION("AVM Fritz!PCI/PnP ISDN driver");
 
-static struct pci_device_id fcpci_ids[] = {
+static const struct pci_device_id fcpci_ids[] = {
 	{ .vendor      = PCI_VENDOR_ID_AVM,
 	  .device      = PCI_DEVICE_ID_AVM_A1,
 	  .subvendor   = PCI_ANY_ID,
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f4eace5ea184..40f3cd7eab0f 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -156,7 +156,8 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
 
 	rdev_for_each(rdev, mddev) {
 		if (! test_bit(In_sync, &rdev->flags)
-		    || test_bit(Faulty, &rdev->flags))
+		    || test_bit(Faulty, &rdev->flags)
+		    || test_bit(Bitmap_sync, &rdev->flags))
 			continue;
 
 		target = offset + index * (PAGE_SIZE/512);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 991f0fe2dcc6..b50eb4ac1b82 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -134,7 +134,9 @@ enum flag_bits {
 	Faulty,			/* device is known to have a fault */
 	In_sync,		/* device is in_sync with rest of array */
 	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
-				 * bitmap-based recovery to get fully in sync
+				 * bitmap-based recovery to get fully in sync.
+				 * The bit is only meaningful before the device
+				 * has been passed to pers->hot_add_disk.
 				 */
 	WriteMostly,		/* Avoid reading if at all possible */
 	AutoDetected,		/* added by auto-detect */
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 77cce3573aa8..44ad5baf3206 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1150,7 +1150,7 @@ int ppl_init_log(struct r5conf *conf)
 		goto err;
 	}
 
-	ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0);
+	ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
 	if (!ppl_conf->bs) {
 		ret = -ENOMEM;
 		goto err;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2ceb338b094b..aeeb8d6854e2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7951,12 +7951,10 @@ static void end_reshape(struct r5conf *conf)
 {
 
 	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
-		struct md_rdev *rdev;
 
 		spin_lock_irq(&conf->device_lock);
 		conf->previous_raid_disks = conf->raid_disks;
-		rdev_for_each(rdev, conf->mddev)
-			rdev->data_offset = rdev->new_data_offset;
+		md_finish_reshape(conf->mddev);
 		smp_wmb();
 		conf->reshape_progress = MaxSector;
 		conf->mddev->reshape_position = MaxSector;
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 7c754a0f14bb..19e4e904c9bf 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -2,20 +2,11 @@
 # Multiplexer devices
 #
 
-menuconfig MULTIPLEXER
-	tristate "Multiplexer subsystem"
-	help
-	  Multiplexer controller subsystem. Multiplexers are used in a
-	  variety of settings, and this subsystem abstracts their use
-	  so that the rest of the kernel sees a common interface. When
-	  multiple parallel multiplexers are controlled by one single
-	  multiplexer controller, this subsystem also coordinates the
-	  multiplexer accesses.
-
-	  To compile the subsystem as a module, choose M here: the module will
-	  be called mux-core.
+config MULTIPLEXER
+	tristate
 
-if MULTIPLEXER
+menu "Multiplexer drivers"
+	depends on MULTIPLEXER
 
 config MUX_ADG792A
 	tristate "Analog Devices ADG792A/ADG792G Multiplexers"
@@ -56,4 +47,4 @@ config MUX_MMIO
 	  To compile the driver as a module, choose M here: the module will
 	  be called mux-mmio.
 
-endif
+endmenu
diff --git a/drivers/mux/mux-core.c b/drivers/mux/mux-core.c
index 90b8995f07cb..2fe96c470112 100644
--- a/drivers/mux/mux-core.c
+++ b/drivers/mux/mux-core.c
@@ -46,7 +46,7 @@ static int __init mux_init(void)
 
 static void __exit mux_exit(void)
 {
-	class_register(&mux_class);
+	class_unregister(&mux_class);
 	ida_destroy(&mux_ida);
 }
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 14ff622190a5..181839d6fbea 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4596,7 +4596,7 @@ static int bond_check_params(struct bond_params *params)
 	}
 	ad_user_port_key = valptr->value;
 
-	if (bond_mode == BOND_MODE_TLB) {
+	if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
 		bond_opt_initstr(&newval, "default");
 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
 					&newval);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index e68d368e20ac..7f36d3e3c98b 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1665,6 +1665,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
 		.dev_name = "BCM53125",
 		.vlans = 4096,
 		.enabled_ports = 0xff,
+		.arl_entries = 4,
 		.cpu_port = B53_CPU_PORT,
 		.vta_regs = B53_VTA_REGS,
 		.duplex_reg = B53_DUPLEX_STAT_GE,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 53b088166c28..5bcdd33101b0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3178,6 +3178,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
 	.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
 	.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
 	.port_pause_limit = mv88e6390_port_pause_limit,
+	.port_set_cmode = mv88e6390x_port_set_cmode,
 	.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
 	.stats_snapshot = mv88e6390_g1_stats_snapshot,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d3906f6b01bd..86058a9f3417 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1785,16 +1785,18 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 
 	xgene_enet_gpiod_get(pdata);
 
-	pdata->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(pdata->clk)) {
-		/* Abort if the clock is defined but couldn't be retrived.
-		 * Always abort if the clock is missing on DT system as
-		 * the driver can't cope with this case.
-		 */
-		if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
-			return PTR_ERR(pdata->clk);
-		/* Firmware may have set up the clock already. */
-		dev_info(dev, "clocks have been setup already\n");
+	if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
+		pdata->clk = devm_clk_get(&pdev->dev, NULL);
+		if (IS_ERR(pdata->clk)) {
+			/* Abort if the clock is defined but couldn't be
+			 * retrieved. Always abort if the clock is missing on
+			 * DT system as the driver can't cope with this case.
+			 */
+			if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
+				return PTR_ERR(pdata->clk);
+			/* Firmware may have set up the clock already. */
+			dev_info(dev, "clocks have been setup already\n");
+		}
 	}
 
 	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 73aca97a96bc..d937083db9a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -50,11 +50,14 @@ static u32 platform_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
 
 static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
 {
-	return writel(value, bgmac->plat.idm_base + offset);
+	writel(value, bgmac->plat.idm_base + offset);
 }
 
 static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
 {
+	if (!bgmac->plat.idm_base)
+		return true;
+
 	if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN)
 		return false;
 	if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
@@ -66,6 +69,9 @@ static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
 {
 	u32 val;
 
+	if (!bgmac->plat.idm_base)
+		return;
+
 	/* The Reset Control register only contains a single bit to show if the
 	 * controller is currently in reset.  Do a sanity check here, just in
 	 * case the bootloader happened to leave the device in reset.
@@ -180,6 +186,7 @@ static int bgmac_probe(struct platform_device *pdev)
 	bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;
 	bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP;
 	bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP;
+	bgmac->feature_flags |= BGMAC_FEAT_IDM_MASK;
 
 	bgmac->dev = &pdev->dev;
 	bgmac->dma_dev = &pdev->dev;
@@ -207,15 +214,13 @@ static int bgmac_probe(struct platform_device *pdev)
 		return PTR_ERR(bgmac->plat.base);
 
 	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
-	if (!regs) {
-		dev_err(&pdev->dev, "Unable to obtain idm resource\n");
-		return -EINVAL;
+	if (regs) {
+		bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+		if (IS_ERR(bgmac->plat.idm_base))
+			return PTR_ERR(bgmac->plat.idm_base);
+		bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
 	}
 
-	bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
-	if (IS_ERR(bgmac->plat.idm_base))
-		return PTR_ERR(bgmac->plat.idm_base);
-
 	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
 	if (regs) {
 		bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index ba4d2e145bb9..48d672b204a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -622,9 +622,11 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
 	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
 	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
 
-	if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
-		dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
-		return -ENOTSUPP;
+	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+		if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
+			dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
+			return -ENOTSUPP;
+		}
 	}
 
 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -855,9 +857,11 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
 static void bgmac_miiconfig(struct bgmac *bgmac)
 {
 	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
-		bgmac_idm_write(bgmac, BCMA_IOCTL,
-				bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
-				BGMAC_BCMA_IOCTL_SW_CLKEN);
+		if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+			bgmac_idm_write(bgmac, BCMA_IOCTL,
+					bgmac_idm_read(bgmac, BCMA_IOCTL) |
+					0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN);
+		}
 		bgmac->mac_speed = SPEED_2500;
 		bgmac->mac_duplex = DUPLEX_FULL;
 		bgmac_mac_speed(bgmac);
@@ -874,11 +878,36 @@ static void bgmac_miiconfig(struct bgmac *bgmac)
 	}
 }
 
+static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
+{
+	u32 iost;
+
+	iost = bgmac_idm_read(bgmac, BCMA_IOST);
+	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
+		iost &= ~BGMAC_BCMA_IOST_ATTACHED;
+
+	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
+	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
+		u32 flags = 0;
+
+		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+			if (!bgmac->has_robosw)
+				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+		}
+		bgmac_clk_enable(bgmac, flags);
+	}
+
+	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+		bgmac_idm_write(bgmac, BCMA_IOCTL,
+				bgmac_idm_read(bgmac, BCMA_IOCTL) &
+				~BGMAC_BCMA_IOCTL_SW_RESET);
+}
+
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
 static void bgmac_chip_reset(struct bgmac *bgmac)
 {
 	u32 cmdcfg_sr;
-	u32 iost;
 	int i;
 
 	if (bgmac_clk_enabled(bgmac)) {
@@ -899,20 +928,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
 		/* TODO: Clear software multicast filter list */
 	}
 
-	iost = bgmac_idm_read(bgmac, BCMA_IOST);
-	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
-		iost &= ~BGMAC_BCMA_IOST_ATTACHED;
-
-	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
-	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
-		u32 flags = 0;
-		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
-			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
-			if (!bgmac->has_robosw)
-				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
-		}
-		bgmac_clk_enable(bgmac, flags);
-	}
+	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK))
+		bgmac_chip_reset_idm_config(bgmac);
 
 	/* Request Misc PLL for corerev > 2 */
 	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
@@ -970,11 +987,6 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
 				      BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
 	}
 
-	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
-		bgmac_idm_write(bgmac, BCMA_IOCTL,
-				bgmac_idm_read(bgmac, BCMA_IOCTL) &
-				~BGMAC_BCMA_IOCTL_SW_RESET);
-
 	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
 	 * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
 	 * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
@@ -1497,8 +1509,10 @@ int bgmac_enet_probe(struct bgmac *bgmac)
 	bgmac_clk_enable(bgmac, 0);
 
 	/* This seems to be fixing IRQ by assigning OOB #6 to the core */
-	if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
-		bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
+	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
+		if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
+			bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
+	}
 
 	bgmac_chip_reset(bgmac);
 
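The bgmac changes gate every IDM register access behind the new
BGMAC_FEAT_IDM_MASK flag, which the platform back end sets by default and
clears only when an "idm_base" resource actually exists; clk_enabled simply
reports true when there is no IDM block to consult. The guard shape, with
hypothetical foo_* names:

	static bool foo_clk_enabled(struct foo *foo)
	{
		/* No IDM block: trust firmware to have enabled the clocks. */
		if (!foo->idm_base)
			return true;

		return foo_idm_read(foo, FOO_IOCTL) & FOO_CLK_EN;
	}
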
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index c1818766c501..443d57b10264 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -425,6 +425,7 @@
 #define BGMAC_FEAT_CC4_IF_SW_TYPE	BIT(17)
 #define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII	BIT(18)
 #define BGMAC_FEAT_CC7_IF_TYPE_RGMII	BIT(19)
+#define BGMAC_FEAT_IDM_MASK		BIT(20)
 
 struct bgmac_slot_info {
 	union {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 43423744fdfa..1e33abde4a3e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2886,7 +2886,7 @@ static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
 
 static int bnx2x_test_nvram(struct bnx2x *bp)
 {
-	const struct crc_pair nvram_tbl[] = {
+	static const struct crc_pair nvram_tbl[] = {
 		{     0,  0x14 }, /* bootstrap */
 		{  0x14,  0xec }, /* dir */
 		{ 0x100, 0x350 }, /* manuf_info */
@@ -2895,7 +2895,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
 		{ 0x708,  0x70 }, /* manuf_key_info */
 		{     0,     0 }
 	};
-	const struct crc_pair nvram_tbl2[] = {
+	static const struct crc_pair nvram_tbl2[] = {
 		{ 0x7e8, 0x350 }, /* manuf_info2 */
 		{ 0xb38,  0xf0 }, /* feature_info */
 		{     0,     0 }
@@ -3162,7 +3162,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 		if (is_multi(bp)) {
 			for_each_eth_queue(bp, i) {
 				memset(queue_name, 0, sizeof(queue_name));
-				sprintf(queue_name, "%d", i);
+				snprintf(queue_name, sizeof(queue_name),
+					 "%d", i);
 				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
 					snprintf(buf + (k + j)*ETH_GSTRING_LEN,
 						ETH_GSTRING_LEN,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index daca1c9d254b..7b0b399aaedd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1202,12 +1202,21 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
 	return tx_cb_ptr;
 }
 
-/* Simple helper to free a control block's resources */
-static void bcmgenet_free_cb(struct enet_cb *cb)
+static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
+					 struct bcmgenet_tx_ring *ring)
 {
-	dev_kfree_skb_any(cb->skb);
-	cb->skb = NULL;
-	dma_unmap_addr_set(cb, dma_addr, 0);
+	struct enet_cb *tx_cb_ptr;
+
+	tx_cb_ptr = ring->cbs;
+	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+
+	/* Rewinding local write pointer */
+	if (ring->write_ptr == ring->cb_ptr)
+		ring->write_ptr = ring->end_ptr;
+	else
+		ring->write_ptr--;
+
+	return tx_cb_ptr;
 }
 
 static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
@@ -1260,18 +1269,72 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
 				 INTRL2_CPU_MASK_SET);
 }
 
+/* Simple helper to free a transmit control block's resources
+ * Returns an skb when the last transmit control block associated with the
+ * skb is freed.  The skb should be freed by the caller if necessary.
+ */
+static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
+					   struct enet_cb *cb)
+{
+	struct sk_buff *skb;
+
+	skb = cb->skb;
+
+	if (skb) {
+		cb->skb = NULL;
+		if (cb == GENET_CB(skb)->first_cb)
+			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+					 dma_unmap_len(cb, dma_len),
+					 DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
+				       dma_unmap_len(cb, dma_len),
+				       DMA_TO_DEVICE);
+		dma_unmap_addr_set(cb, dma_addr, 0);
+
+		if (cb == GENET_CB(skb)->last_cb)
+			return skb;
+
+	} else if (dma_unmap_addr(cb, dma_addr)) {
+		dma_unmap_page(dev,
+			       dma_unmap_addr(cb, dma_addr),
+			       dma_unmap_len(cb, dma_len),
+			       DMA_TO_DEVICE);
+		dma_unmap_addr_set(cb, dma_addr, 0);
+	}
+
+	return NULL;
+}
+
+/* Simple helper to free a receive control block's resources */
+static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
+					   struct enet_cb *cb)
+{
+	struct sk_buff *skb;
+
+	skb = cb->skb;
+	cb->skb = NULL;
+
+	if (dma_unmap_addr(cb, dma_addr)) {
+		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
+		dma_unmap_addr_set(cb, dma_addr, 0);
+	}
+
+	return skb;
+}
+
 /* Unlocked version of the reclaim routine */
 static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct device *kdev = &priv->pdev->dev;
-	struct enet_cb *tx_cb_ptr;
-	unsigned int pkts_compl = 0;
+	unsigned int txbds_processed = 0;
 	unsigned int bytes_compl = 0;
-	unsigned int c_index;
+	unsigned int pkts_compl = 0;
 	unsigned int txbds_ready;
-	unsigned int txbds_processed = 0;
+	unsigned int c_index;
+	struct sk_buff *skb;
 
 	/* Clear status before servicing to reduce spurious interrupts */
 	if (ring->index == DESC_INDEX)
@@ -1292,21 +1355,12 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 
 	/* Reclaim transmitted buffers */
 	while (txbds_processed < txbds_ready) {
-		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
-		if (tx_cb_ptr->skb) {
+		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
+					  &priv->tx_cbs[ring->clean_ptr]);
+		if (skb) {
 			pkts_compl++;
-			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
-			dma_unmap_single(kdev,
-					 dma_unmap_addr(tx_cb_ptr, dma_addr),
-					 dma_unmap_len(tx_cb_ptr, dma_len),
-					 DMA_TO_DEVICE);
-			bcmgenet_free_cb(tx_cb_ptr);
-		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
-			dma_unmap_page(kdev,
-				       dma_unmap_addr(tx_cb_ptr, dma_addr),
-				       dma_unmap_len(tx_cb_ptr, dma_len),
-				       DMA_TO_DEVICE);
-			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+			bytes_compl += GENET_CB(skb)->bytes_sent;
+			dev_kfree_skb_any(skb);
 		}
 
 		txbds_processed++;
@@ -1380,95 +1434,6 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
 	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
 }
 
-/* Transmits a single SKB (either head of a fragment or a single SKB)
- * caller must hold priv->lock
- */
-static int bcmgenet_xmit_single(struct net_device *dev,
-				struct sk_buff *skb,
-				u16 dma_desc_flags,
-				struct bcmgenet_tx_ring *ring)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct device *kdev = &priv->pdev->dev;
-	struct enet_cb *tx_cb_ptr;
-	unsigned int skb_len;
-	dma_addr_t mapping;
-	u32 length_status;
-	int ret;
-
-	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
-	if (unlikely(!tx_cb_ptr))
-		BUG();
-
-	tx_cb_ptr->skb = skb;
-
-	skb_len = skb_headlen(skb);
-
-	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
-	ret = dma_mapping_error(kdev, mapping);
-	if (ret) {
-		priv->mib.tx_dma_failed++;
-		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
-		dev_kfree_skb(skb);
-		return ret;
-	}
-
-	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
-	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
-	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
-			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
-			DMA_TX_APPEND_CRC;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		length_status |= DMA_TX_DO_CSUM;
-
-	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
-
-	return 0;
-}
-
-/* Transmit a SKB fragment */
-static int bcmgenet_xmit_frag(struct net_device *dev,
-			      skb_frag_t *frag,
-			      u16 dma_desc_flags,
-			      struct bcmgenet_tx_ring *ring)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct device *kdev = &priv->pdev->dev;
-	struct enet_cb *tx_cb_ptr;
-	unsigned int frag_size;
-	dma_addr_t mapping;
-	int ret;
-
-	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
-	if (unlikely(!tx_cb_ptr))
-		BUG();
-
-	tx_cb_ptr->skb = NULL;
-
-	frag_size = skb_frag_size(frag);
-
-	mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
-	ret = dma_mapping_error(kdev, mapping);
-	if (ret) {
-		priv->mib.tx_dma_failed++;
-		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
-			  __func__);
-		return ret;
-	}
-
-	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
-	dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
-
-	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
-		    (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
-		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
-
-	return 0;
-}
-
 /* Reallocate the SKB to put enough headroom in front of it and insert
  * the transmit checksum offsets in the descriptors
  */
@@ -1535,11 +1500,16 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
 static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
 	struct bcmgenet_tx_ring *ring = NULL;
+	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
 	unsigned long flags = 0;
 	int nr_frags, index;
-	u16 dma_desc_flags;
+	dma_addr_t mapping;
+	unsigned int size;
+	skb_frag_t *frag;
+	u32 len_stat;
 	int ret;
 	int i;
 
@@ -1592,29 +1562,53 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	dma_desc_flags = DMA_SOP;
-	if (nr_frags == 0)
-		dma_desc_flags |= DMA_EOP;
+	for (i = 0; i <= nr_frags; i++) {
+		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
 
-	/* Transmit single SKB or head of fragment list */
-	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
-	if (ret) {
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
+		if (unlikely(!tx_cb_ptr))
+			BUG();
+
+		if (!i) {
+			/* Transmit single SKB or head of fragment list */
+			GENET_CB(skb)->first_cb = tx_cb_ptr;
+			size = skb_headlen(skb);
+			mapping = dma_map_single(kdev, skb->data, size,
+						 DMA_TO_DEVICE);
+		} else {
+			/* xmit fragment */
+			frag = &skb_shinfo(skb)->frags[i - 1];
+			size = skb_frag_size(frag);
+			mapping = skb_frag_dma_map(kdev, frag, 0, size,
+						   DMA_TO_DEVICE);
+		}
 
-	/* xmit fragment */
-	for (i = 0; i < nr_frags; i++) {
-		ret = bcmgenet_xmit_frag(dev,
-					 &skb_shinfo(skb)->frags[i],
-					 (i == nr_frags - 1) ? DMA_EOP : 0,
-					 ring);
+		ret = dma_mapping_error(kdev, mapping);
 		if (ret) {
+			priv->mib.tx_dma_failed++;
+			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
 			ret = NETDEV_TX_OK;
-			goto out;
+			goto out_unmap_frags;
+		}
+		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+		dma_unmap_len_set(tx_cb_ptr, dma_len, size);
+
+		tx_cb_ptr->skb = skb;
+
+		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
+			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+
+		if (!i) {
+			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				len_stat |= DMA_TX_DO_CSUM;
 		}
+		if (i == nr_frags)
+			len_stat |= DMA_EOP;
+
+		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
 	}
 
+	GENET_CB(skb)->last_cb = tx_cb_ptr;
 	skb_tx_timestamp(skb);
 
 	/* Decrement total BD count and advance our write pointer */
@@ -1635,6 +1629,19 @@ out:
 	spin_unlock_irqrestore(&ring->lock, flags);
 
 	return ret;
+
+out_unmap_frags:
+	/* Back up for failed control block mapping */
+	bcmgenet_put_txcb(priv, ring);
+
+	/* Unmap successfully mapped control blocks */
+	while (i-- > 0) {
+		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
+		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
+	}
+
+	dev_kfree_skb(skb);
+	goto out;
 }
 
 static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
@@ -1666,14 +1673,12 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
 	}
 
 	/* Grab the current Rx skb from the ring and DMA-unmap it */
-	rx_skb = cb->skb;
-	if (likely(rx_skb))
-		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-				 priv->rx_buf_len, DMA_FROM_DEVICE);
+	rx_skb = bcmgenet_free_rx_cb(kdev, cb);
 
 	/* Put the new Rx skb on the ring */
 	cb->skb = skb;
 	dma_unmap_addr_set(cb, dma_addr, mapping);
+	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
 	dmadesc_set_addr(priv, cb->bd_addr, mapping);
 
 	/* Return the current Rx skb to caller */
@@ -1880,22 +1885,16 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
 
 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 {
-	struct device *kdev = &priv->pdev->dev;
+	struct sk_buff *skb;
 	struct enet_cb *cb;
 	int i;
 
 	for (i = 0; i < priv->num_rx_bds; i++) {
 		cb = &priv->rx_cbs[i];
 
-		if (dma_unmap_addr(cb, dma_addr)) {
-			dma_unmap_single(kdev,
-					 dma_unmap_addr(cb, dma_addr),
-					 priv->rx_buf_len, DMA_FROM_DEVICE);
-			dma_unmap_addr_set(cb, dma_addr, 0);
-		}
-
-		if (cb->skb)
-			bcmgenet_free_cb(cb);
+		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
+		if (skb)
+			dev_kfree_skb_any(skb);
 	}
 }
 
@@ -2479,8 +2478,10 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
 
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
-	int i;
 	struct netdev_queue *txq;
+	struct sk_buff *skb;
+	struct enet_cb *cb;
+	int i;
 
 	bcmgenet_fini_rx_napi(priv);
 	bcmgenet_fini_tx_napi(priv);
@@ -2489,10 +2490,10 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 	bcmgenet_dma_teardown(priv);
 
 	for (i = 0; i < priv->num_tx_bds; i++) {
-		if (priv->tx_cbs[i].skb != NULL) {
-			dev_kfree_skb(priv->tx_cbs[i].skb);
-			priv->tx_cbs[i].skb = NULL;
-		}
+		cb = priv->tx_cbs + i;
+		skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
+		if (skb)
+			dev_kfree_skb(skb);
 	}
 
 	for (i = 0; i < priv->hw_params->tx_queues; i++) {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index efd07020b89f..b9344de669f8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -544,6 +544,8 @@ struct bcmgenet_hw_params {
 };
 
 struct bcmgenet_skb_cb {
+	struct enet_cb *first_cb;	/* First control block of SKB */
+	struct enet_cb *last_cb;	/* Last control block of SKB */
 	unsigned int bytes_sent;	/* bytes on the wire (no TSB) */
 };
 
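The reworked bcmgenet_xmit() above maps the head and every fragment in one loop; when a mapping fails partway, it backs up over the control block that never got mapped and then unmaps the ones that did, in reverse. A minimal userspace sketch of that unwind idiom (all names hypothetical; malloc()/free() stand in for DMA map/unmap):

```c
#include <stdio.h>
#include <stdlib.h>

struct cb { void *mapped; };	/* stand-in for struct enet_cb */

/* Simulate dma_map_single()/skb_frag_dma_map(): fail at index fail_at. */
static int map_one(struct cb *cb, int i, int fail_at)
{
	if (i == fail_at)
		return -1;
	cb->mapped = malloc(1);
	return cb->mapped ? 0 : -1;
}

static int xmit_all(struct cb *cbs, int n, int fail_at)
{
	int i;

	for (i = 0; i < n; i++)
		if (map_one(&cbs[i], i, fail_at))
			goto unwind;
	return 0;

unwind:
	/* cbs[i] was consumed but never mapped, so only back up over it;
	 * then unmap entries 0..i-1 in reverse, mirroring the
	 * "while (i-- > 0)" loop in the error path above.
	 */
	while (i-- > 0) {
		free(cbs[i].mapped);
		cbs[i].mapped = NULL;
	}
	return -1;
}

int main(void)
{
	struct cb cbs[4] = { { 0 } };

	printf("xmit_all -> %d\n", xmit_all(cbs, 4, 2));	/* fails at i == 2 */
	return 0;
}
```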
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 28ecda3d3404..ebd353bc78ff 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -335,7 +335,7 @@ lio_ethtool_get_channels(struct net_device *dev,
 
 static int lio_get_eeprom_len(struct net_device *netdev)
 {
-	u8 buf[128];
+	u8 buf[192];
 	struct lio *lio = GET_LIO(netdev);
 	struct octeon_device *oct_dev = lio->oct_dev;
 	struct octeon_board_info *board_info;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index a0ca68ce3fbb..79112563a25a 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1008,7 +1008,7 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
 {
 	struct device *dev = &bgx->pdev->dev;
 	struct lmac *lmac;
-	char str[20];
+	char str[27];
 
 	if (!bgx->is_dlm && lmacid)
 		return;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 50517cfd9671..9f9d6cae39d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -441,7 +441,8 @@ void cxgb4_ptp_init(struct adapter *adapter)
 
 	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
 						&adapter->pdev->dev);
-	if (!adapter->ptp_clock) {
+	if (IS_ERR_OR_NULL(adapter->ptp_clock)) {
+		adapter->ptp_clock = NULL;
 		dev_err(adapter->pdev_dev,
 			"PTP %s Clock registration has failed\n", __func__);
 		return;
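The cxgb4 fix above works because ptp_clock_register() returns NULL when PTP clock support is compiled out and an ERR_PTR() on real failures, so a NULL-only test misses the error case; normalizing the pointer back to NULL also keeps later unregister paths safe. A self-contained re-creation of the ERR_PTR()/IS_ERR_OR_NULL() convention (userspace stand-ins, not the kernel headers):

```c
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)	/* errno encoded in the pointer value */
{
	return (void *)error;
}

static int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int clock;	/* stands in for a real ptp_clock */

	printf("NULL          -> %d\n", IS_ERR_OR_NULL(NULL));		/* 1 */
	printf("ERR_PTR(-12)  -> %d\n", IS_ERR_OR_NULL(ERR_PTR(-12)));	/* 1 */
	printf("valid pointer -> %d\n", IS_ERR_OR_NULL(&clock));	/* 0 */
	return 0;
}
```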
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 99987d8e437e..aa28299aef5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -174,6 +174,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */
+	CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */
+	CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */
 
 	/* T6 adapters:
 	 */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ff864a187d5a..a37166ee577b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -776,8 +776,9 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
 
 	assert(handle);
 	mac_cb = hns_get_mac_cb(handle);
-	if (!mac_cb->cpld_ctrl)
+	if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
 		return;
+
 	hns_set_led_opt(mac_cb);
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 7a8addda726e..408b63faf9a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -53,6 +53,34 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
 	return ret;
 }
 
+static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
+					  u32 link, u32 port, u32 act)
+{
+	union acpi_object *obj;
+	union acpi_object obj_args[3], argv4;
+
+	obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+	obj_args[0].integer.value = link;
+	obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+	obj_args[1].integer.value = port;
+	obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+	obj_args[2].integer.value = act;
+
+	argv4.type = ACPI_TYPE_PACKAGE;
+	argv4.package.count = 3;
+	argv4.package.elements = obj_args;
+
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+				&hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+	if (!obj) {
+		dev_warn(mac_cb->dev, "ledctrl fail, link:%d port:%d act:%d!\n",
+			 link, port, act);
+		return;
+	}
+
+	ACPI_FREE(obj);
+}
+
 static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
 			     u16 speed, int data)
 {
@@ -93,6 +121,18 @@ static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
 	}
 }
 
+static void hns_cpld_set_led_acpi(struct hns_mac_cb *mac_cb, int link_status,
+				  u16 speed, int data)
+{
+	if (!mac_cb) {
+		pr_err("cpld_led_set mac_cb is null!\n");
+		return;
+	}
+
+	hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+				      link_status, mac_cb->mac_id, data);
+}
+
 static void cpld_led_reset(struct hns_mac_cb *mac_cb)
 {
 	if (!mac_cb || !mac_cb->cpld_ctrl)
@@ -103,6 +143,20 @@ static void cpld_led_reset(struct hns_mac_cb *mac_cb)
 	mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
 }
 
+static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
+{
+	if (!mac_cb) {
+		pr_err("cpld_led_reset mac_cb is null!\n");
+		return;
+	}
+
+	if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
+		return;
+
+	hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+				      0, mac_cb->mac_id, 0);
+}
+
 static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
 			   enum hnae_led_state status)
 {
@@ -604,8 +658,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
 
 		misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
 	} else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
-		misc_op->cpld_set_led = hns_cpld_set_led;
-		misc_op->cpld_reset_led = cpld_led_reset;
+		misc_op->cpld_set_led = hns_cpld_set_led_acpi;
+		misc_op->cpld_reset_led = cpld_led_reset_acpi;
 		misc_op->cpld_set_led_id = cpld_set_led_id;
 
 		misc_op->dsaf_reset = hns_dsaf_rst_acpi;
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 249a4584401a..b651c1210555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -283,7 +283,7 @@ int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
 }
 
 /* Should be called under a lock */
-static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
+static void __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
 {
 	struct mlx4_zone_allocator *zone_alloc = entry->allocator;
 
@@ -315,8 +315,6 @@ static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
 		}
 		zone_alloc->mask = mask;
 	}
-
-	return 0;
 }
 
 void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
@@ -457,7 +455,7 @@ struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32
 int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
 {
 	struct mlx4_zone_entry *zone;
-	int res;
+	int res = 0;
 
 	spin_lock(&zones->lock);
 
@@ -468,7 +466,7 @@ int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
 		goto out;
 	}
 
-	res = __mlx4_zone_remove_one_entry(zone);
+	__mlx4_zone_remove_one_entry(zone);
 
 out:
 	spin_unlock(&zones->lock);
@@ -578,7 +576,7 @@ out:
 }
 
 static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
-				 struct mlx4_buf *buf, gfp_t gfp)
+				 struct mlx4_buf *buf)
 {
 	dma_addr_t t;
 
@@ -587,7 +585,7 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
 	buf->page_shift   = get_order(size) + PAGE_SHIFT;
 	buf->direct.buf   =
 		dma_zalloc_coherent(&dev->persist->pdev->dev,
-				    size, &t, gfp);
+				    size, &t, GFP_KERNEL);
 	if (!buf->direct.buf)
 		return -ENOMEM;
 
@@ -607,10 +605,10 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
  *  multiple pages, so we don't require too much contiguous memory.
  */
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
-		   struct mlx4_buf *buf, gfp_t gfp)
+		   struct mlx4_buf *buf)
 {
 	if (size <= max_direct) {
-		return mlx4_buf_direct_alloc(dev, size, buf, gfp);
+		return mlx4_buf_direct_alloc(dev, size, buf);
 	} else {
 		dma_addr_t t;
 		int i;
@@ -620,14 +618,14 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		buf->npages	= buf->nbufs;
 		buf->page_shift  = PAGE_SHIFT;
 		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
-					   gfp);
+					   GFP_KERNEL);
 		if (!buf->page_list)
 			return -ENOMEM;
 
 		for (i = 0; i < buf->nbufs; ++i) {
 			buf->page_list[i].buf =
 				dma_zalloc_coherent(&dev->persist->pdev->dev,
-						    PAGE_SIZE, &t, gfp);
+						    PAGE_SIZE, &t, GFP_KERNEL);
 			if (!buf->page_list[i].buf)
 				goto err_free;
 
@@ -663,12 +661,11 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
 
-static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
-						 gfp_t gfp)
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
 {
 	struct mlx4_db_pgdir *pgdir;
 
-	pgdir = kzalloc(sizeof *pgdir, gfp);
+	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
 	if (!pgdir)
 		return NULL;
 
@@ -676,7 +673,7 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
 	pgdir->bits[0] = pgdir->order0;
 	pgdir->bits[1] = pgdir->order1;
 	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-					    &pgdir->db_dma, gfp);
+					    &pgdir->db_dma, GFP_KERNEL);
 	if (!pgdir->db_page) {
 		kfree(pgdir);
 		return NULL;
@@ -716,7 +713,7 @@ found:
 	return 0;
 }
 
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_db_pgdir *pgdir;
@@ -728,7 +725,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp
 		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
 			goto out;
 
-	pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
+	pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev);
 	if (!pgdir) {
 		ret = -ENOMEM;
 		goto out;
@@ -780,13 +777,13 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
 {
 	int err;
 
-	err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
+	err = mlx4_db_alloc(dev, &wqres->db, 1);
 	if (err)
 		return err;
 
 	*wqres->db.db = 0;
 
-	err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL);
+	err = mlx4_buf_direct_alloc(dev, size, &wqres->buf);
 	if (err)
 		goto err_db;
 
@@ -795,7 +792,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
 	if (err)
 		goto err_buf;
 
-	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
+	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
 	if (err)
 		goto err_mtt;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index fa6d2354a0e9..c56a511b918e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -224,11 +224,11 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
 	if (*cqn == -1)
 		return -ENOMEM;
 
-	err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
+	err = mlx4_table_get(dev, &cq_table->table, *cqn);
 	if (err)
 		goto err_out;
 
-	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
+	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
 	if (err)
 		goto err_put;
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e5fb89505a13..436f7689a032 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1042,7 +1042,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
 	if (!context)
 		return -ENOMEM;
 
-	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
+	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
 	if (err) {
 		en_err(priv, "Failed to allocate qp #%x\n", qpn);
 		goto out;
@@ -1086,7 +1086,7 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
 		en_err(priv, "Failed reserving drop qpn\n");
 		return err;
 	}
-	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
+	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
 	if (err) {
 		en_err(priv, "Failed allocating drop qp\n");
 		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
@@ -1158,8 +1158,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 
 	/* Configure RSS indirection qp */
-	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp,
-			    GFP_KERNEL);
+	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
 	if (err) {
 		en_err(priv, "Failed to allocate RSS indirection QP\n");
 		goto rss_err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4f3a9b27ce4a..73faa3d77921 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -111,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 		goto err_hwq_res;
 	}
 
-	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL);
+	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
 	if (err) {
 		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
 		goto err_reserve;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index e1f9e7cebf8f..5a7816e7c7b4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -251,8 +251,7 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
 			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
-		   gfp_t gfp)
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
 {
 	u32 i = (obj & (table->num_obj - 1)) /
 			(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
@@ -266,7 +265,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
 	}
 
 	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
-				       (table->lowmem ? gfp : GFP_HIGHUSER) |
+				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
 				       __GFP_NOWARN, table->coherent);
 	if (!table->icm[i]) {
 		ret = -ENOMEM;
@@ -363,7 +362,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 	u32 i;
 
 	for (i = start; i <= end; i += inc) {
-		err = mlx4_table_get(dev, table, i, GFP_KERNEL);
+		err = mlx4_table_get(dev, table, i);
 		if (err)
 			goto fail;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index 0c7364550150..dee67fa39107 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -71,8 +71,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 				gfp_t gfp_mask, int coherent);
 void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
 
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
-		   gfp_t gfp);
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
 void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			 u32 start, u32 end);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 30616cd0140d..706d7f21ac5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -969,7 +969,7 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
-int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
 void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
 int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
 void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
@@ -977,7 +977,7 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
 void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
 int __mlx4_mpt_reserve(struct mlx4_dev *dev);
 void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
-int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp);
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
 void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
 u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
 void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index ce852ca22a96..24282cd017d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -479,14 +479,14 @@ static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
 	__mlx4_mpt_release(dev, index);
 }
 
-int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
-	return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
+	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
 }
 
-static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
+static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
 {
 	u64 param = 0;
 
@@ -497,7 +497,7 @@ static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
 							MLX4_CMD_TIME_CLASS_A,
 							MLX4_CMD_WRAPPED);
 	}
-	return __mlx4_mpt_alloc_icm(dev, index, gfp);
+	return __mlx4_mpt_alloc_icm(dev, index);
 }
 
 void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
@@ -629,7 +629,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 	struct mlx4_mpt_entry *mpt_entry;
 	int err;
 
-	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
+	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
 	if (err)
 		return err;
 
@@ -787,14 +787,13 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 EXPORT_SYMBOL_GPL(mlx4_write_mtt);
 
 int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-		       struct mlx4_buf *buf, gfp_t gfp)
+		       struct mlx4_buf *buf)
 {
 	u64 *page_list;
 	int err;
 	int i;
 
-	page_list = kmalloc(buf->npages * sizeof *page_list,
-			    gfp);
+	page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
 	if (!page_list)
 		return -ENOMEM;
 
@@ -841,7 +840,7 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
 	struct mlx4_mpt_entry *mpt_entry;
 	int err;
 
-	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
+	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
 	if (err)
 		return err;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 5a310d313e94..26747212526b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -301,29 +301,29 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
 
-int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
 	int err;
 
-	err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
+	err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
 	if (err)
 		goto err_out;
 
-	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
+	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
 	if (err)
 		goto err_put_qp;
 
-	err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
+	err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
 	if (err)
 		goto err_put_auxc;
 
-	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
+	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
 	if (err)
 		goto err_put_altc;
 
-	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
+	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
 	if (err)
 		goto err_put_rdmarc;
 
@@ -345,7 +345,7 @@ err_out:
 	return err;
 }
 
-static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
 {
 	u64 param = 0;
 
@@ -355,7 +355,7 @@ static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
 				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
 				    MLX4_CMD_WRAPPED);
 	}
-	return __mlx4_qp_alloc_icm(dev, qpn, gfp);
+	return __mlx4_qp_alloc_icm(dev, qpn);
 }
 
 void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
@@ -397,7 +397,7 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
 	return qp;
 }
 
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
@@ -408,7 +408,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
 
 	qp->qpn = qpn;
 
-	err = mlx4_qp_alloc_icm(dev, qpn, gfp);
+	err = mlx4_qp_alloc_icm(dev, qpn);
 	if (err)
 		return err;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 812783865205..215e21c3dc8a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1822,7 +1822,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 			return err;
 
 		if (!fw_reserved(dev, qpn)) {
-			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
+			err = __mlx4_qp_alloc_icm(dev, qpn);
 			if (err) {
 				res_abort_move(dev, slave, RES_QP, qpn);
 				return err;
@@ -1909,7 +1909,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 		if (err)
 			return err;
 
-		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
+		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
 		if (err) {
 			res_abort_move(dev, slave, RES_MPT, id);
 			return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index f44d089e2ca6..bedf52126824 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -100,11 +100,11 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
 	if (*srqn == -1)
 		return -ENOMEM;
 
-	err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL);
+	err = mlx4_table_get(dev, &srq_table->table, *srqn);
 	if (err)
 		goto err_out;
 
-	err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL);
+	err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
 	if (err)
 		goto err_put;
 	return 0;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 746d94e28470..60850bfa3d32 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -766,11 +766,13 @@ static void emac_shutdown(struct platform_device *pdev)
 	struct emac_adapter *adpt = netdev_priv(netdev);
 	struct emac_sgmii *sgmii = &adpt->phy;
 
-	/* Closing the SGMII turns off its interrupts */
-	sgmii->close(adpt);
+	if (netdev->flags & IFF_UP) {
+		/* Closing the SGMII turns off its interrupts */
+		sgmii->close(adpt);
 
-	/* Resetting the MAC turns off all DMA and its interrupts */
-	emac_mac_reset(adpt);
+		/* Resetting the MAC turns off all DMA and its interrupts */
+		emac_mac_reset(adpt);
+	}
 }
 
 static struct platform_driver emac_platform_driver = {
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index b607936e1b3e..9c0488e0f08e 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -90,17 +90,13 @@ struct ioc3_private {
 	spinlock_t ioc3_lock;
 	struct mii_if_info mii;
 
+	struct net_device *dev;
 	struct pci_dev *pdev;
 
 	/* Members used by autonegotiation  */
 	struct timer_list ioc3_timer;
 };
 
-static inline struct net_device *priv_netdev(struct ioc3_private *dev)
-{
-	return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
-}
-
 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void ioc3_set_multicast_list(struct net_device *dev);
 static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -427,7 +423,7 @@ static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
 		nic[i] = nic_read_byte(ioc3);
 
 	for (i = 2; i < 8; i++)
-		priv_netdev(ip)->dev_addr[i - 2] = nic[i];
+		ip->dev->dev_addr[i - 2] = nic[i];
 }
 
 /*
@@ -439,7 +435,7 @@ static void ioc3_get_eaddr(struct ioc3_private *ip)
 {
 	ioc3_get_eaddr_nic(ip);
 
-	printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr);
+	printk("Ethernet address is %pM.\n", ip->dev->dev_addr);
 }
 
 static void __ioc3_set_mac_address(struct net_device *dev)
@@ -790,13 +786,12 @@ static void ioc3_timer(unsigned long data)
  */
 static int ioc3_mii_init(struct ioc3_private *ip)
 {
-	struct net_device *dev = priv_netdev(ip);
 	int i, found = 0, res = 0;
 	int ioc3_phy_workaround = 1;
 	u16 word;
 
 	for (i = 0; i < 32; i++) {
-		word = ioc3_mdio_read(dev, i, MII_PHYSID1);
+		word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);
 
 		if (word != 0xffff && word != 0x0000) {
 			found = 1;
@@ -1276,6 +1271,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	ip = netdev_priv(dev);
+	ip->dev = dev;
 
 	dev->irq = pdev->irq;
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f233bf8b4ebb..c4407e8e39a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -117,7 +117,7 @@ static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
 	void __iomem *ioaddr = hw->pcsr;
 	u32 value;
 
-	const struct stmmac_rx_routing route_possibilities[] = {
+	static const struct stmmac_rx_routing route_possibilities[] = {
 		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
 		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
 		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1853f7ff6657..1763e48c84e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4120,8 +4120,15 @@ int stmmac_dvr_probe(struct device *device,
 	if ((phyaddr >= 0) && (phyaddr <= 31))
 		priv->plat->phy_addr = phyaddr;
 
-	if (priv->plat->stmmac_rst)
+	if (priv->plat->stmmac_rst) {
+		ret = reset_control_assert(priv->plat->stmmac_rst);
 		reset_control_deassert(priv->plat->stmmac_rst);
+		/* Some reset controllers implement only a reset callback
+		 * instead of the assert + deassert pair.
+		 */
+		if (ret == -ENOTSUPP)
+			reset_control_reset(priv->plat->stmmac_rst);
+	}
 
 	/* Init MAC and get the capabilities */
 	ret = stmmac_hw_init(priv);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 46cb7f8955a2..4bb04aaf9650 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9532,7 +9532,7 @@ static struct niu_parent *niu_get_parent(struct niu *np,
 		p = niu_new_parent(np, id, ptype);
 
 	if (p) {
-		char port_name[6];
+		char port_name[8];
 		int err;
 
 		sprintf(port_name, "port%d", port);
@@ -9553,7 +9553,7 @@ static void niu_put_parent(struct niu *np)
 {
 	struct niu_parent *p = np->parent;
 	u8 port = np->port;
-	char port_name[6];
+	char port_name[8];
 
 	BUG_ON(!p || p->ports[port] != np);
 
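The two port_name bumps above look like fixes for GCC 7's format-truncation warnings: port is a u8, so "port%d" can expand to "port255", i.e. 7 characters plus the NUL terminator, which overflows the old 6-byte buffer for any two- or three-digit port. A quick check of the arithmetic:

```c
#include <stdio.h>

int main(void)
{
	char port_name[8];
	/* Worst case for a u8 port: "port255" = 7 chars + NUL = 8 bytes. */
	int n = snprintf(port_name, sizeof(port_name), "port%d", 255);

	printf("\"%s\" needs %d + 1 bytes\n", port_name, n);
	return 0;
}
```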
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 711fbbbc4b1f..163d8d16bc24 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -654,6 +654,8 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
 			RET(-EFAULT);
 		}
 		DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
+	} else {
+		return -EOPNOTSUPP;
 	}
 
 	if (!capable(CAP_SYS_RAWIO))
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1850e348f555..badd0a8caeb9 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -3089,6 +3089,31 @@ static int cpsw_probe(struct platform_device *pdev)
 			cpsw->quirk_irq = true;
 	}
 
+	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	ndev->netdev_ops = &cpsw_netdev_ops;
+	ndev->ethtool_ops = &cpsw_ethtool_ops;
+	netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+	netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
+	cpsw_split_res(ndev);
+
+	/* register the network device */
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(priv->dev, "error registering net device\n");
+		ret = -ENODEV;
+		goto clean_ale_ret;
+	}
+
+	if (cpsw->data.dual_emac) {
+		ret = cpsw_probe_dual_emac(priv);
+		if (ret) {
+			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
+			goto clean_unregister_netdev_ret;
+		}
+	}
+
 	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
 	 * MISC IRQs which are always kept disabled with this driver so
 	 * we will not request them.
@@ -3127,33 +3152,9 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_ale_ret;
 	}
 
-	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
-	ndev->netdev_ops = &cpsw_netdev_ops;
-	ndev->ethtool_ops = &cpsw_ethtool_ops;
-	netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
-	netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
-	cpsw_split_res(ndev);
-
-	/* register the network device */
-	SET_NETDEV_DEV(ndev, &pdev->dev);
-	ret = register_netdev(ndev);
-	if (ret) {
-		dev_err(priv->dev, "error registering net device\n");
-		ret = -ENODEV;
-		goto clean_ale_ret;
-	}
-
 	cpsw_notice(priv, probe,
 		    "initialized device (regs %pa, irq %d, pool size %d)\n",
 		    &ss_res->start, ndev->irq, dma_params.descs_pool_size);
-	if (cpsw->data.dual_emac) {
-		ret = cpsw_probe_dual_emac(priv);
-		if (ret) {
-			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
-			goto clean_unregister_netdev_ret;
-		}
-	}
 
 	pm_runtime_put(&pdev->dev);
 
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 00755b6a42cf..c608e1dfaf09 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -135,8 +135,8 @@ int mdio_mux_init(struct device *dev,
 	for_each_available_child_of_node(dev->of_node, child_bus_node) {
 		int v;
 
-		v = of_mdio_parse_addr(dev, child_bus_node);
-		if (v < 0) {
+		r = of_property_read_u32(child_bus_node, "reg", &v);
+		if (r) {
 			dev_err(dev,
 				"Error: Failed to find reg for child %s\n",
 				of_node_full_name(child_bus_node));
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 13028833bee3..bd4303944e44 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -120,6 +120,7 @@ struct ppp {
 	int		n_channels;	/* how many channels are attached 54 */
 	spinlock_t	rlock;		/* lock for receive side 58 */
 	spinlock_t	wlock;		/* lock for transmit side 5c */
+	int __percpu	*xmit_recursion; /* xmit recursion detect */
 	int		mru;		/* max receive unit 60 */
 	unsigned int	flags;		/* control bits 64 */
 	unsigned int	xstate;		/* transmit state bits 68 */
@@ -1025,6 +1026,7 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
 	struct ppp *ppp = netdev_priv(dev);
 	int indx;
 	int err;
+	int cpu;
 
 	ppp->dev = dev;
 	ppp->ppp_net = src_net;
@@ -1039,6 +1041,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
 	INIT_LIST_HEAD(&ppp->channels);
 	spin_lock_init(&ppp->rlock);
 	spin_lock_init(&ppp->wlock);
+
+	ppp->xmit_recursion = alloc_percpu(int);
+	if (!ppp->xmit_recursion) {
+		err = -ENOMEM;
+		goto err1;
+	}
+	for_each_possible_cpu(cpu)
+		(*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
+
 #ifdef CONFIG_PPP_MULTILINK
 	ppp->minseq = -1;
 	skb_queue_head_init(&ppp->mrq);
@@ -1050,11 +1061,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
 
 	err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
 	if (err < 0)
-		return err;
+		goto err2;
 
 	conf->file->private_data = &ppp->file;
 
 	return 0;
+err2:
+	free_percpu(ppp->xmit_recursion);
+err1:
+	return err;
 }
 
 static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
@@ -1400,18 +1415,16 @@ static void __ppp_xmit_process(struct ppp *ppp)
 	ppp_xmit_unlock(ppp);
 }
 
-static DEFINE_PER_CPU(int, ppp_xmit_recursion);
-
 static void ppp_xmit_process(struct ppp *ppp)
 {
 	local_bh_disable();
 
-	if (unlikely(__this_cpu_read(ppp_xmit_recursion)))
+	if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
 		goto err;
 
-	__this_cpu_inc(ppp_xmit_recursion);
+	(*this_cpu_ptr(ppp->xmit_recursion))++;
 	__ppp_xmit_process(ppp);
-	__this_cpu_dec(ppp_xmit_recursion);
+	(*this_cpu_ptr(ppp->xmit_recursion))--;
 
 	local_bh_enable();
 
@@ -1905,7 +1918,7 @@ static void __ppp_channel_push(struct channel *pch)
 		read_lock(&pch->upl);
 		ppp = pch->ppp;
 		if (ppp)
-			__ppp_xmit_process(ppp);
+			ppp_xmit_process(ppp);
 		read_unlock(&pch->upl);
 	}
 }
@@ -1914,9 +1927,7 @@ static void ppp_channel_push(struct channel *pch)
 {
 	local_bh_disable();
 
-	__this_cpu_inc(ppp_xmit_recursion);
 	__ppp_channel_push(pch);
-	__this_cpu_dec(ppp_xmit_recursion);
 
 	local_bh_enable();
 }
@@ -3057,6 +3068,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
 #endif /* CONFIG_PPP_FILTER */
 
 	kfree_skb(ppp->xmit_pending);
+	free_percpu(ppp->xmit_recursion);
 
 	free_netdev(ppp->dev);
 }
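The ppp rework above replaces a single global per-CPU recursion counter with one embedded in each ppp unit, so recursion detected on one device no longer makes unrelated devices on the same CPU drop packets; note that alloc_percpu() already returns zeroed memory, so the explicit zeroing loop in the hunk is belt and braces. A kernel-style sketch of the per-device guard (illustrative only, using just alloc_percpu()/this_cpu_ptr()/local_bh_disable()):

```c
/* Sketch only -- not tied to this tree. */
struct xmit_guard {
	int __percpu *recursion;
};

static int xmit_guard_init(struct xmit_guard *g)
{
	g->recursion = alloc_percpu(int);	/* zero-initialized */
	return g->recursion ? 0 : -ENOMEM;
}

static void guarded_xmit(struct xmit_guard *g)
{
	local_bh_disable();			/* counter is per CPU, so pin it */
	if (unlikely(*this_cpu_ptr(g->recursion))) {
		local_bh_enable();
		return;				/* re-entered on this CPU: bail out */
	}
	(*this_cpu_ptr(g->recursion))++;
	/* ... actual transmit path runs here ... */
	(*this_cpu_ptr(g->recursion))--;
	local_bh_enable();
}
```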
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d103a1d4fb36..8f572b9f3625 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -768,8 +768,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
 	u8 *buf;
 	int len;
 	int temp;
+	int err;
 	u8 iface_no;
 	struct usb_cdc_parsed_header hdr;
+	u16 curr_ntb_format;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -874,6 +876,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
 		goto error2;
 	}
 
+	/*
+	 * Some Huawei devices have been observed to come out of reset in NTB32
+	 * format mode. Let's check if this is the case, and set the device back
+	 * to NTB16 mode if needed.
+	 */
+	if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
+		err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
+				      USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+				      0, iface_no, &curr_ntb_format, 2);
+		if (err < 0) {
+			goto error2;
+		}
+
+		if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
+			dev_info(&intf->dev, "resetting NTB format to 16-bit\n");
+			err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+					       USB_TYPE_CLASS | USB_DIR_OUT
+					       | USB_RECIP_INTERFACE,
+					       USB_CDC_NCM_NTB16_FORMAT,
+					       iface_no, NULL, 0);
+
+			if (err < 0)
+				goto error2;
+		}
+	}
+
 	cdc_ncm_find_endpoints(dev, ctx->data);
 	cdc_ncm_find_endpoints(dev, ctx->control);
 	if (!dev->in || !dev->out || !dev->status) {
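One possible tightening of the NTB-format hunk above: the value returned by USB_CDC_GET_NTB_FORMAT is a little-endian 16-bit field, so declaring the local as __le16 and comparing through le16_to_cpu() would keep the check correct on big-endian hosts too. A hedged fragment of that variant (a suggestion, not part of the diff):

```c
	__le16 curr_ntb_format;		/* wire format is little-endian */

	err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
			      USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
			      0, iface_no, &curr_ntb_format, 2);
	if (err < 0)
		goto error2;

	if (le16_to_cpu(curr_ntb_format) == USB_CDC_NCM_NTB32_FORMAT) {
		/* ... switch back to NTB16 as above ... */
	}
```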
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 2680a65cd5e4..63f28908afda 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
 	 * be at the end of the frame.
 	 */
 	drvflags |= CDC_NCM_FLAG_NDP_TO_END;
+
+	/* Additionally, it has been reported that some Huawei E3372H devices,
+	 * with firmware version 21.318.01.00.541, come out of reset in NTB32
+	 * format mode and need to be switched back to NTB16 format.
+	 */
+	drvflags |= CDC_NCM_FLAG_RESET_NTB16;
 	ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
 	if (ret)
 		goto err;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2dfca96a63b6..340c13484e5c 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -898,6 +898,7 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
 	.set_wol	= smsc95xx_ethtool_set_wol,
 	.get_link_ksettings	= smsc95xx_get_link_ksettings,
 	.set_link_ksettings	= smsc95xx_set_link_ksettings,
+	.get_ts_info	= ethtool_op_get_ts_info,
 };
 
 static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index ba1c9f93592b..9c51b8be0038 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -311,7 +311,7 @@ struct vmxnet3_intr {
 	u8  num_intrs;			/* # of intr vectors */
 	u8  event_intr_idx;		/* idx of the intr vector for event */
 	u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
-	char	event_msi_vector_name[IFNAMSIZ+11];
+	char	event_msi_vector_name[IFNAMSIZ+17];
 #ifdef CONFIG_PCI_MSI
 	struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
 #endif
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 6e2e760d98b1..0b75def39c6c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -5704,7 +5704,7 @@ static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev)
 
 static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev)
 {
-	const u8 glrt_table[] = {
+	static const u8 glrt_table[] = {
 		0xE0, 0x1F, 0X38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */
 		0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */
 		0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7cd99b1f8596..75bc08c6838c 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -421,14 +421,15 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num)
 static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
 {
 	const unsigned int sector_size = 512;
-	sector_t start_sector;
+	sector_t start_sector, end_sector;
 	u64 num_sectors;
 	u32 rem;
 
 	start_sector = div_u64(ns_offset, sector_size);
-	num_sectors = div_u64_rem(len, sector_size, &rem);
+	end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
 	if (rem)
-		num_sectors++;
+		end_sector++;
+	num_sectors = end_sector - start_sector;
 
 	if (unlikely(num_sectors > (u64)INT_MAX)) {
 		u64 remaining = num_sectors;
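The nvdimm fix above addresses an under-count: rounding len alone up to whole sectors ignores the sub-sector offset of the poisoned range, so a range straddling a sector boundary was reported one sector short. A small worked example of the two calculations:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t sector = 512;
	uint64_t ns_offset = 256, len = 512;	/* poison spans bytes 256..767 */

	/* Old logic: sectors = ceil(len / sector) -> 1, one sector short. */
	uint64_t old_num = (len + sector - 1) / sector;

	/* New logic: end - start, both in sector units -> 2, correct. */
	uint64_t start = ns_offset / sector;
	uint64_t end = (ns_offset + len + sector - 1) / sector;

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_num,
	       (unsigned long long)(end - start));
	return 0;
}
```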
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cb96f4a7ae3a..3b77cfe5aa1e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
 
 	c.directive.opcode = nvme_admin_directive_recv;
 	c.directive.nsid = cpu_to_le32(nsid);
-	c.directive.numd = sizeof(*s);
+	c.directive.numd = cpu_to_le32(sizeof(*s));
 	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
 	c.directive.dtype = NVME_DIR_STREAMS;
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d10d2f279d19..8569ee771269 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -539,7 +539,7 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct dma_pool *pool;
@@ -556,7 +556,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 
 	length -= (page_size - offset);
 	if (length <= 0)
-		return true;
+		return BLK_STS_OK;
 
 	dma_len -= (page_size - offset);
 	if (dma_len) {
@@ -569,7 +569,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 
 	if (length <= page_size) {
 		iod->first_dma = dma_addr;
-		return true;
+		return BLK_STS_OK;
 	}
 
 	nprps = DIV_ROUND_UP(length, page_size);
@@ -585,7 +585,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 	if (!prp_list) {
 		iod->first_dma = dma_addr;
 		iod->npages = -1;
-		return false;
+		return BLK_STS_RESOURCE;
 	}
 	list[0] = prp_list;
 	iod->first_dma = prp_dma;
@@ -595,7 +595,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 			__le64 *old_prp_list = prp_list;
 			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 			if (!prp_list)
-				return false;
+				return BLK_STS_RESOURCE;
 			list[iod->npages++] = prp_list;
 			prp_list[0] = old_prp_list[i - 1];
 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -609,13 +609,29 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 			break;
 		if (dma_len > 0)
 			continue;
-		BUG_ON(dma_len < 0);
+		if (unlikely(dma_len < 0))
+			goto bad_sgl;
 		sg = sg_next(sg);
 		dma_addr = sg_dma_address(sg);
 		dma_len = sg_dma_len(sg);
 	}
 
-	return true;
+	return BLK_STS_OK;
+
+ bad_sgl:
+	if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
+				blk_rq_payload_bytes(req), iod->nents)) {
+		for_each_sg(iod->sg, sg, iod->nents, i) {
+			dma_addr_t phys = sg_phys(sg);
+			pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+			       "dma_address:%pad dma_length:%d\n", i, &phys,
+					sg->offset, sg->length,
+					&sg_dma_address(sg),
+					sg_dma_len(sg));
+		}
+	}
+	return BLK_STS_IOERR;
+
 }
 
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -637,7 +653,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 				DMA_ATTR_NO_WARN))
 		goto out;
 
-	if (!nvme_setup_prps(dev, req))
+	ret = nvme_setup_prps(dev, req);
+	if (ret != BLK_STS_OK)
 		goto out_unmap;
 
 	ret = BLK_STS_IOERR;
@@ -2282,7 +2299,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	result = nvme_dev_map(dev);
 	if (result)
-		goto free;
+		goto put_pci;
 
 	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
@@ -2291,7 +2308,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	result = nvme_setup_prp_pools(dev);
 	if (result)
-		goto put_pci;
+		goto unmap;
 
 	quirks |= check_dell_samsung_bug(pdev);
 
@@ -2308,9 +2325,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
  release_pools:
 	nvme_release_prp_pools(dev);
+ unmap:
+	nvme_dev_unmap(dev);
  put_pci:
 	put_device(dev->dev);
-	nvme_dev_unmap(dev);
  free:
 	kfree(dev->queues);
 	kfree(dev);
@@ -2466,6 +2484,9 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0x0a54),
 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
 				NVME_QUIRK_DEALLOCATE_ZEROES, },
+	{ PCI_VDEVICE(INTEL, 0x0a55),
+		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+				NVME_QUIRK_DEALLOCATE_ZEROES, },
 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
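The nvme_setup_prps() conversion above is about error fidelity: a bool can only say "failed", whereas blk_status_t distinguishes a transient allocation failure (BLK_STS_RESOURCE, requeued by blk-mq) from a corrupt scatterlist (BLK_STS_IOERR, which fails the request). A toy model of the tri-state return:

```c
#include <stdio.h>

enum status { STS_OK, STS_RESOURCE, STS_IOERR };	/* models blk_status_t */

static enum status setup_prps(int pool_empty, int bad_sgl)
{
	if (pool_empty)
		return STS_RESOURCE;	/* transient: caller should requeue */
	if (bad_sgl)
		return STS_IOERR;	/* permanent: fail the request */
	return STS_OK;
}

int main(void)
{
	printf("%d %d %d\n",
	       setup_prps(1, 0),	/* 1: retry later */
	       setup_prps(0, 1),	/* 2: hard error */
	       setup_prps(0, 0));	/* 0: proceed */
	return 0;
}
```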
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 35f930db3c02..2d7a98ab53fb 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -168,11 +168,21 @@ out:
 	nvmet_req_complete(req, status);
 }
 
+static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
+{
+	int len = min(src_len, dst_len);
+
+	memcpy(dst, src, len);
+	if (dst_len > len)
+		memset(dst + len, ' ', dst_len - len);
+}
+
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvme_id_ctrl *id;
 	u16 status = 0;
+	const char model[] = "Linux";
 
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
 	if (!id) {
@@ -184,8 +194,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->vid = 0;
 	id->ssvid = 0;
 
-	memset(id->sn, ' ', sizeof(id->sn));
-	snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
+	bin2hex(id->sn, &ctrl->subsys->serial,
+		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+	copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
+	copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
 
-	memset(id->mn, ' ', sizeof(id->mn));
-	strncpy((char *)id->mn, "Linux", sizeof(id->mn));
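The copy_and_pad() helper above exists because NVMe identify strings (sn, mn, fr) are fixed-width, space-padded ASCII fields with no NUL terminator. A userspace copy of the helper showing the resulting layout:

```c
#include <stdio.h>
#include <string.h>

/* Userspace copy of the helper above, for illustration. */
static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
{
	int len = src_len < dst_len ? src_len : dst_len;

	memcpy(dst, src, len);
	if (dst_len > len)
		memset(dst + len, ' ', dst_len - len);
}

int main(void)
{
	char mn[16];

	copy_and_pad(mn, sizeof(mn), "Linux", 5);
	printf("[%.*s]\n", (int)sizeof(mn), mn);	/* [Linux           ] */
	return 0;
}
```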
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index a358ecd93e11..0a0067e771f5 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -650,7 +650,7 @@ out_unlock:
 
 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
 
-static ssize_t nvmet_subsys_version_show(struct config_item *item,
+static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
 					      char *page)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
@@ -666,7 +666,7 @@ static ssize_t nvmet_subsys_version_show(struct config_item *item,
 				(int)NVME_MINOR(subsys->ver));
 }
 
-static ssize_t nvmet_subsys_version_store(struct config_item *item,
+static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
 					       const char *page, size_t count)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
@@ -684,11 +684,33 @@ static ssize_t nvmet_subsys_version_store(struct config_item *item,
 
 	return count;
 }
-CONFIGFS_ATTR(nvmet_subsys_, version);
+CONFIGFS_ATTR(nvmet_subsys_, attr_version);
+
+static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
+					     char *page)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
+}
+
+static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
+					      const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	down_write(&nvmet_config_sem);
+	sscanf(page, "%llx\n", &subsys->serial);
+	up_write(&nvmet_config_sem);
+
+	return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
 
 static struct configfs_attribute *nvmet_subsys_attrs[] = {
 	&nvmet_subsys_attr_attr_allow_any_host,
-	&nvmet_subsys_attr_version,
+	&nvmet_subsys_attr_attr_version,
+	&nvmet_subsys_attr_attr_serial,
 	NULL,
 };
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b5b4ac103748..f4b02bb4a1a8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -767,9 +767,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
 
-	/* generate a random serial number as our controllers are ephemeral: */
-	get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
-
 	kref_init(&ctrl->ref);
 	ctrl->subsys = subsys;
 
@@ -928,6 +925,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 		return NULL;
 
 	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+	/* generate a random serial number as our controllers are ephemeral: */
+	get_random_bytes(&subsys->serial, sizeof(subsys->serial));
 
 	switch (type) {
 	case NVME_NQN_NVME:
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1e6dcc241b3c..d5801c150b1c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1174,14 +1174,14 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 	 */
 	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
 		ret = VERR_CR_ASSOC_LEN;
-	else if (rqst->desc_list_len <
-			cpu_to_be32(FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN))
+	else if (be32_to_cpu(rqst->desc_list_len) <
+			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
 		ret = VERR_CR_ASSOC_RQST_LEN;
 	else if (rqst->assoc_cmd.desc_tag !=
 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
 		ret = VERR_CR_ASSOC_CMD;
-	else if (rqst->assoc_cmd.desc_len <
-			cpu_to_be32(FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN))
+	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
+			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
 		ret = VERR_CR_ASSOC_CMD_LEN;
 	else if (!rqst->assoc_cmd.ersp_ratio ||
 		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
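The fc.c change above fixes an ordering bug, not just sparse noise: wrapping both sides of a less-than comparison in cpu_to_be32() does not preserve magnitude ordering on little-endian machines, so the length checks could reject valid requests or accept short ones. Converting the wire value to CPU order before comparing is the safe form; a small demonstration:

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t bswap32(uint32_t x)	/* cpu_to_be32() on little-endian */
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x & 0xff00) << 8) | (x << 24);
}

int main(void)
{
	uint32_t wire = bswap32(256);	/* peer sent desc_list_len = 256 */
	uint32_t min = 24;

	/* Byte-swapped compare: 0x00010000 < 0x18000000 -> wrongly "too short" */
	printf("swapped compare rejects valid len: %d\n", wire < bswap32(min));
	/* CPU-order compare: 256 < 24 is false -> correctly accepted */
	printf("cpu-order compare rejects it:      %d\n", bswap32(wire) < min);
	return 0;
}
```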
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 747bbdb4f9c6..e3b244c7e443 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -112,7 +112,6 @@ struct nvmet_ctrl {
 
 	struct mutex		lock;
 	u64			cap;
-	u64			serial;
 	u32			cc;
 	u32			csts;
 
@@ -152,6 +151,7 @@ struct nvmet_subsys {
 	u16			max_qid;
 
 	u64			ver;
+	u64			serial;
 	char			*subsysnqn;
 
 	struct config_group	group;
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index a0d4ede9b8fc..63e3eb55f3ac 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -170,7 +170,7 @@ static const struct of_device_id rockchip_efuse_match[] = {
 		.data = (void *)&rockchip_rk3288_efuse_read,
 	},
 	{
-		.compatible = "rockchip,rk322x-efuse",
+		.compatible = "rockchip,rk3228-efuse",
 		.data = (void *)&rockchip_rk3288_efuse_read,
 	},
 	{
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 077f62e208aa..6a4367cc9caa 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3401,9 +3401,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
 		if (is_write) {
 			req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
 
-			rc = copy_from_user(kbuf, ubuf, ulen);
-			if (unlikely(rc))
+			if (copy_from_user(kbuf, ubuf, ulen)) {
+				rc = -EFAULT;
 				goto out;
+			}
 		}
 	}
 
@@ -3431,8 +3432,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
 		goto out;
 	}
 
-	if (ulen && !is_write)
-		rc = copy_to_user(ubuf, kbuf, ulen);
+	if (ulen && !is_write) {
+		if (copy_to_user(ubuf, kbuf, ulen))
+			rc = -EFAULT;
+	}
 out:
 	kfree(buf);
 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 551d103c27f1..2bfea7082e3a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1693,7 +1693,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
 
 static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
 {
-	const u8 trans_tx_err_code_prio[] = {
+	static const u8 trans_tx_err_code_prio[] = {
 		TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS,
 		TRANS_TX_ERR_PHY_NOT_ENABLE,
 		TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION,
@@ -1738,7 +1738,7 @@ static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
 
 static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
 {
-	const u8 trans_rx_err_code_prio[] = {
+	static const u8 trans_rx_err_code_prio[] = {
 		TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR,
 		TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR,
 		TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM,
@@ -1784,7 +1784,7 @@ static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
 
 static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
 {
-	const u8 dma_tx_err_code_prio[] = {
+	static const u8 dma_tx_err_code_prio[] = {
 		DMA_TX_UNEXP_XFER_ERR,
 		DMA_TX_UNEXP_RETRANS_ERR,
 		DMA_TX_XFER_LEN_OVERFLOW,
@@ -1810,7 +1810,7 @@ static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
 
 static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
 {
-	const u8 sipc_rx_err_code_prio[] = {
+	static const u8 sipc_rx_err_code_prio[] = {
 		SIPC_RX_FIS_STATUS_ERR_BIT_VLD,
 		SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR,
 		SIPC_RX_FIS_STATUS_BSY_BIT_ERR,
@@ -1836,7 +1836,7 @@ static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
 
 static int parse_dma_rx_err_code_v2_hw(u32 err_msk)
 {
-	const u8 dma_rx_err_code_prio[] = {
+	static const u8 dma_rx_err_code_prio[] = {
 		DMA_RX_UNKNOWN_FRM_ERR,
 		DMA_RX_DATA_LEN_OVERFLOW,
 		DMA_RX_DATA_LEN_UNDERFLOW,
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 47f66e949745..ed197bc8e801 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -213,7 +213,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
  * @task_context:
  *
  */
-static void scu_ssp_reqeust_construct_task_context(
+static void scu_ssp_request_construct_task_context(
 	struct isci_request *ireq,
 	struct scu_task_context *task_context)
 {
@@ -425,7 +425,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
 	u8 prot_type = scsi_get_prot_type(scmd);
 	u8 prot_op = scsi_get_prot_op(scmd);
 
-	scu_ssp_reqeust_construct_task_context(ireq, task_context);
+	scu_ssp_request_construct_task_context(ireq, task_context);
 
 	task_context->ssp_command_iu_length =
 		sizeof(struct ssp_cmd_iu) / sizeof(u32);
@@ -472,7 +472,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
 {
 	struct scu_task_context *task_context = ireq->tc;
 
-	scu_ssp_reqeust_construct_task_context(ireq, task_context);
+	scu_ssp_request_construct_task_context(ireq, task_context);
 
 	task_context->control_frame                = 1;
 	task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
@@ -495,7 +495,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
  * the command buffer is complete. Revisit task context construction to
  * determine what is common for SSP/SMP/STP task context structures.
  */
-static void scu_sata_reqeust_construct_task_context(
+static void scu_sata_request_construct_task_context(
 	struct isci_request *ireq,
 	struct scu_task_context *task_context)
 {
@@ -562,7 +562,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq
 {
 	struct scu_task_context *task_context = ireq->tc;
 
-	scu_sata_reqeust_construct_task_context(ireq, task_context);
+	scu_sata_request_construct_task_context(ireq, task_context);
 
 	task_context->control_frame         = 0;
 	task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
@@ -613,7 +613,7 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
 	struct scu_task_context *task_context = ireq->tc;
 
 	/* Build the STP task context structure */
-	scu_sata_reqeust_construct_task_context(ireq, task_context);
+	scu_sata_request_construct_task_context(ireq, task_context);
 
 	/* Copy over the SGL elements */
 	sci_request_build_sgl(ireq);
@@ -1401,7 +1401,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
  * @data_buffer: The buffer of data to be copied.
  * @length: The length of the data transfer.
  *
- * Copy the data from the buffer for the length specified to the IO reqeust SGL
+ * Copy the data from the buffer for the length specified to the IO request SGL
  * specified data region. enum sci_status
  */
 static enum sci_status
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index fd501f8dbb11..8660f923ace0 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -573,7 +573,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
 		event = DISC_EV_FAILED;
 	}
 	if (error)
-		fc_disc_error(disc, fp);
+		fc_disc_error(disc, ERR_PTR(error));
 	else if (event != DISC_EV_NONE)
 		fc_disc_done(disc, event);
 	fc_frame_free(fp);
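
The fix matters because fp is not a usable error carrier on this path; fc_disc_error() expects an ERR_PTR()-encoded value that it can decode with PTR_ERR(). A short sketch of the kernel's pointer-encoded error convention (hypothetical consumer):

#include <linux/err.h>
#include <linux/printk.h>

/* Hypothetical consumer of an ERR_PTR()-encoded pointer. */
static void demo_handle(void *fp)
{
	if (IS_ERR(fp)) {
		pr_err("frame error: %ld\n", PTR_ERR(fp));
		return;
	}
	/* ... fp is a real pointer from here on ... */
}
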
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index b58bba4604e8..7786c97e033f 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1227,7 +1227,7 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
 
 		if (rdata->spp_type != FC_TYPE_FCP) {
 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-			    "Not offlading since since spp type isn't FCP\n");
+			    "Not offloading since spp type isn't FCP\n");
 			break;
 		}
 		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 32632c9b2276..91d2f51c351b 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -23,11 +23,17 @@
 #include <linux/qed/qed_iscsi_if.h>
 #include <linux/qed/qed_ll2_if.h>
 #include "qedi_version.h"
+#include "qedi_nvm_iscsi_cfg.h"
 
 #define QEDI_MODULE_NAME		"qedi"
 
 struct qedi_endpoint;
 
+#ifndef GET_FIELD2
+#define GET_FIELD2(value, name) \
+	(((value) & (name ## _MASK)) >> (name ## _OFFSET))
+#endif
+
 /*
  * PCI function probe defines
  */
@@ -66,6 +72,11 @@ struct qedi_endpoint;
 #define QEDI_HW_DMA_BOUNDARY	0xfff
 #define QEDI_PATH_HANDLE	0xFE0000000UL
 
+enum qedi_nvm_tgts {
+	QEDI_NVM_TGT_PRI,
+	QEDI_NVM_TGT_SEC,
+};
+
 struct qedi_uio_ctrl {
 	/* meta data */
 	u32 uio_hsi_version;
@@ -283,6 +294,8 @@ struct qedi_ctx {
 	void *bdq_pbl_list;
 	dma_addr_t bdq_pbl_list_dma;
 	u8 bdq_pbl_list_num_entries;
+	struct nvm_iscsi_cfg *iscsi_cfg;
+	dma_addr_t nvm_buf_dma;
 	void __iomem *bdq_primary_prod;
 	void __iomem *bdq_secondary_prod;
 	u16 bdq_prod_idx;
@@ -337,6 +350,10 @@ struct qedi_ctx {
 	bool use_fast_sge;
 
 	atomic_t num_offloads;
+#define SYSFS_FLAG_FW_SEL_BOOT 2
+#define IPV6_LEN	41
+#define IPV4_LEN	17
+	struct iscsi_boot_kset *boot_kset;
 };
 
 struct qedi_work {
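
The GET_FIELD2() macro added above extracts a bit-field by masking and shifting; its name argument is token-pasted with _MASK and _OFFSET to locate the field description. A worked example with a hypothetical field:

/* Hypothetical field occupying bits 4..11 of a register word. */
#define DEMO_FIELD_MASK		0x00000FF0
#define DEMO_FIELD_OFFSET	4

static u32 demo_extract(u32 reg)
{
	/* demo_extract(0x00000A50) == (0xA50 & 0xFF0) >> 4 == 0xA5 */
	return GET_FIELD2(reg, DEMO_FIELD);
}
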
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 19254bd739d9..93d54acd4a22 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1411,7 +1411,7 @@ static void qedi_tmf_work(struct work_struct *work)
 
 	list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
 	if (!list_work) {
-		QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n");
+		QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
 		goto abort_ret;
 	}
 
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5f5a4ef2e529..2c3783684815 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -19,6 +19,7 @@
 #include <linux/mm.h>
 #include <linux/if_vlan.h>
 #include <linux/cpu.h>
+#include <linux/iscsi_boot_sysfs.h>
 
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -1143,6 +1144,30 @@ exit_setup_int:
 	return rc;
 }
 
+static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
+{
+	if (qedi->iscsi_cfg)
+		dma_free_coherent(&qedi->pdev->dev,
+				  sizeof(struct nvm_iscsi_cfg),
+				  qedi->iscsi_cfg, qedi->nvm_buf_dma);
+}
+
+static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
+{
+	qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
+					     sizeof(struct nvm_iscsi_cfg),
+					     &qedi->nvm_buf_dma, GFP_KERNEL);
+	if (!qedi->iscsi_cfg) {
+		QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
+		return -ENOMEM;
+	}
+	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+		  "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
+		  qedi->nvm_buf_dma);
+
+	return 0;
+}
+
 static void qedi_free_bdq(struct qedi_ctx *qedi)
 {
 	int i;
@@ -1183,6 +1208,7 @@ static void qedi_free_global_queues(struct qedi_ctx *qedi)
 		kfree(gl[i]);
 	}
 	qedi_free_bdq(qedi);
+	qedi_free_nvm_iscsi_cfg(qedi);
 }
 
 static int qedi_alloc_bdq(struct qedi_ctx *qedi)
@@ -1309,6 +1335,11 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
 	if (rc)
 		goto mem_alloc_failure;
 
+	/* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
+	rc = qedi_alloc_nvm_iscsi_cfg(qedi);
+	if (rc)
+		goto mem_alloc_failure;
+
 	/* Allocate a CQ and an associated PBL for each MSI-X
 	 * vector.
 	 */
@@ -1671,6 +1702,387 @@ void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
 	qedi_ops->ll2->start(qedi->cdev, &params);
 }
 
+/**
+ * qedi_get_nvram_block - Scan the iSCSI NVRAM blocks (while accounting
+ * for gaps) for the one matching the absolute PF id of the QEDI device.
+ */
+static struct nvm_iscsi_block *
+qedi_get_nvram_block(struct qedi_ctx *qedi)
+{
+	int i;
+	u8 pf;
+	u32 flags;
+	struct nvm_iscsi_block *block;
+
+	pf = qedi->dev_info.common.abs_pf_id;
+	block = &qedi->iscsi_cfg->block[0];
+	for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
+		flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
+			NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
+		if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY |
+				NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) &&
+			(pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK)
+				>> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET))
+			return block;
+	}
+	return NULL;
+}
+
+static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
+{
+	struct qedi_ctx *qedi = data;
+	struct nvm_iscsi_initiator *initiator;
+	char *str = buf;
+	int rc = 1;
+	u32 ipv6_en, dhcp_en, ip_len;
+	struct nvm_iscsi_block *block;
+	char *fmt, *ip, *sub, *gw;
+
+	block = qedi_get_nvram_block(qedi);
+	if (!block)
+		return 0;
+
+	initiator = &block->initiator;
+	ipv6_en = block->generic.ctrl_flags &
+		  NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
+	dhcp_en = block->generic.ctrl_flags &
+		  NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED;
+	/* Static IP assignments. */
+	fmt = ipv6_en ? "%pI6\n" : "%pI4\n";
+	ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte;
+	ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
+	sub = ipv6_en ? initiator->ipv6.subnet_mask.byte :
+	      initiator->ipv4.subnet_mask.byte;
+	gw = ipv6_en ? initiator->ipv6.gateway.byte :
+	     initiator->ipv4.gateway.byte;
+	/* DHCP IP adjustments. */
+	fmt = dhcp_en ? "%s\n" : fmt;
+	if (dhcp_en) {
+		ip = ipv6_en ? "0::0" : "0.0.0.0";
+		sub = ip;
+		gw = ip;
+		ip_len = ipv6_en ? 5 : 8;
+	}
+
+	switch (type) {
+	case ISCSI_BOOT_ETH_IP_ADDR:
+		rc = snprintf(str, ip_len, fmt, ip);
+		break;
+	case ISCSI_BOOT_ETH_SUBNET_MASK:
+		rc = snprintf(str, ip_len, fmt, sub);
+		break;
+	case ISCSI_BOOT_ETH_GATEWAY:
+		rc = snprintf(str, ip_len, fmt, gw);
+		break;
+	case ISCSI_BOOT_ETH_FLAGS:
+		rc = snprintf(str, 3, "%hhd\n",
+			      SYSFS_FLAG_FW_SEL_BOOT);
+		break;
+	case ISCSI_BOOT_ETH_INDEX:
+		rc = snprintf(str, 3, "0\n");
+		break;
+	case ISCSI_BOOT_ETH_MAC:
+		rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN);
+		break;
+	case ISCSI_BOOT_ETH_VLAN:
+		rc = snprintf(str, 12, "%d\n",
+			      GET_FIELD2(initiator->generic_cont0,
+					 NVM_ISCSI_CFG_INITIATOR_VLAN));
+		break;
+	case ISCSI_BOOT_ETH_ORIGIN:
+		if (dhcp_en)
+			rc = snprintf(str, 3, "3\n");
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+
+	return rc;
+}
+
+static umode_t qedi_eth_get_attr_visibility(void *data, int type)
+{
+	int rc = 1;
+
+	switch (type) {
+	case ISCSI_BOOT_ETH_FLAGS:
+	case ISCSI_BOOT_ETH_MAC:
+	case ISCSI_BOOT_ETH_INDEX:
+	case ISCSI_BOOT_ETH_IP_ADDR:
+	case ISCSI_BOOT_ETH_SUBNET_MASK:
+	case ISCSI_BOOT_ETH_GATEWAY:
+	case ISCSI_BOOT_ETH_ORIGIN:
+	case ISCSI_BOOT_ETH_VLAN:
+		rc = 0444;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
+{
+	struct qedi_ctx *qedi = data;
+	struct nvm_iscsi_initiator *initiator;
+	char *str = buf;
+	int rc;
+	struct nvm_iscsi_block *block;
+
+	block = qedi_get_nvram_block(qedi);
+	if (!block)
+		return 0;
+
+	initiator = &block->initiator;
+
+	switch (type) {
+	case ISCSI_BOOT_INI_INITIATOR_NAME:
+		rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+			      initiator->initiator_name.byte);
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static umode_t qedi_ini_get_attr_visibility(void *data, int type)
+{
+	int rc;
+
+	switch (type) {
+	case ISCSI_BOOT_INI_INITIATOR_NAME:
+		rc = 0444;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static ssize_t
+qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
+			char *buf, enum qedi_nvm_tgts idx)
+{
+	char *str = buf;
+	int rc = 1;
+	u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
+	struct nvm_iscsi_block *block;
+	char *chap_name, *chap_secret;
+	char *mchap_name, *mchap_secret;
+
+	block = qedi_get_nvram_block(qedi);
+	if (!block)
+		goto exit_show_tgt_info;
+
+	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
+		  "Port:%d, tgt_idx:%d\n",
+		  GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx);
+
+	ctrl_flags = block->target[idx].ctrl_flags &
+		     NVM_ISCSI_CFG_TARGET_ENABLED;
+
+	if (!ctrl_flags) {
+		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
+			  "Target disabled\n");
+		goto exit_show_tgt_info;
+	}
+
+	ipv6_en = block->generic.ctrl_flags &
+		  NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
+	ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
+	chap_en = block->generic.ctrl_flags &
+		  NVM_ISCSI_CFG_GEN_CHAP_ENABLED;
+	chap_name = chap_en ? block->initiator.chap_name.byte : NULL;
+	chap_secret = chap_en ? block->initiator.chap_password.byte : NULL;
+
+	mchap_en = block->generic.ctrl_flags &
+		  NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED;
+	mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL;
+	mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL;
+
+	switch (type) {
+	case ISCSI_BOOT_TGT_NAME:
+		rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+			      block->target[idx].target_name.byte);
+		break;
+	case ISCSI_BOOT_TGT_IP_ADDR:
+		if (ipv6_en)
+			rc = snprintf(str, ip_len, "%pI6\n",
+				      block->target[idx].ipv6_addr.byte);
+		else
+			rc = snprintf(str, ip_len, "%pI4\n",
+				      block->target[idx].ipv4_addr.byte);
+		break;
+	case ISCSI_BOOT_TGT_PORT:
+		rc = snprintf(str, 12, "%d\n",
+			      GET_FIELD2(block->target[idx].generic_cont0,
+					 NVM_ISCSI_CFG_TARGET_TCP_PORT));
+		break;
+	case ISCSI_BOOT_TGT_LUN:
+		rc = snprintf(str, 22, "%.*d\n",
+			      block->target[idx].lun.value[1],
+			      block->target[idx].lun.value[0]);
+		break;
+	case ISCSI_BOOT_TGT_CHAP_NAME:
+		rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
+			      chap_name);
+		break;
+	case ISCSI_BOOT_TGT_CHAP_SECRET:
+		rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
+			      chap_secret);
+		break;
+	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+		rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
+			      mchap_name);
+		break;
+	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+		rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
+			      mchap_secret);
+		break;
+	case ISCSI_BOOT_TGT_FLAGS:
+		rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+		break;
+	case ISCSI_BOOT_TGT_NIC_ASSOC:
+		rc = snprintf(str, 3, "0\n");
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+
+exit_show_tgt_info:
+	return rc;
+}
+
+static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf)
+{
+	struct qedi_ctx *qedi = data;
+
+	return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI);
+}
+
+static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf)
+{
+	struct qedi_ctx *qedi = data;
+
+	return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC);
+}
+
+static umode_t qedi_tgt_get_attr_visibility(void *data, int type)
+{
+	int rc;
+
+	switch (type) {
+	case ISCSI_BOOT_TGT_NAME:
+	case ISCSI_BOOT_TGT_IP_ADDR:
+	case ISCSI_BOOT_TGT_PORT:
+	case ISCSI_BOOT_TGT_LUN:
+	case ISCSI_BOOT_TGT_CHAP_NAME:
+	case ISCSI_BOOT_TGT_CHAP_SECRET:
+	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+	case ISCSI_BOOT_TGT_NIC_ASSOC:
+	case ISCSI_BOOT_TGT_FLAGS:
+		rc = 0444;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static void qedi_boot_release(void *data)
+{
+	struct qedi_ctx *qedi = data;
+
+	scsi_host_put(qedi->shost);
+}
+
+static int qedi_get_boot_info(struct qedi_ctx *qedi)
+{
+	int ret = 1;
+	u16 len;
+
+	len = sizeof(struct nvm_iscsi_cfg);
+
+	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+		  "Get NVM iSCSI CFG image\n");
+	ret = qedi_ops->common->nvm_get_image(qedi->cdev,
+					      QED_NVM_IMAGE_ISCSI_CFG,
+					      (char *)qedi->iscsi_cfg, len);
+	if (ret)
+		QEDI_ERR(&qedi->dbg_ctx,
+			 "Could not get NVM image. ret = %d\n", ret);
+
+	return ret;
+}
+
+static int qedi_setup_boot_info(struct qedi_ctx *qedi)
+{
+	struct iscsi_boot_kobj *boot_kobj;
+
+	if (qedi_get_boot_info(qedi))
+		return -EPERM;
+
+	qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no);
+	if (!qedi->boot_kset)
+		goto kset_free;
+
+	if (!scsi_host_get(qedi->shost))
+		goto kset_free;
+
+	boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi,
+					     qedi_show_boot_tgt_pri_info,
+					     qedi_tgt_get_attr_visibility,
+					     qedi_boot_release);
+	if (!boot_kobj)
+		goto put_host;
+
+	if (!scsi_host_get(qedi->shost))
+		goto kset_free;
+
+	boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi,
+					     qedi_show_boot_tgt_sec_info,
+					     qedi_tgt_get_attr_visibility,
+					     qedi_boot_release);
+	if (!boot_kobj)
+		goto put_host;
+
+	if (!scsi_host_get(qedi->shost))
+		goto kset_free;
+
+	boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi,
+						qedi_show_boot_ini_info,
+						qedi_ini_get_attr_visibility,
+						qedi_boot_release);
+	if (!boot_kobj)
+		goto put_host;
+
+	if (!scsi_host_get(qedi->shost))
+		goto kset_free;
+
+	boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi,
+					       qedi_show_boot_eth_info,
+					       qedi_eth_get_attr_visibility,
+					       qedi_boot_release);
+	if (!boot_kobj)
+		goto put_host;
+
+	return 0;
+
+put_host:
+	scsi_host_put(qedi->shost);
+kset_free:
+	iscsi_boot_destroy_kset(qedi->boot_kset);
+	return -ENOMEM;
+}
+
 static void __qedi_remove(struct pci_dev *pdev, int mode)
 {
 	struct qedi_ctx *qedi = pci_get_drvdata(pdev);
@@ -1724,6 +2136,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
 			qedi->ll2_recv_thread = NULL;
 		}
 		qedi_ll2_free_skbs(qedi);
+
+		if (qedi->boot_kset)
+			iscsi_boot_destroy_kset(qedi->boot_kset);
 	}
 }
 
@@ -1967,6 +2382,10 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
 		/* F/w needs 1st task context memory entry for performance */
 		set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
 		atomic_set(&qedi->num_offloads, 0);
+
+		if (qedi_setup_boot_info(qedi))
+			QEDI_ERR(&qedi->dbg_ctx,
+				 "No iSCSI boot target configured\n");
 	}
 
 	return 0;
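
Note the scsi_host_get() before every iscsi_boot_create_*() call in qedi_setup_boot_info() above: each boot kobject holds its own host reference, released via qedi_boot_release(), so the sysfs objects can outlive the probe path without the Scsi_Host vanishing underneath them. A reduced sketch of the pairing (demo_* names are hypothetical; the show/visibility/release callbacks are assumed to exist):

static int demo_register_target(struct iscsi_boot_kset *kset,
				struct Scsi_Host *shost)
{
	struct iscsi_boot_kobj *kobj;

	if (!scsi_host_get(shost))	/* reference owned by the kobject */
		return -ENODEV;

	kobj = iscsi_boot_create_target(kset, 0, shost, demo_show,
					demo_visible, demo_release);
	if (!kobj) {
		scsi_host_put(shost);	/* kobject never took ownership */
		return -ENOMEM;
	}
	return 0;
}
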
diff --git a/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
new file mode 100644
index 000000000000..df39b69b366d
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
@@ -0,0 +1,210 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef NVM_ISCSI_CFG_H
+#define NVM_ISCSI_CFG_H
+
+#define NUM_OF_ISCSI_TARGET_PER_PF    4   /* Defined as per the
+					   * iSCSI iBFT constraint
+					   */
+#define NUM_OF_ISCSI_PF_SUPPORTED     4   /* One PF per port -
+					   * assuming a 4-port card
+					   */
+
+#define NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN  256
+
+union nvm_iscsi_dhcp_vendor_id {
+	u32 value[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN / 4];
+	u8  byte[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN];
+};
+
+#define NVM_ISCSI_IPV4_ADDR_BYTE_LEN 4
+union nvm_iscsi_ipv4_addr {
+	u32 addr;
+	u8  byte[NVM_ISCSI_IPV4_ADDR_BYTE_LEN];
+};
+
+#define NVM_ISCSI_IPV6_ADDR_BYTE_LEN 16
+union nvm_iscsi_ipv6_addr {
+	u32 addr[4];
+	u8  byte[NVM_ISCSI_IPV6_ADDR_BYTE_LEN];
+};
+
+struct nvm_iscsi_initiator_ipv4 {
+	union nvm_iscsi_ipv4_addr addr;				/* 0x0 */
+	union nvm_iscsi_ipv4_addr subnet_mask;			/* 0x4 */
+	union nvm_iscsi_ipv4_addr gateway;			/* 0x8 */
+	union nvm_iscsi_ipv4_addr primary_dns;			/* 0xC */
+	union nvm_iscsi_ipv4_addr secondary_dns;		/* 0x10 */
+	union nvm_iscsi_ipv4_addr dhcp_addr;			/* 0x14 */
+
+	union nvm_iscsi_ipv4_addr isns_server;			/* 0x18 */
+	union nvm_iscsi_ipv4_addr slp_server;			/* 0x1C */
+	union nvm_iscsi_ipv4_addr primary_radius_server;	/* 0x20 */
+	union nvm_iscsi_ipv4_addr secondary_radius_server;	/* 0x24 */
+
+	union nvm_iscsi_ipv4_addr rsvd[4];			/* 0x28 */
+};
+
+struct nvm_iscsi_initiator_ipv6 {
+	union nvm_iscsi_ipv6_addr addr;				/* 0x0 */
+	union nvm_iscsi_ipv6_addr subnet_mask;			/* 0x10 */
+	union nvm_iscsi_ipv6_addr gateway;			/* 0x20 */
+	union nvm_iscsi_ipv6_addr primary_dns;			/* 0x30 */
+	union nvm_iscsi_ipv6_addr secondary_dns;		/* 0x40 */
+	union nvm_iscsi_ipv6_addr dhcp_addr;			/* 0x50 */
+
+	union nvm_iscsi_ipv6_addr isns_server;			/* 0x60 */
+	union nvm_iscsi_ipv6_addr slp_server;			/* 0x70 */
+	union nvm_iscsi_ipv6_addr primary_radius_server;	/* 0x80 */
+	union nvm_iscsi_ipv6_addr secondary_radius_server;	/* 0x90 */
+
+	union nvm_iscsi_ipv6_addr rsvd[3];			/* 0xA0 */
+
+	u32   config;						/* 0xD0 */
+#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_MASK      0x000000FF
+#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_OFFSET    0
+
+	u32   rsvd_1[3];
+};
+
+#define NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN  256
+union nvm_iscsi_name {
+	u32 value[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN / 4];
+	u8  byte[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN];
+};
+
+#define NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN  256
+union nvm_iscsi_chap_name {
+	u32 value[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN / 4];
+	u8  byte[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN];
+};
+
+#define NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN  16 /* MD5 digest per RFC 1994
+					    * (CHAP) is 16 octets
+					    */
+union nvm_iscsi_chap_password {
+	u32 value[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN / 4];
+	u8 byte[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN];
+};
+
+union nvm_iscsi_lun {
+	u8  byte[8];
+	u32 value[2];
+};
+
+struct nvm_iscsi_generic {
+	u32 ctrl_flags;						/* 0x0 */
+#define NVM_ISCSI_CFG_GEN_CHAP_ENABLED                 BIT(0)
+#define NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED    BIT(1)
+#define NVM_ISCSI_CFG_GEN_DHCP_ISCSI_CONFIG_ENABLED    BIT(2)
+#define NVM_ISCSI_CFG_GEN_IPV6_ENABLED                 BIT(3)
+#define NVM_ISCSI_CFG_GEN_IPV4_FALLBACK_ENABLED        BIT(4)
+#define NVM_ISCSI_CFG_GEN_ISNS_WORLD_LOGIN             BIT(5)
+#define NVM_ISCSI_CFG_GEN_ISNS_SELECTIVE_LOGIN         BIT(6)
+#define NVM_ISCSI_CFG_GEN_ADDR_REDIRECT_ENABLED	       BIT(7)
+#define NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED          BIT(8)
+
+	u32 timeout;						/* 0x4 */
+#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_MASK       0x0000FFFF
+#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_OFFSET     0
+#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_MASK         0xFFFF0000
+#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_OFFSET       16
+
+	union nvm_iscsi_dhcp_vendor_id  dhcp_vendor_id;		/* 0x8  */
+	u32 rsvd[62];						/* 0x108 */
+};
+
+struct nvm_iscsi_initiator {
+	struct nvm_iscsi_initiator_ipv4 ipv4;			/* 0x0 */
+	struct nvm_iscsi_initiator_ipv6 ipv6;			/* 0x38 */
+
+	union nvm_iscsi_name           initiator_name;		/* 0x118 */
+	union nvm_iscsi_chap_name      chap_name;		/* 0x218 */
+	union nvm_iscsi_chap_password  chap_password;		/* 0x318 */
+
+	u32 generic_cont0;					/* 0x398 */
+#define NVM_ISCSI_CFG_INITIATOR_VLAN_MASK		0x0000FFFF
+#define NVM_ISCSI_CFG_INITIATOR_VLAN_OFFSET		0
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_MASK		0x00030000
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_OFFSET	16
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4		1
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_6		2
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4_AND_6	3
+
+	u32 ctrl_flags;
+#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_PRIORITY_V6     BIT(0)
+#define NVM_ISCSI_CFG_INITIATOR_VLAN_ENABLED               BIT(1)
+
+	u32 rsvd[116];						/* 0x32C */
+};
+
+struct nvm_iscsi_target {
+	u32 ctrl_flags;						/* 0x0 */
+#define NVM_ISCSI_CFG_TARGET_ENABLED            BIT(0)
+#define NVM_ISCSI_CFG_BOOT_TIME_LOGIN_STATUS    BIT(1)
+
+	u32 generic_cont0;					/* 0x4 */
+#define NVM_ISCSI_CFG_TARGET_TCP_PORT_MASK      0x0000FFFF
+#define NVM_ISCSI_CFG_TARGET_TCP_PORT_OFFSET    0
+
+	u32 ip_ver;
+#define NVM_ISCSI_CFG_IPv4       4
+#define NVM_ISCSI_CFG_IPv6       6
+
+	u32 rsvd_1[7];						/* 0x24 */
+	union nvm_iscsi_ipv4_addr ipv4_addr;			/* 0x28 */
+	union nvm_iscsi_ipv6_addr ipv6_addr;			/* 0x2C */
+	union nvm_iscsi_lun lun;				/* 0x3C */
+
+	union nvm_iscsi_name           target_name;		/* 0x44 */
+	union nvm_iscsi_chap_name      chap_name;		/* 0x144 */
+	union nvm_iscsi_chap_password  chap_password;		/* 0x244 */
+
+	u32 rsvd_2[107];					/* 0x2C4 */
+};
+
+struct nvm_iscsi_block {
+	u32 id;							/* 0x0 */
+#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK         0x0000000F
+#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET       0
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK            0x00000FF0
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET          4
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY    BIT(0)
+#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED       BIT(1)
+
+	u32 rsvd_1[5];						/* 0x4 */
+
+	struct nvm_iscsi_generic     generic;			/* 0x18 */
+	struct nvm_iscsi_initiator   initiator;			/* 0x218 */
+	struct nvm_iscsi_target      target[NUM_OF_ISCSI_TARGET_PER_PF];
+								/* 0x718 */
+
+	u32 rsvd_2[58];						/* 0x1718 */
+	/* total size - 0x1800 - 6K block */
+};
+
+struct nvm_iscsi_cfg {
+	u32 id;							/* 0x0 */
+#define NVM_ISCSI_CFG_BLK_VERSION_MINOR_MASK     0x000000FF
+#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR_MASK     0x0000FF00
+#define NVM_ISCSI_CFG_BLK_SIGNATURE_MASK         0xFFFF0000
+#define NVM_ISCSI_CFG_BLK_SIGNATURE              0x49430000 /* IC - iSCSI
+							     * Config
+							     */
+
+#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR          0
+#define NVM_ISCSI_CFG_BLK_VERSION_MINOR          10
+#define NVM_ISCSI_CFG_BLK_VERSION ((NVM_ISCSI_CFG_BLK_VERSION_MAJOR << 8) | \
+				   NVM_ISCSI_CFG_BLK_VERSION_MINOR)
+
+	struct nvm_iscsi_block	block[NUM_OF_ISCSI_PF_SUPPORTED]; /* 0x4 */
+};
+
+#endif
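
The id word of struct nvm_iscsi_cfg packs a 16-bit "IC" signature above an 8.8 major/minor version, per the masks above, so a consumer would normally validate the image before trusting any block in it. A hedged sketch of such a check (hypothetical helper, not part of the driver):

/* Hypothetical validation of a freshly read NVM iSCSI config image. */
static bool demo_cfg_valid(const struct nvm_iscsi_cfg *cfg)
{
	if ((cfg->id & NVM_ISCSI_CFG_BLK_SIGNATURE_MASK) !=
	    NVM_ISCSI_CFG_BLK_SIGNATURE)
		return false;	/* not an "IC" image */

	/* Major version sits in bits 8..15, minor in bits 0..7. */
	return ((cfg->id & NVM_ISCSI_CFG_BLK_VERSION_MAJOR_MASK) >> 8) ==
	       NVM_ISCSI_CFG_BLK_VERSION_MAJOR;
}
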
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index c2dc836dc484..e101cd3043b9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3727,7 +3727,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
 	h &= QLA_CMD_HANDLE_MASK;
 
 	if (h != QLA_TGT_NULL_HANDLE) {
-		if (unlikely(h > req->num_outstanding_cmds)) {
+		if (unlikely(h >= req->num_outstanding_cmds)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe052,
 			    "qla_target(%d): Wrong handle %x received\n",
 			    vha->vp_idx, handle);
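
The one-character change closes an off-by-one: an array with num_outstanding_cmds entries has valid indices 0..num_outstanding_cmds - 1, so h == num_outstanding_cmds must be rejected as well. The rule in isolation:

/* An index i into an array of n entries is valid iff i < n; rejecting
 * only i > n (the old test) wrongly admits the out-of-bounds i == n.
 */
static bool demo_index_ok(unsigned int i, unsigned int n)
{
	return i < n;
}
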
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 21225d62b0c1..1e82d4128a84 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -758,8 +758,11 @@ static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
 		if (hp->dxferp || hp->dxfer_len > 0)
 			return false;
 		return true;
-	case SG_DXFER_TO_DEV:
 	case SG_DXFER_FROM_DEV:
+		if (hp->dxfer_len < 0)
+			return false;
+		return true;
+	case SG_DXFER_TO_DEV:
 	case SG_DXFER_TO_FROM_DEV:
 		if (!hp->dxferp || hp->dxfer_len == 0)
 			return false;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8b93197daefe..9be211d68b15 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -837,6 +837,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
 	.eh_timed_out = virtscsi_eh_timed_out,
+	.slave_alloc = virtscsi_device_alloc,
 
 	.can_queue = 1024,
 	.dma_boundary = UINT_MAX,
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 2afe3597982e..f4b7a98a7913 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -134,7 +134,6 @@ struct apid_data {
  * @spmic:		SPMI controller object
  * @ver_ops:		version dependent operations.
  * @ppid_to_apid	in-memory copy of PPID -> channel (APID) mapping table.
- *			v2 only.
  */
 struct spmi_pmic_arb {
 	void __iomem		*rd_base;
@@ -1016,6 +1015,13 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 		goto err_put_ctrl;
 	}
 
+	pa->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
+					sizeof(*pa->ppid_to_apid), GFP_KERNEL);
+	if (!pa->ppid_to_apid) {
+		err = -ENOMEM;
+		goto err_put_ctrl;
+	}
+
 	hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
 
 	if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
@@ -1048,15 +1054,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 			err = PTR_ERR(pa->wr_base);
 			goto err_put_ctrl;
 		}
-
-		pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
-						PMIC_ARB_MAX_PPID,
-						sizeof(*pa->ppid_to_apid),
-						GFP_KERNEL);
-		if (!pa->ppid_to_apid) {
-			err = -ENOMEM;
-			goto err_put_ctrl;
-		}
 	}
 
 	dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 2b9b0941d9eb..6d23226e5f69 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -365,11 +365,23 @@ static int spmi_drv_remove(struct device *dev)
 	return 0;
 }
 
+static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	int ret;
+
+	ret = of_device_uevent_modalias(dev, env);
+	if (ret != -ENODEV)
+		return ret;
+
+	return 0;
+}
+
 static struct bus_type spmi_bus_type = {
 	.name		= "spmi",
 	.match		= spmi_device_match,
 	.probe		= spmi_drv_probe,
 	.remove		= spmi_drv_remove,
+	.uevent		= spmi_drv_uevent,
 };
 
 /**
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 268d4e6ef48a..ef28a1cb64ae 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -110,4 +110,6 @@ source "drivers/staging/ccree/Kconfig"
 
 source "drivers/staging/typec/Kconfig"
 
+source "drivers/staging/vboxvideo/Kconfig"
+
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index b93e6f5f0f6e..2918580bdb9e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_KS7010)		+= ks7010/
 obj-$(CONFIG_GREYBUS)		+= greybus/
 obj-$(CONFIG_BCM2835_VCHIQ)	+= vc04_services/
 obj-$(CONFIG_CRYPTO_DEV_CCREE)	+= ccree/
+obj-$(CONFIG_DRM_VBOXVIDEO)	+= vboxvideo/
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index b2e382888981..2f7bfc1c59e5 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3116,8 +3116,7 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
 		/* following line: 2-1 per STC */
 		ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
 		ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
-		/* following line: N-1 per STC */
-		ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
+		ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
 	} else { /* TRIG_EXT */
 		/* FIXME:  assert scan_begin_arg != 0, ret failure otherwise */
 		devpriv->ao_cmd2  |= NISTC_AO_CMD2_BC_GATE_ENA;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 85b242ec5f9b..8fc191d99927 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1640,8 +1640,13 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
 	ibmsg = tx->tx_msg;
 	ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
 
-	copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE,
-		       &from);
+	rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob,
+			    &from);
+	if (rc != payload_nob) {
+		kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
+		return -EFAULT;
+	}
+
 	nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
 	kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
 
@@ -1741,8 +1746,14 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
 			break;
 		}
 
-		copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload,
-			     IBLND_MSG_SIZE, to);
+		rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen,
+				  to);
+		if (rc != rlen) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = 0;
 		lnet_finalize(ni, lntmsg, 0);
 		break;
 
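Both hunks hinge on the iov_iter helpers' return convention: copy_from_iter() and copy_to_iter() return the number of bytes actually copied, so anything short of the requested length is a fault (the change also drops the old, bogus IBLND_MSG_SIZE length in favour of the real payload size). The bare idiom, with hypothetical names:

#include <linux/uio.h>

/* Sketch: a short copy from an iov_iter is treated as -EFAULT. */
static int demo_pull(void *buf, size_t nob, struct iov_iter *from)
{
	if (copy_from_iter(buf, nob, from) != nob)
		return -EFAULT;	/* partial copy: source pages went away */
	return 0;
}
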
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 002d09159896..a69007ef77bf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -132,7 +132,7 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
 		kfree(pcmd->parmbuf);
 	}
 
-	if (!pcmd->rsp) {
+	if (pcmd->rsp) {
 		if (pcmd->rspsz != 0) {
 			/* free rsp in cmd_obj */
 			kfree(pcmd->rsp);
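
The inverted test is the whole fix: previously the kfree() was only reachable when pcmd->rsp was NULL, so every real response buffer leaked while harmless kfree(NULL) calls ran instead. The intended shape in isolation (hypothetical struct):

/* Free an optional response buffer only when one was attached. */
if (cmd->rsp) {
	if (cmd->rspsz)
		kfree(cmd->rsp);
	cmd->rsp = NULL;	/* guard against a second free */
}
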
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 963235fd7292..d283341cfe43 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -43,6 +43,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
 	{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
 	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+	{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
 	{}	/* Terminating entry */
 };
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 944dd25924be..4754f7a20684 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -40,7 +40,7 @@ static unsigned int get_mxclk_freq(void)
 
 	pll_reg = peek32(MXCLK_PLL_CTRL);
 	M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT;
-	N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_M_SHIFT;
+	N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_N_SHIFT;
 	OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT;
 	POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT;
 
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 3aa4128703d5..67207b0554cd 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1053,6 +1053,26 @@ release_fb:
 	return err;
 }
 
+static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return -ENOMEM;
+
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags &
+					IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
+	kfree(ap);
+	return 0;
+}
+
 static int lynxfb_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
@@ -1061,6 +1081,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
 	int fbidx;
 	int err;
 
+	err = lynxfb_kick_out_firmware_fb(pdev);
+	if (err)
+		return err;
+
 	/* enable device */
 	err = pcim_enable_device(pdev);
 	if (err)
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 82e5de248947..67956e24779c 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2314,6 +2314,7 @@ static void __exit speakup_exit(void)
 	mutex_lock(&spk_mutex);
 	synth_release();
 	mutex_unlock(&spk_mutex);
+	spk_ttyio_unregister_ldisc();
 
 	speakup_kobj_exit();
 
@@ -2376,6 +2377,7 @@ static int __init speakup_init(void)
 	if (err)
 		goto error_kobjects;
 
+	spk_ttyio_register_ldisc();
 	synth_init(synth_name);
 	speakup_register_devsynth();
 	/*
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 87b6a0a4c54d..046040ac074c 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -48,6 +48,8 @@ void spk_stop_serial_interrupt(void);
 int spk_wait_for_xmitr(struct spk_synth *in_synth);
 void spk_serial_release(void);
 void spk_ttyio_release(void);
+void spk_ttyio_register_ldisc(void);
+void spk_ttyio_unregister_ldisc(void);
 
 void synth_buffer_skip_nonlatin1(void);
 u16 synth_buffer_getc(void);
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index ed8e96b06ead..fe340b07c482 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -154,12 +154,6 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
 	struct ktermios tmp_termios;
 	dev_t dev;
 
-	ret = tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops);
-	if (ret) {
-		pr_err("Error registering line discipline.\n");
-		return ret;
-	}
-
 	ret = get_dev_to_use(synth, &dev);
 	if (ret)
 		return ret;
@@ -196,10 +190,24 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
 	tty_unlock(tty);
 
 	ret = tty_set_ldisc(tty, N_SPEAKUP);
+	if (ret)
+		pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
 
 	return ret;
 }
 
+void spk_ttyio_register_ldisc(void)
+{
+	if (tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops))
+		pr_warn("speakup: Error registering line discipline. Most synths won't work.\n");
+}
+
+void spk_ttyio_unregister_ldisc(void)
+{
+	if (tty_unregister_ldisc(N_SPEAKUP))
+		pr_warn("speakup: Couldn't unregister ldisc\n");
+}
+
 static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
 {
 	if (in_synth->alive && speakup_tty && speakup_tty->ops->write) {
@@ -300,7 +308,7 @@ void spk_ttyio_release(void)
 
 	tty_ldisc_flush(speakup_tty);
 	tty_unlock(speakup_tty);
-	tty_ldisc_release(speakup_tty);
+	tty_release_struct(speakup_tty, speakup_tty->index);
 }
 EXPORT_SYMBOL_GPL(spk_ttyio_release);
 
diff --git a/drivers/staging/vboxvideo/Kconfig b/drivers/staging/vboxvideo/Kconfig
new file mode 100644
index 000000000000..a52746f9a670
--- /dev/null
+++ b/drivers/staging/vboxvideo/Kconfig
@@ -0,0 +1,12 @@
+config DRM_VBOXVIDEO
+	tristate "Virtual Box Graphics Card"
+	depends on DRM && X86 && PCI
+	select DRM_KMS_HELPER
+	help
+	  This is a KMS driver for the virtual Graphics Card used in
+	  Virtual Box virtual machines.
+
+	  Although it is possible to build this driver into the kernel, it
+	  is advised to build it as a module, so that it can be updated
+	  independently of the kernel. Select M to build this driver as a
+	  module and add support for these devices via drm/kms interfaces.
diff --git a/drivers/staging/vboxvideo/Makefile b/drivers/staging/vboxvideo/Makefile
new file mode 100644
index 000000000000..2d0b3bc7ad73
--- /dev/null
+++ b/drivers/staging/vboxvideo/Makefile
@@ -0,0 +1,7 @@
+ccflags-y := -Iinclude/drm
+
+vboxvideo-y :=  hgsmi_base.o modesetting.o vbva_base.o \
+		vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
+		vbox_mode.o vbox_prime.o vbox_ttm.o
+
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/staging/vboxvideo/TODO b/drivers/staging/vboxvideo/TODO
new file mode 100644
index 000000000000..ce764309b079
--- /dev/null
+++ b/drivers/staging/vboxvideo/TODO
@@ -0,0 +1,9 @@
+TODO:
+-Move the driver over to the atomic API
+-Stop using old load / unload drm_driver hooks
+-Get a full review from the drm-maintainers on dri-devel done on this driver
+-Extend this TODO with the results of that review
+
+Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
+Hans de Goede <hdegoede@redhat.com> and
+Michael Thayer <michael.thayer@oracle.com>.
diff --git a/drivers/staging/vboxvideo/hgsmi_base.c b/drivers/staging/vboxvideo/hgsmi_base.c
new file mode 100644
index 000000000000..15ff5f42e2cd
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_base.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_channels.h"
+#include "hgsmi_ch_setup.h"
+
+/**
+ * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
+ * @param    ctx          the context of the guest heap to use.
+ * @param    location     the offset chosen for the flags within guest VRAM.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
+{
+	struct hgsmi_buffer_location *p;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
+			       HGSMI_CC_HOST_FLAGS_LOCATION);
+	if (!p)
+		return -ENOMEM;
+
+	p->buf_location = location;
+	p->buf_len = sizeof(struct hgsmi_host_flags);
+
+	hgsmi_buffer_submit(ctx, p);
+	hgsmi_buffer_free(ctx, p);
+
+	return 0;
+}
+
+/**
+ * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
+ * @param    ctx                 the context of the guest heap to use.
+ * @param    caps                the capabilities to report, see vbva_caps.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps)
+{
+	struct vbva_caps *p;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
+	if (!p)
+		return -ENOMEM;
+
+	p->rc = VERR_NOT_IMPLEMENTED;
+	p->caps = caps;
+
+	hgsmi_buffer_submit(ctx, p);
+
+	WARN_ON_ONCE(RT_FAILURE(p->rc));
+
+	hgsmi_buffer_free(ctx, p);
+
+	return 0;
+}
+
+int hgsmi_test_query_conf(struct gen_pool *ctx)
+{
+	u32 value = 0;
+	int ret;
+
+	ret = hgsmi_query_conf(ctx, U32_MAX, &value);
+	if (ret)
+		return ret;
+
+	return value == U32_MAX ? 0 : -EIO;
+}
+
+/**
+ * Query the host for an HGSMI configuration parameter via an HGSMI command.
+ * @param  ctx        the context containing the heap used
+ * @param  index      the index of the parameter to query,
+ *                    @see vbva_conf32::index
+ * @param  value_ret  where to store the value of the parameter on success
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
+{
+	struct vbva_conf32 *p;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+			       VBVA_QUERY_CONF32);
+	if (!p)
+		return -ENOMEM;
+
+	p->index = index;
+	p->value = U32_MAX;
+
+	hgsmi_buffer_submit(ctx, p);
+
+	*value_ret = p->value;
+
+	hgsmi_buffer_free(ctx, p);
+
+	return 0;
+}
+
+/**
+ * Pass the host a new mouse pointer shape via an HGSMI command.
+ *
+ * @param  ctx      the context containing the heap to be used
+ * @param  flags    cursor flags, @see VMMDevReqMousePointer::flags
+ * @param  hot_x    horizontal position of the hot spot
+ * @param  hot_y    vertical position of the hot spot
+ * @param  width    width in pixels of the cursor
+ * @param  height   height in pixels of the cursor
+ * @param  pixels   pixel data, @see VMMDevReqMousePointer for the format
+ * @param  len      size in bytes of the pixel data
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
+			       u32 hot_x, u32 hot_y, u32 width, u32 height,
+			       u8 *pixels, u32 len)
+{
+	struct vbva_mouse_pointer_shape *p;
+	u32 pixel_len = 0;
+	int rc;
+
+	if (flags & VBOX_MOUSE_POINTER_SHAPE) {
+		/*
+		 * Size of the pointer data:
+		 * sizeof(AND mask) + sizeof(XOR mask)
+		 */
+		pixel_len = ((((width + 7) / 8) * height + 3) & ~3) +
+			 width * 4 * height;
+		if (pixel_len > len)
+			return -EINVAL;
+
+		/*
+		 * If a shape is supplied, always create the pointer visible.
+		 * See the comments in 'vboxUpdatePointerShape'.
+		 */
+		flags |= VBOX_MOUSE_POINTER_VISIBLE;
+	}
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
+			       VBVA_MOUSE_POINTER_SHAPE);
+	if (!p)
+		return -ENOMEM;
+
+	p->result = VINF_SUCCESS;
+	p->flags = flags;
+	p->hot_x = hot_x;
+	p->hot_y = hot_y;
+	p->width = width;
+	p->height = height;
+	if (pixel_len)
+		memcpy(p->data, pixels, pixel_len);
+
+	hgsmi_buffer_submit(ctx, p);
+
+	switch (p->result) {
+	case VINF_SUCCESS:
+		rc = 0;
+		break;
+	case VERR_NO_MEMORY:
+		rc = -ENOMEM;
+		break;
+	case VERR_NOT_SUPPORTED:
+		rc = -EBUSY;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	hgsmi_buffer_free(ctx, p);
+
+	return rc;
+}
+
+/**
+ * Report the guest cursor position.  The host may wish to use this information
+ * to re-position its own cursor (though this is currently unlikely).  The
+ * current host cursor position is returned.
+ * @param  ctx              The context containing the heap used.
+ * @param  report_position  Are we reporting a position?
+ * @param  x                Guest cursor X position.
+ * @param  y                Guest cursor Y position.
+ * @param  x_host           Host cursor X position is stored here.  Optional.
+ * @param  y_host           Host cursor Y position is stored here.  Optional.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
+			  u32 x, u32 y, u32 *x_host, u32 *y_host)
+{
+	struct vbva_cursor_position *p;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+			       VBVA_CURSOR_POSITION);
+	if (!p)
+		return -ENOMEM;
+
+	p->report_position = report_position;
+	p->x = x;
+	p->y = y;
+
+	hgsmi_buffer_submit(ctx, p);
+
+	*x_host = p->x;
+	*y_host = p->y;
+
+	hgsmi_buffer_free(ctx, p);
+
+	return 0;
+}
+
+/**
+ * @todo Mouse pointer position to be read from VMMDev memory, address of the
+ * memory region can be queried from VMMDev via an IOCTL. This VMMDev memory
+ * region will contain host information which is needed by the guest.
+ *
+ * Reading will not cause a switch to the host.
+ *
+ * Have to take into account:
+ *  * synchronization: host must write to the memory only from EMT,
+ *    large structures must be read under flag, which tells the host
+ *    that the guest is currently reading the memory (OWNER flag?).
+ *  * guest writes: maybe allocate a page for the host info and make
+ *    the page readonly for the guest.
+ *  * the information should be available only for additions drivers.
+ *  * VMMDev additions driver will inform the host which version of the info
+ *    it expects, host must support all versions.
+ */
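
Every helper in this file follows the same HGSMI round trip: allocate a command buffer from the guest heap, fill in the request, submit it (the host processes it synchronously), read any out-parameters back from the same buffer, then free it. A skeleton of the pattern (demo_cmd, DEMO_CHANNEL and DEMO_CODE are hypothetical placeholders):

struct demo_cmd {
	u32 in;
	u32 out;
};

static int demo_hgsmi_request(struct gen_pool *ctx, u32 in, u32 *out)
{
	struct demo_cmd *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), DEMO_CHANNEL, DEMO_CODE);
	if (!p)
		return -ENOMEM;

	p->in = in;			/* guest -> host parameter */
	hgsmi_buffer_submit(ctx, p);	/* host handles it synchronously */
	*out = p->out;			/* host wrote its reply in place */

	hgsmi_buffer_free(ctx, p);
	return 0;
}
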
diff --git a/drivers/staging/vboxvideo/hgsmi_ch_setup.h b/drivers/staging/vboxvideo/hgsmi_ch_setup.h
new file mode 100644
index 000000000000..8e6d9e11a69c
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_ch_setup.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __HGSMI_CH_SETUP_H__
+#define __HGSMI_CH_SETUP_H__
+
+/*
+ * Tell the host the location of the hgsmi_host_flags structure, where the
+ * host can write information about pending buffers, etc., and which the
+ * guest can poll quickly without needing port I/O accesses.
+ */
+#define HGSMI_CC_HOST_FLAGS_LOCATION 0
+
+struct hgsmi_buffer_location {
+	u32 buf_location;
+	u32 buf_len;
+} __packed;
+
+/* HGSMI setup and configuration data structures. */
+/* host->guest commands pending, should be accessed under FIFO lock only */
+#define HGSMIHOSTFLAGS_COMMANDS_PENDING    0x01u
+/* IRQ is fired, should be accessed under VGAState::lock only  */
+#define HGSMIHOSTFLAGS_IRQ                 0x02u
+/* vsync interrupt flag, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_VSYNC               0x10u
+/** monitor hotplug flag, should be accessed under VGAState::lock only */
+#define HGSMIHOSTFLAGS_HOTPLUG             0x20u
+/**
+ * Cursor capability state change flag, should be accessed under
+ * VGAState::lock only. @see vbva_conf32.
+ */
+#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u
+
+struct hgsmi_host_flags {
+	/*
+	 * Host flags can be accessed and modified in multiple threads
+	 * concurrently, e.g. CrOpenGL HGCM and GUI threads when completing
+	 * HGSMI 3D and Video Accel respectively, EMT thread when dealing with
+	 * HGSMI command processing, etc.
+	 * Besides settings/cleaning flags atomically, some flags have their
+	 * own special sync restrictions, see comments for flags above.
+	 */
+	u32 host_flags;
+	u32 reserved[3];
+} __packed;
+
+#endif
diff --git a/drivers/staging/vboxvideo/hgsmi_channels.h b/drivers/staging/vboxvideo/hgsmi_channels.h
new file mode 100644
index 000000000000..a2a34b2167b4
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_channels.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __HGSMI_CHANNELS_H__
+#define __HGSMI_CHANNELS_H__
+
+/*
+ * Each channel has an 8 bit identifier. There are a number of predefined
+ * (hardcoded) channels.
+ *
+ * HGSMI_CH_HGSMI channel can be used to map a string channel identifier
+ * to a free 16 bit numerical value. Values are allocated in the range
+ * [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
+ */
+
+/* A reserved channel value */
+#define HGSMI_CH_RESERVED				0x00
+/* HGSMI: setup and configuration */
+#define HGSMI_CH_HGSMI					0x01
+/* Graphics: VBVA */
+#define HGSMI_CH_VBVA					0x02
+/* Graphics: Seamless with a single guest region */
+#define HGSMI_CH_SEAMLESS				0x03
+/* Graphics: Seamless with separate host windows */
+#define HGSMI_CH_SEAMLESS2				0x04
+/* Graphics: OpenGL HW acceleration */
+#define HGSMI_CH_OPENGL					0x05
+
+/* The first channel index to be used for string mappings (inclusive) */
+#define HGSMI_CH_STRING_FIRST				0x20
+/* The last channel index for string mappings (inclusive) */
+#define HGSMI_CH_STRING_LAST				0xff
+
+#endif
diff --git a/drivers/staging/vboxvideo/hgsmi_defs.h b/drivers/staging/vboxvideo/hgsmi_defs.h
new file mode 100644
index 000000000000..5b21fb974d20
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_defs.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __HGSMI_DEFS_H__
+#define __HGSMI_DEFS_H__
+
+/* Buffer sequence type mask. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_MASK     0x03
+/* Single buffer, not a part of a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE   0x00
+/* The first buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_START    0x01
+/* A middle buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02
+/* The last buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_END      0x03
+
+/* 16 bytes buffer header. */
+struct hgsmi_buffer_header {
+	u32 data_size;		/* Size of data that follows the header. */
+	u8 flags;		/* HGSMI_BUFFER_HEADER_F_* */
+	u8 channel;		/* The channel the data must be routed to. */
+	u16 channel_info;	/* Opaque to the HGSMI, used by the channel. */
+
+	union {
+		/* Opaque placeholder to make the union 8 bytes. */
+		u8 header_data[8];
+
+		/* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */
+		struct {
+			u32 reserved1;	/* A reserved field, initialize to 0. */
+			u32 reserved2;	/* A reserved field, initialize to 0. */
+		} buffer;
+
+		/* HGSMI_BUFFER_HEADER_F_SEQ_START */
+		struct {
+			/* Must be the same for all buffers in the sequence. */
+			u32 sequence_number;
+			/* The total size of the sequence. */
+			u32 sequence_size;
+		} sequence_start;
+
+		/*
+		 * HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and
+		 * HGSMI_BUFFER_HEADER_F_SEQ_END
+		 */
+		struct {
+			/* Must be the same for all buffers in the sequence. */
+			u32 sequence_number;
+			/* Data offset in the entire sequence. */
+			u32 sequence_offset;
+		} sequence_continue;
+	} u;
+} __packed;
+
+/* 8 bytes buffer tail. */
+struct hgsmi_buffer_tail {
+	/* Reserved, must be initialized to 0. */
+	u32 reserved;
+	/*
+	 * One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html
+	 * Computed over the header, the offset and the first 4 bytes
+	 * of the tail.
+	 */
+	u32 checksum;
+} __packed;
+
+/*
+ * The size of the array of channels. Array indexes are u8.
+ * Note: the value must not be changed.
+ */
+#define HGSMI_NUMBER_OF_CHANNELS 0x100
+
+#endif
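
The checksum field above references Bob Jenkins' one-at-a-time hash. For orientation, the classic published function looks roughly like the sketch below; per the comment it is run over the header, the offset and the first 4 bytes of the tail, though this generic rendition is not the driver's own code:

/* Bob Jenkins' one-at-a-time hash, as described at the URL above. */
static u32 demo_one_at_a_time(const u8 *key, size_t len)
{
	u32 hash = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		hash += key[i];
		hash += hash << 10;
		hash ^= hash >> 6;
	}
	hash += hash << 3;
	hash ^= hash >> 11;
	hash += hash << 15;
	return hash;
}
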
diff --git a/drivers/staging/vboxvideo/modesetting.c b/drivers/staging/vboxvideo/modesetting.c
new file mode 100644
index 000000000000..7616b8aab23a
--- /dev/null
+++ b/drivers/staging/vboxvideo/modesetting.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_channels.h"
+
+/**
+ * Set a video mode via an HGSMI request.  The views must have been
+ * initialised first using @a VBoxHGSMISendViewInfo and if the mode is being
+ * set on the first display then it must be set first using registers.
+ * @param  ctx           The context containing the heap to use
+ * @param  display       The screen number
+ * @param  origin_x      The horizontal displacement relative to the first
+ *                       screen
+ * @param  origin_y      The vertical displacement relative to the first screen
+ * @param  start_offset  The offset of the visible area of the framebuffer
+ *                       relative to the framebuffer start
+ * @param  pitch         The offset in bytes between the starts of two adjacent
+ *                       scan lines in video RAM
+ * @param  width         The mode width
+ * @param  height        The mode height
+ * @param  bpp           The colour depth of the mode
+ * @param  flags         Flags
+ */
+void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
+				s32 origin_x, s32 origin_y, u32 start_offset,
+				u32 pitch, u32 width, u32 height,
+				u16 bpp, u16 flags)
+{
+	struct vbva_infoscreen *p;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+			       VBVA_INFO_SCREEN);
+	if (!p)
+		return;
+
+	p->view_index = display;
+	p->origin_x = origin_x;
+	p->origin_y = origin_y;
+	p->start_offset = start_offset;
+	p->line_size = pitch;
+	p->width = width;
+	p->height = height;
+	p->bits_per_pixel = bpp;
+	p->flags = flags;
+
+	hgsmi_buffer_submit(ctx, p);
+	hgsmi_buffer_free(ctx, p);
+}
+
+/**
+ * Report the rectangle relative to which absolute pointer events should be
+ * expressed.  This information remains valid until the next VBVA resize event
+ * for any screen, at which time it is reset to the bounding rectangle of all
+ * virtual screens.
+ * @param  ctx       The context containing the heap to use.
+ * @param  origin_x  Upper left X co-ordinate relative to the first screen.
+ * @param  origin_y  Upper left Y co-ordinate relative to the first screen.
+ * @param  width     Rectangle width.
+ * @param  height    Rectangle height.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
+			       u32 width, u32 height)
+{
+	struct vbva_report_input_mapping *p;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+			       VBVA_REPORT_INPUT_MAPPING);
+	if (!p)
+		return -ENOMEM;
+
+	p->x = origin_x;
+	p->y = origin_y;
+	p->cx = width;
+	p->cy = height;
+
+	hgsmi_buffer_submit(ctx, p);
+	hgsmi_buffer_free(ctx, p);
+
+	return 0;
+}
+
+/**
+ * Get most recent video mode hints.
+ * @param  ctx      The context containing the heap to use.
+ * @param  screens  The number of screens to query hints for, starting at 0.
+ * @param  hints    Array of vbva_modehint structures for receiving the hints.
+ * @returns 0 on success, -errno on failure
+ */
+int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
+			 struct vbva_modehint *hints)
+{
+	struct vbva_query_mode_hints *p;
+	size_t size;
+
+	if (WARN_ON(!hints))
+		return -EINVAL;
+
+	size = screens * sizeof(struct vbva_modehint);
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA,
+			       VBVA_QUERY_MODE_HINTS);
+	if (!p)
+		return -ENOMEM;
+
+	p->hints_queried_count = screens;
+	p->hint_structure_guest_size = sizeof(struct vbva_modehint);
+	p->rc = VERR_NOT_SUPPORTED;
+
+	hgsmi_buffer_submit(ctx, p);
+
+	if (RT_FAILURE(p->rc)) {
+		hgsmi_buffer_free(ctx, p);
+		return -EIO;
+	}
+
+	memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size);
+	hgsmi_buffer_free(ctx, p);
+
+	return 0;
+}
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
new file mode 100644
index 000000000000..92ae1560a16d
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Michael Thayer <michael.thayer@oracle.com>,
+ *          Hans de Goede <hdegoede@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/vt_kern.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+
+int vbox_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, vbox_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static const struct pci_device_id pciidlist[] = {
+	{ 0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0, 0, 0},
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void vbox_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static int vbox_drm_freeze(struct drm_device *dev)
+{
+	struct vbox_private *vbox = dev->dev_private;
+
+	drm_kms_helper_poll_disable(dev);
+
+	pci_save_state(dev->pdev);
+
+	drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, true);
+
+	return 0;
+}
+
+static int vbox_drm_thaw(struct drm_device *dev)
+{
+	struct vbox_private *vbox = dev->dev_private;
+
+	drm_mode_config_reset(dev);
+	drm_helper_resume_force_mode(dev);
+	drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, false);
+
+	return 0;
+}
+
+static int vbox_drm_resume(struct drm_device *dev)
+{
+	int ret;
+
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	ret = vbox_drm_thaw(dev);
+	if (ret)
+		return ret;
+
+	drm_kms_helper_poll_enable(dev);
+
+	return 0;
+}
+
+static int vbox_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+	int error;
+
+	error = vbox_drm_freeze(ddev);
+	if (error)
+		return error;
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int vbox_pm_resume(struct device *dev)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+	return vbox_drm_resume(ddev);
+}
+
+static int vbox_pm_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+
+	if (!ddev || !ddev->dev_private)
+		return -ENODEV;
+
+	return vbox_drm_freeze(ddev);
+}
+
+static int vbox_pm_thaw(struct device *dev)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+	return vbox_drm_thaw(ddev);
+}
+
+static int vbox_pm_poweroff(struct device *dev)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+
+	return vbox_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops vbox_pm_ops = {
+	.suspend = vbox_pm_suspend,
+	.resume = vbox_pm_resume,
+	.freeze = vbox_pm_freeze,
+	.thaw = vbox_pm_thaw,
+	.poweroff = vbox_pm_poweroff,
+	.restore = vbox_pm_resume,
+};
+
+static struct pci_driver vbox_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = vbox_pci_probe,
+	.remove = vbox_pci_remove,
+	.driver.pm = &vbox_pm_ops,
+};
+
+static const struct file_operations vbox_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = vbox_mmap,
+	.poll = drm_poll,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.read = drm_read,
+};
+
+static int vbox_master_set(struct drm_device *dev,
+			   struct drm_file *file_priv, bool from_open)
+{
+	struct vbox_private *vbox = dev->dev_private;
+
+	/*
+	 * We do not yet know whether the new owner can handle hotplug, so we
+	 * do not advertise dynamic modes on the first query and send a
+	 * tentative hotplug notification after that to see if they query again.
+	 */
+	vbox->initial_mode_queried = false;
+
+	mutex_lock(&vbox->hw_mutex);
+	/*
+	 * Disable VBVA when someone releases master in case the next person
+	 * tries to do VESA.
+	 */
+	/** @todo Work out if anyone is likely to do so and whether it works. */
+	/*
+	 * Update: we also disable it because if the new master does not do
+	 * dirty rectangle reporting (e.g. old versions of Plymouth) then at
+	 * least the first screen will still be updated. We enable it as soon
+	 * as we receive a dirty rectangle report.
+	 */
+	vbox_disable_accel(vbox);
+	mutex_unlock(&vbox->hw_mutex);
+
+	return 0;
+}
+
+static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct vbox_private *vbox = dev->dev_private;
+
+	/* See vbox_master_set() */
+	vbox->initial_mode_queried = false;
+
+	mutex_lock(&vbox->hw_mutex);
+	vbox_disable_accel(vbox);
+	mutex_unlock(&vbox->hw_mutex);
+}
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+	    DRIVER_PRIME,
+	.dev_priv_size = 0,
+
+	.load = vbox_driver_load,
+	.unload = vbox_driver_unload,
+	.lastclose = vbox_driver_lastclose,
+	.master_set = vbox_master_set,
+	.master_drop = vbox_master_drop,
+	.set_busid = drm_pci_set_busid,
+
+	.fops = &vbox_fops,
+	.irq_handler = vbox_irq_handler,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+
+	.gem_free_object = vbox_gem_free_object,
+	.dumb_create = vbox_dumb_create,
+	.dumb_map_offset = vbox_dumb_mmap_offset,
+	.dumb_destroy = drm_gem_dumb_destroy,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_pin = vbox_gem_prime_pin,
+	.gem_prime_unpin = vbox_gem_prime_unpin,
+	.gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
+	.gem_prime_vmap = vbox_gem_prime_vmap,
+	.gem_prime_vunmap = vbox_gem_prime_vunmap,
+	.gem_prime_mmap = vbox_gem_prime_mmap,
+};
+
+static int __init vbox_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && vbox_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (vbox_modeset == 0)
+		return -EINVAL;
+
+	return drm_pci_init(&driver, &vbox_pci_driver);
+}
+
+static void __exit vbox_exit(void)
+{
+	drm_pci_exit(&driver, &vbox_pci_driver);
+}
+
+module_init(vbox_init);
+module_exit(vbox_exit);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
new file mode 100644
index 000000000000..4b9302703b36
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.h
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Michael Thayer <michael.thayer@oracle.com>,
+ *          Hans de Goede <hdegoede@redhat.com>
+ */
+#ifndef __VBOX_DRV_H__
+#define __VBOX_DRV_H__
+
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_ch_setup.h"
+
+#define DRIVER_NAME         "vboxvideo"
+#define DRIVER_DESC         "Oracle VM VirtualBox Graphics Card"
+#define DRIVER_DATE         "20130823"
+
+#define DRIVER_MAJOR        1
+#define DRIVER_MINOR        0
+#define DRIVER_PATCHLEVEL   0
+
+#define VBOX_MAX_CURSOR_WIDTH  64
+#define VBOX_MAX_CURSOR_HEIGHT 64
+#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
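+/* 4 bytes of pixel data and 1 mask bit for each cursor pixel. */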
+#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
+
+#define VBOX_MAX_SCREENS  32
+
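+/*
+ * The last VBVA_ADAPTER_INFORMATION_SIZE bytes of VRAM are set aside for
+ * the HGSMI guest heap, with the host-flags block at the end of that heap;
+ * everything below GUEST_HEAP_OFFSET() remains available to the driver.
+ */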
+#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \
+				 VBVA_ADAPTER_INFORMATION_SIZE)
+#define GUEST_HEAP_SIZE   VBVA_ADAPTER_INFORMATION_SIZE
+#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \
+				sizeof(struct hgsmi_host_flags))
+#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
+
+struct vbox_fbdev;
+
+struct vbox_private {
+	struct drm_device *dev;
+
+	u8 __iomem *guest_heap;
+	u8 __iomem *vbva_buffers;
+	struct gen_pool *guest_pool;
+	struct vbva_buf_ctx *vbva_info;
+	bool any_pitch;
+	u32 num_crtcs;
+	/** Amount of available VRAM, including space used for buffers. */
+	u32 full_vram_size;
+	/** Amount of available VRAM, not including space used for buffers. */
+	u32 available_vram_size;
+	/** Array of structures for receiving mode hints. */
+	struct vbva_modehint *last_mode_hints;
+
+	struct vbox_fbdev *fbdev;
+
+	int fb_mtrr;
+
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+	} ttm;
+
+	struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
+	/**
+	 * We decide whether or not user-space supports display hot-plug
+	 * depending on whether they react to a hot-plug event after the initial
+	 * mode query.
+	 */
+	bool initial_mode_queried;
+	struct work_struct hotplug_work;
+	u32 input_mapping_width;
+	u32 input_mapping_height;
+	/**
+	 * Is user-space using an X.Org-style layout of one large frame-buffer
+	 * encompassing all screens, or is the fbdev console active?
+	 */
+	bool single_framebuffer;
+	u32 cursor_width;
+	u32 cursor_height;
+	u32 cursor_hot_x;
+	u32 cursor_hot_y;
+	size_t cursor_data_size;
+	u8 cursor_data[CURSOR_DATA_SIZE];
+};
+
+#undef CURSOR_PIXEL_COUNT
+#undef CURSOR_DATA_SIZE
+
+int vbox_driver_load(struct drm_device *dev, unsigned long flags);
+void vbox_driver_unload(struct drm_device *dev);
+void vbox_driver_lastclose(struct drm_device *dev);
+
+struct vbox_gem_object;
+
+struct vbox_connector {
+	struct drm_connector base;
+	char name[32];
+	struct vbox_crtc *vbox_crtc;
+	struct {
+		u16 width;
+		u16 height;
+		bool disconnected;
+	} mode_hint;
+};
+
+struct vbox_crtc {
+	struct drm_crtc base;
+	bool blanked;
+	bool disconnected;
+	unsigned int crtc_id;
+	u32 fb_offset;
+	bool cursor_enabled;
+	u16 x_hint;
+	u16 y_hint;
+};
+
+struct vbox_encoder {
+	struct drm_encoder base;
+};
+
+struct vbox_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+struct vbox_fbdev {
+	struct drm_fb_helper helper;
+	struct vbox_framebuffer afb;
+	int size;
+	struct ttm_bo_kmap_obj mapping;
+	int x1, y1, x2, y2;	/* dirty rect */
+	spinlock_t dirty_lock;
+};
+
+#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
+#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
+#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
+#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
+
+int vbox_mode_init(struct drm_device *dev);
+void vbox_mode_fini(struct drm_device *dev);
+
+#define DRM_MODE_FB_CMD drm_mode_fb_cmd2
+#define CRTC_FB(crtc) ((crtc)->primary->fb)
+
+void vbox_enable_accel(struct vbox_private *vbox);
+void vbox_disable_accel(struct vbox_private *vbox);
+void vbox_report_caps(struct vbox_private *vbox);
+
+void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
+				       struct drm_clip_rect *rects,
+				       unsigned int num_rects);
+
+int vbox_framebuffer_init(struct drm_device *dev,
+			  struct vbox_framebuffer *vbox_fb,
+			  const struct DRM_MODE_FB_CMD *mode_cmd,
+			  struct drm_gem_object *obj);
+
+int vbox_fbdev_init(struct drm_device *dev);
+void vbox_fbdev_fini(struct drm_device *dev);
+void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr);
+
+struct vbox_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	struct ttm_bo_kmap_obj kmap;
+	struct drm_gem_object gem;
+	struct ttm_place placements[3];
+	int pin_count;
+};
+
+#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)
+
+static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct vbox_bo, bo);
+}
+
+#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
+
+int vbox_dumb_create(struct drm_file *file,
+		     struct drm_device *dev,
+		     struct drm_mode_create_dumb *args);
+
+void vbox_gem_free_object(struct drm_gem_object *obj);
+int vbox_dumb_mmap_offset(struct drm_file *file,
+			  struct drm_device *dev,
+			  u32 handle, u64 *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)
+
+int vbox_mm_init(struct vbox_private *vbox);
+void vbox_mm_fini(struct vbox_private *vbox);
+
+int vbox_bo_create(struct drm_device *dev, int size, int align,
+		   u32 flags, struct vbox_bo **pvboxbo);
+
+int vbox_gem_create(struct drm_device *dev,
+		    u32 size, bool iskernel, struct drm_gem_object **obj);
+
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int vbox_bo_unpin(struct vbox_bo *bo);
+
+static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
+	if (ret) {
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
+			DRM_ERROR("reserve failed %p\n", bo);
+		return ret;
+	}
+	return 0;
+}
+
+static inline void vbox_bo_unreserve(struct vbox_bo *bo)
+{
+	ttm_bo_unreserve(&bo->bo);
+}
+
+void vbox_ttm_placement(struct vbox_bo *bo, int domain);
+int vbox_bo_push_sysram(struct vbox_bo *bo);
+int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* vbox_prime.c */
+int vbox_gem_prime_pin(struct drm_gem_object *obj);
+void vbox_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *vbox_gem_prime_import_sg_table(
+	struct drm_device *dev, struct dma_buf_attachment *attach,
+	struct sg_table *table);
+void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
+void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int vbox_gem_prime_mmap(struct drm_gem_object *obj,
+			struct vm_area_struct *area);
+
+/* vbox_irq.c */
+int vbox_irq_init(struct vbox_private *vbox);
+void vbox_irq_fini(struct vbox_private *vbox);
+void vbox_report_hotplug(struct vbox_private *vbox);
+irqreturn_t vbox_irq_handler(int irq, void *arg);
+
+/* vbox_hgsmi.c */
+void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
+			 u8 channel, u16 channel_info);
+void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf);
+int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf);
+
+static inline void vbox_write_ioport(u16 index, u16 data)
+{
+	outw(index, VBE_DISPI_IOPORT_INDEX);
+	outw(data, VBE_DISPI_IOPORT_DATA);
+}
+
+#endif
diff --git a/drivers/staging/vboxvideo/vbox_err.h b/drivers/staging/vboxvideo/vbox_err.h
new file mode 100644
index 000000000000..562db8630eb0
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_err.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __VBOX_ERR_H__
+#define __VBOX_ERR_H__
+
+/**
+ * @name VirtualBox virtual-hardware error macros
+ * @{
+ */
+
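+/*
+ * A VBox status code is a success if it is zero or positive and a failure
+ * if it is negative.
+ */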
+#define VINF_SUCCESS                        0
+#define VERR_INVALID_PARAMETER              (-2)
+#define VERR_INVALID_POINTER                (-6)
+#define VERR_NO_MEMORY                      (-8)
+#define VERR_NOT_IMPLEMENTED                (-12)
+#define VERR_INVALID_FUNCTION               (-36)
+#define VERR_NOT_SUPPORTED                  (-37)
+#define VERR_TOO_MUCH_DATA                  (-42)
+#define VERR_INVALID_STATE                  (-79)
+#define VERR_OUT_OF_RESOURCES               (-80)
+#define VERR_ALREADY_EXISTS                 (-105)
+#define VERR_INTERNAL_ERROR                 (-225)
+
+#define RT_SUCCESS_NP(rc)   ((int)(rc) >= VINF_SUCCESS)
+#define RT_SUCCESS(rc)      (likely(RT_SUCCESS_NP(rc)))
+#define RT_FAILURE(rc)      (unlikely(!RT_SUCCESS_NP(rc)))
+
+/** @}  */
+
+#endif
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
new file mode 100644
index 000000000000..35f6d9f8c203
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_fb.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Michael Thayer <michael.thayer@oracle.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+
+#define VBOX_DIRTY_DELAY (HZ / 30)
+/**
+ * Tell the host about dirty rectangles to update.
+ */
+static void vbox_dirty_update(struct vbox_fbdev *fbdev,
+			      int x, int y, int width, int height)
+{
+	struct drm_gem_object *obj;
+	struct vbox_bo *bo;
+	int ret = -EBUSY;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
+	struct drm_clip_rect rect;
+
+	obj = fbdev->afb.obj;
+	bo = gem_to_vbox_bo(obj);
+
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
+	if (drm_can_sleep())
+		ret = vbox_bo_reserve(bo, true);
+	if (ret) {
+		if (ret != -EBUSY)
+			return;
+
+		store_for_later = true;
+	}
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&fbdev->dirty_lock, flags);
+
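+	/* Merge the new rectangle with any damage stored from earlier calls. */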
+	if (fbdev->y1 < y)
+		y = fbdev->y1;
+	if (fbdev->y2 > y2)
+		y2 = fbdev->y2;
+	if (fbdev->x1 < x)
+		x = fbdev->x1;
+	if (fbdev->x2 > x2)
+		x2 = fbdev->x2;
+
+	if (store_for_later) {
+		fbdev->x1 = x;
+		fbdev->x2 = x2;
+		fbdev->y1 = y;
+		fbdev->y2 = y2;
+		spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
+		return;
+	}
+
+	fbdev->x1 = INT_MAX;
+	fbdev->y1 = INT_MAX;
+	fbdev->x2 = 0;
+	fbdev->y2 = 0;
+
+	spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
+
+	/*
+	 * Not sure why the original code subtracted 1 here, but I will keep
+	 * it that way to avoid unnecessary differences.
+	 */
+	rect.x1 = x;
+	rect.x2 = x2 + 1;
+	rect.y1 = y;
+	rect.y2 = y2 + 1;
+	vbox_framebuffer_dirty_rectangles(&fbdev->afb.base, &rect, 1);
+
+	vbox_bo_unreserve(bo);
+}
+
+#ifdef CONFIG_FB_DEFERRED_IO
+static void vbox_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+	struct vbox_fbdev *fbdev = info->par;
+	unsigned long start, end, min, max;
+	struct page *page;
+	int y1, y2;
+
+	min = ULONG_MAX;
+	max = 0;
+	list_for_each_entry(page, pagelist, lru) {
+		start = page->index << PAGE_SHIFT;
+		end = start + PAGE_SIZE - 1;
+		min = min(min, start);
+		max = max(max, end);
+	}
+
+	if (min < max) {
+		y1 = min / info->fix.line_length;
+		y2 = (max / info->fix.line_length) + 1;
+		DRM_INFO("%s: Calling dirty update: 0, %d, %d, %d\n",
+			 __func__, y1, info->var.xres, y2 - y1 - 1);
+		vbox_dirty_update(fbdev, 0, y1, info->var.xres, y2 - y1 - 1);
+	}
+}
+
+static struct fb_deferred_io vbox_defio = {
+	.delay = VBOX_DIRTY_DELAY,
+	.deferred_io = vbox_deferred_io,
+};
+#endif
+
+static void vbox_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct vbox_fbdev *fbdev = info->par;
+
+	sys_fillrect(info, rect);
+	vbox_dirty_update(fbdev, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void vbox_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+	struct vbox_fbdev *fbdev = info->par;
+
+	sys_copyarea(info, area);
+	vbox_dirty_update(fbdev, area->dx, area->dy, area->width, area->height);
+}
+
+static void vbox_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct vbox_fbdev *fbdev = info->par;
+
+	sys_imageblit(info, image);
+	vbox_dirty_update(fbdev, image->dx, image->dy, image->width,
+			  image->height);
+}
+
+static struct fb_ops vboxfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = vbox_fillrect,
+	.fb_copyarea = vbox_copyarea,
+	.fb_imageblit = vbox_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int vboxfb_create_object(struct vbox_fbdev *fbdev,
+				struct DRM_MODE_FB_CMD *mode_cmd,
+				struct drm_gem_object **gobj_p)
+{
+	struct drm_device *dev = fbdev->helper.dev;
+	u32 size;
+	struct drm_gem_object *gobj;
+	u32 pitch = mode_cmd->pitches[0];
+	int ret;
+
+	size = pitch * mode_cmd->height;
+	ret = vbox_gem_create(dev, size, true, &gobj);
+	if (ret)
+		return ret;
+
+	*gobj_p = gobj;
+
+	return 0;
+}
+
+static int vboxfb_create(struct drm_fb_helper *helper,
+			 struct drm_fb_helper_surface_size *sizes)
+{
+	struct vbox_fbdev *fbdev =
+	    container_of(helper, struct vbox_fbdev, helper);
+	struct drm_device *dev = fbdev->helper.dev;
+	struct DRM_MODE_FB_CMD mode_cmd;
+	struct drm_framebuffer *fb;
+	struct fb_info *info;
+	struct device *device = &dev->pdev->dev;
+	struct drm_gem_object *gobj;
+	struct vbox_bo *bo;
+	int size, ret;
+	u32 pitch;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
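+	/* Round up bits-per-pixel to a whole number of bytes per pixel. */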
+	pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+	mode_cmd.pitches[0] = pitch;
+
+	size = pitch * mode_cmd.height;
+
+	ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+		return ret;
+	}
+
+	ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj);
+	if (ret)
+		return ret;
+
+	bo = gem_to_vbox_bo(gobj);
+
+	ret = vbox_bo_reserve(bo, false);
+	if (ret)
+		return ret;
+
+	ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+	if (ret) {
+		vbox_bo_unreserve(bo);
+		return ret;
+	}
+
+	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+	vbox_bo_unreserve(bo);
+	if (ret) {
+		DRM_ERROR("failed to kmap fbcon\n");
+		return ret;
+	}
+
+	info = framebuffer_alloc(0, device);
+	if (!info)
+		return -ENOMEM;
+	info->par = fbdev;
+
+	fbdev->size = size;
+
+	fb = &fbdev->afb.base;
+	fbdev->helper.fb = fb;
+	fbdev->helper.fbdev = info;
+
+	strcpy(info->fix.id, "vboxdrmfb");
+
+	/*
+	 * The last flag forces a mode set on VT switches even if the kernel
+	 * does not think it is needed.
+	 */
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT |
+		      FBINFO_MISC_ALWAYS_SETPAR;
+	info->fbops = &vboxfb_ops;
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret)
+		return -ENOMEM;
+
+	/*
+	 * This seems to be done as a safety check, to make sure that the
+	 * framebuffer is not registered twice by different drivers.
+	 */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures)
+		return -ENOMEM;
+	info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+	info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
+	drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
+			       sizes->fb_height);
+
+	info->screen_base = bo->kmap.virtual;
+	info->screen_size = size;
+
+#ifdef CONFIG_FB_DEFERRED_IO
+	info->fbdefio = &vbox_defio;
+	fb_deferred_io_init(info);
+#endif
+
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+	DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
+
+	return 0;
+}
+
+static void vbox_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			      u16 blue, int regno)
+{
+}
+
+static void vbox_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, int regno)
+{
+	*red = regno;
+	*green = regno;
+	*blue = regno;
+}
+
+static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
+	.gamma_set = vbox_fb_gamma_set,
+	.gamma_get = vbox_fb_gamma_get,
+	.fb_probe = vboxfb_create,
+};
+
+void vbox_fbdev_fini(struct drm_device *dev)
+{
+	struct vbox_private *vbox = dev->dev_private;
+	struct vbox_fbdev *fbdev = vbox->fbdev;
+	struct vbox_framebuffer *afb = &fbdev->afb;
+
+	drm_fb_helper_unregister_fbi(&fbdev->helper);
+
+	if (afb->obj) {
+		struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);
+
+		if (!vbox_bo_reserve(bo, false)) {
+			if (bo->kmap.virtual)
+				ttm_bo_kunmap(&bo->kmap);
+			/*
+			 * QXL does this, but is it really needed before
+			 * freeing?
+			 */
+			if (bo->pin_count)
+				vbox_bo_unpin(bo);
+			vbox_bo_unreserve(bo);
+		}
+		drm_gem_object_unreference_unlocked(afb->obj);
+		afb->obj = NULL;
+	}
+	drm_fb_helper_fini(&fbdev->helper);
+
+	drm_framebuffer_unregister_private(&afb->base);
+	drm_framebuffer_cleanup(&afb->base);
+}
+
+int vbox_fbdev_init(struct drm_device *dev)
+{
+	struct vbox_private *vbox = dev->dev_private;
+	struct vbox_fbdev *fbdev;
+	int ret;
+
+	fbdev = devm_kzalloc(dev->dev, sizeof(*fbdev), GFP_KERNEL);
+	if (!fbdev)
+		return -ENOMEM;
+
+	vbox->fbdev = fbdev;
+	spin_lock_init(&fbdev->dirty_lock);
+
+	drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
+	ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs);
+	if (ret)
+		return ret;
+
+	ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
+	if (ret)
+		goto err_fini;
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
+	ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
+	if (ret)
+		goto err_fini;
+
+	return 0;
+
+err_fini:
+	drm_fb_helper_fini(&fbdev->helper);
+	return ret;
+}
+
+void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr)
+{
+	struct fb_info *fbdev = vbox->fbdev->helper.fbdev;
+
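+	/* gpu_addr is the framebuffer's offset from the start of VRAM (BAR 0). */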
+	fbdev->fix.smem_start = fbdev->apertures->ranges[0].base + gpu_addr;
+	fbdev->fix.smem_len = vbox->available_vram_size - gpu_addr;
+}
diff --git a/drivers/staging/vboxvideo/vbox_hgsmi.c b/drivers/staging/vboxvideo/vbox_hgsmi.c
new file mode 100644
index 000000000000..822fd31121cb
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_hgsmi.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include "vbox_drv.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_defs.h"
+
+/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */
+static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size)
+{
+	while (size--) {
+		hash += *data++;
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+
+	return hash;
+}
+
+static u32 hgsmi_hash_end(u32 hash)
+{
+	hash += (hash << 3);
+	hash ^= (hash >> 11);
+	hash += (hash << 15);
+
+	return hash;
+}
+
+/* Not really a checksum but that is the naming used in all vbox code */
+static u32 hgsmi_checksum(u32 offset,
+			  const struct hgsmi_buffer_header *header,
+			  const struct hgsmi_buffer_tail *tail)
+{
+	u32 checksum;
+
+	checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset));
+	checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header));
+	/* 4 -> Do not checksum the checksum itself */
+	checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4);
+
+	return hgsmi_hash_end(checksum);
+}
+
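+/*
+ * A buffer on the guest heap has the layout:
+ *   struct hgsmi_buffer_header | data (size bytes) | struct hgsmi_buffer_tail
+ * The pointer returned to the caller points at the data area.
+ */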
+void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
+			 u8 channel, u16 channel_info)
+{
+	struct hgsmi_buffer_header *h;
+	struct hgsmi_buffer_tail *t;
+	size_t total_size;
+	dma_addr_t offset;
+
+	total_size = size + sizeof(*h) + sizeof(*t);
+	h = gen_pool_dma_alloc(guest_pool, total_size, &offset);
+	if (!h)
+		return NULL;
+
+	t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size);
+
+	h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
+	h->data_size = size;
+	h->channel = channel;
+	h->channel_info = channel_info;
+	memset(&h->u.header_data, 0, sizeof(h->u.header_data));
+
+	t->reserved = 0;
+	t->checksum = hgsmi_checksum(offset, h, t);
+
+	return (u8 *)h + sizeof(*h);
+}
+
+void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
+{
+	struct hgsmi_buffer_header *h =
+		(struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h));
+	size_t total_size = h->data_size + sizeof(*h) +
+			    sizeof(struct hgsmi_buffer_tail);
+
+	gen_pool_free(guest_pool, (unsigned long)h, total_size);
+}
+
+int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
+{
+	phys_addr_t offset;
+
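+	/* The pool's "phys" addresses are offsets into VRAM, as the host expects. */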
+	offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
+				       sizeof(struct hgsmi_buffer_header));
+	outl(offset, VGA_PORT_HGSMI_GUEST);
+	/* Make the compiler aware that the host has changed memory. */
+	mb();
+
+	return 0;
+}
diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c
new file mode 100644
index 000000000000..3ca8bec62ac4
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_irq.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2016-2017 Oracle Corporation
+ * This file is based on qxl_irq.c
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ *          Michael Thayer <michael.thayer@oracle.com>,
+ *          Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+
+static void vbox_clear_irq(void)
+{
+	outl((u32)~0, VGA_PORT_HGSMI_HOST);
+}
+
+static u32 vbox_get_flags(struct vbox_private *vbox)
+{
+	return readl(vbox->guest_heap + HOST_FLAGS_OFFSET);
+}
+
+void vbox_report_hotplug(struct vbox_private *vbox)
+{
+	schedule_work(&vbox->hotplug_work);
+}
+
+irqreturn_t vbox_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
+	u32 host_flags = vbox_get_flags(vbox);
+
+	if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
+		return IRQ_NONE;
+
+	/*
+	 * Due to a bug in the initial host implementation of hot-plug irqs,
+	 * the hot-plug and cursor capability flags were never cleared.
+	 * Fortunately we can tell when they would have been set by checking
+	 * that the VSYNC flag is not set.
+	 */
+	if (host_flags &
+	    (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) &&
+	    !(host_flags & HGSMIHOSTFLAGS_VSYNC))
+		vbox_report_hotplug(vbox);
+
+	vbox_clear_irq();
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * Check that the position hints provided by the host are suitable for GNOME
+ * shell (i.e. all screens disjoint and hints for all enabled screens) and if
+ * not replace them with default ones.  Providing valid hints improves the
+ * chances that we will get a known screen layout for pointer mapping.
+ */
+static void validate_or_set_position_hints(struct vbox_private *vbox)
+{
+	struct vbva_modehint *hintsi, *hintsj;
+	bool valid = true;
+	u16 currentx = 0;
+	int i, j;
+
+	for (i = 0; i < vbox->num_crtcs; ++i) {
+		for (j = 0; j < i; ++j) {
+			hintsi = &vbox->last_mode_hints[i];
+			hintsj = &vbox->last_mode_hints[j];
+
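+			/*
+			 * Hints are unusable if a position overflows 16 bits
+			 * or if two enabled screens overlap (an interval
+			 * intersection test on each axis).
+			 */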
+			if (hintsi->enabled && hintsj->enabled) {
+				if (hintsi->dx >= 0xffff ||
+				    hintsi->dy >= 0xffff ||
+				    hintsj->dx >= 0xffff ||
+				    hintsj->dy >= 0xffff ||
+				    (hintsi->dx <
+					hintsj->dx + (hintsj->cx & 0x8fff) &&
+				     hintsi->dx + (hintsi->cx & 0x8fff) >
+					hintsj->dx) ||
+				    (hintsi->dy <
+					hintsj->dy + (hintsj->cy & 0x8fff) &&
+				     hintsi->dy + (hintsi->cy & 0x8fff) >
+					hintsj->dy))
+					valid = false;
+			}
+		}
+	}
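+	/* Fall back to a simple layout of all enabled screens in a row. */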
+	if (!valid)
+		for (i = 0; i < vbox->num_crtcs; ++i) {
+			if (vbox->last_mode_hints[i].enabled) {
+				vbox->last_mode_hints[i].dx = currentx;
+				vbox->last_mode_hints[i].dy = 0;
+				currentx +=
+				    vbox->last_mode_hints[i].cx & 0x8fff;
+			}
+		}
+}
+
+/**
+ * Query the host for the most recent video mode hints.
+ */
+static void vbox_update_mode_hints(struct vbox_private *vbox)
+{
+	struct drm_device *dev = vbox->dev;
+	struct drm_connector *connector;
+	struct vbox_connector *vbox_conn;
+	struct vbva_modehint *hints;
+	u16 flags;
+	bool disconnected;
+	unsigned int crtc_id;
+	int ret;
+
+	ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs,
+				   vbox->last_mode_hints);
+	if (ret) {
+		DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret);
+		return;
+	}
+
+	validate_or_set_position_hints(vbox);
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		vbox_conn = to_vbox_connector(connector);
+
+		hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id];
+		if (hints->magic != VBVAMODEHINT_MAGIC)
+			continue;
+
+		disconnected = !(hints->enabled);
+		crtc_id = vbox_conn->vbox_crtc->crtc_id;
+		vbox_conn->mode_hint.width = hints->cx & 0x8fff;
+		vbox_conn->mode_hint.height = hints->cy & 0x8fff;
+		vbox_conn->vbox_crtc->x_hint = hints->dx;
+		vbox_conn->vbox_crtc->y_hint = hints->dy;
+		vbox_conn->mode_hint.disconnected = disconnected;
+
+		if (vbox_conn->vbox_crtc->disconnected == disconnected)
+			continue;
+
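+		/*
+		 * Tell the host to disable the screen if it is now
+		 * disconnected, and to blank it otherwise.
+		 */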
+		if (disconnected)
+			flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
+		else
+			flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK;
+
+		hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0,
+					   hints->cx * 4, hints->cx,
+					   hints->cy, 0, flags);
+
+		vbox_conn->vbox_crtc->disconnected = disconnected;
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static void vbox_hotplug_worker(struct work_struct *work)
+{
+	struct vbox_private *vbox = container_of(work, struct vbox_private,
+						 hotplug_work);
+
+	vbox_update_mode_hints(vbox);
+	drm_kms_helper_hotplug_event(vbox->dev);
+}
+
+int vbox_irq_init(struct vbox_private *vbox)
+{
+	INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
+	vbox_update_mode_hints(vbox);
+
+	return drm_irq_install(vbox->dev, vbox->dev->pdev->irq);
+}
+
+void vbox_irq_fini(struct vbox_private *vbox)
+{
+	drm_irq_uninstall(vbox->dev);
+	flush_work(&vbox->hotplug_work);
+}
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
new file mode 100644
index 000000000000..d0c6ec75a3c7
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_main.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>,
+ *          Michael Thayer <michael.thayer@oracle.com>,
+ *          Hans de Goede <hdegoede@redhat.com>
+ */
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+
+static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
+
+	if (vbox_fb->obj)
+		drm_gem_object_unreference_unlocked(vbox_fb->obj);
+
+	drm_framebuffer_cleanup(fb);
+	kfree(fb);
+}
+
+void vbox_enable_accel(struct vbox_private *vbox)
+{
+	unsigned int i;
+	struct vbva_buffer *vbva;
+
+	if (!vbox->vbva_info || !vbox->vbva_buffers) {
+		/* Should never happen... */
+		DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
+		return;
+	}
+
+	for (i = 0; i < vbox->num_crtcs; ++i) {
+		if (vbox->vbva_info[i].vbva)
+			continue;
+
+		vbva = (void *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE;
+		if (!vbva_enable(&vbox->vbva_info[i],
+				 vbox->guest_pool, vbva, i)) {
+			/* Very old host or a driver error. */
+			DRM_ERROR("vboxvideo: vbva_enable failed\n");
+			return;
+		}
+	}
+}
+
+void vbox_disable_accel(struct vbox_private *vbox)
+{
+	unsigned int i;
+
+	for (i = 0; i < vbox->num_crtcs; ++i)
+		vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
+}
+
+void vbox_report_caps(struct vbox_private *vbox)
+{
+	u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
+		   VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
+
+	if (vbox->initial_mode_queried)
+		caps |= VBVACAPS_VIDEO_MODE_HINTS;
+
+	hgsmi_send_caps_info(vbox->guest_pool, caps);
+}
+
+/**
+ * Send information about dirty rectangles to VBVA.  If necessary we enable
+ * VBVA first, as this is normally disabled after a change of master in case
+ * the new master does not send dirty rectangle information (is this even
+ * allowed?).
+ */
+void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
+				       struct drm_clip_rect *rects,
+				       unsigned int num_rects)
+{
+	struct vbox_private *vbox = fb->dev->dev_private;
+	struct drm_crtc *crtc;
+	unsigned int i;
+
+	mutex_lock(&vbox->hw_mutex);
+	list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
+		if (CRTC_FB(crtc) != fb)
+			continue;
+
+		vbox_enable_accel(vbox);
+
+		for (i = 0; i < num_rects; ++i) {
+			struct vbva_cmd_hdr cmd_hdr;
+			unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
+
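+			/* Skip rectangles which do not touch this CRTC. */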
+			if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
+			    (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
+			    (rects[i].x2 < crtc->x) ||
+			    (rects[i].y2 < crtc->y))
+				continue;
+
+			cmd_hdr.x = (s16)rects[i].x1;
+			cmd_hdr.y = (s16)rects[i].y1;
+			cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
+			cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
+
+			if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
+						      vbox->guest_pool))
+				continue;
+
+			vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
+				   &cmd_hdr, sizeof(cmd_hdr));
+			vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
+		}
+	}
+	mutex_unlock(&vbox->hw_mutex);
+}
+
+static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
+				       struct drm_file *file_priv,
+				       unsigned int flags, unsigned int color,
+				       struct drm_clip_rect *rects,
+				       unsigned int num_rects)
+{
+	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
+
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs vbox_fb_funcs = {
+	.destroy = vbox_user_framebuffer_destroy,
+	.dirty = vbox_user_framebuffer_dirty,
+};
+
+int vbox_framebuffer_init(struct drm_device *dev,
+			  struct vbox_framebuffer *vbox_fb,
+			  const struct DRM_MODE_FB_CMD *mode_cmd,
+			  struct drm_gem_object *obj)
+{
+	int ret;
+
+	drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
+	vbox_fb->obj = obj;
+	ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
+	if (ret) {
+		DRM_ERROR("framebuffer init failed %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct drm_framebuffer *vbox_user_framebuffer_create(
+		struct drm_device *dev,
+		struct drm_file *filp,
+		const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct vbox_framebuffer *vbox_fb;
+	int ret = -ENOMEM;
+
+	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+	if (!obj)
+		return ERR_PTR(-ENOENT);
+
+	vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
+	if (!vbox_fb)
+		goto err_unref_obj;
+
+	ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
+	if (ret)
+		goto err_free_vbox_fb;
+
+	return &vbox_fb->base;
+
+err_free_vbox_fb:
+	kfree(vbox_fb);
+err_unref_obj:
+	drm_gem_object_unreference_unlocked(obj);
+	return ERR_PTR(ret);
+}
+
+static const struct drm_mode_config_funcs vbox_mode_funcs = {
+	.fb_create = vbox_user_framebuffer_create,
+};
+
+static int vbox_accel_init(struct vbox_private *vbox)
+{
+	unsigned int i;
+
+	vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
+				       sizeof(*vbox->vbva_info), GFP_KERNEL);
+	if (!vbox->vbva_info)
+		return -ENOMEM;
+
+	/* Take a command buffer for each screen from the end of usable VRAM. */
+	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
+
+	vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
+					     vbox->available_vram_size,
+					     vbox->num_crtcs *
+					     VBVA_MIN_BUFFER_SIZE);
+	if (!vbox->vbva_buffers)
+		return -ENOMEM;
+
+	for (i = 0; i < vbox->num_crtcs; ++i)
+		vbva_setup_buffer_context(&vbox->vbva_info[i],
+					  vbox->available_vram_size +
+					  i * VBVA_MIN_BUFFER_SIZE,
+					  VBVA_MIN_BUFFER_SIZE);
+
+	return 0;
+}
+
+static void vbox_accel_fini(struct vbox_private *vbox)
+{
+	vbox_disable_accel(vbox);
+	pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
+}
+
+/** Does the host support the VirtualBox 4.3+ mode hint reporting interface? */
+static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
+{
+	u32 have_hints, have_cursor;
+	int ret;
+
+	ret = hgsmi_query_conf(vbox->guest_pool,
+			       VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
+			       &have_hints);
+	if (ret)
+		return false;
+
+	ret = hgsmi_query_conf(vbox->guest_pool,
+			       VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
+			       &have_cursor);
+	if (ret)
+		return false;
+
+	return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
+}
+
+static bool vbox_check_supported(u16 id)
+{
+	u16 dispi_id;
+
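+	/* Write the ID we want, then check whether the device echoes it back. */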
+	vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
+	dispi_id = inw(VBE_DISPI_IOPORT_DATA);
+
+	return dispi_id == id;
+}
+
+/**
+ * Set up our heaps and data exchange buffers in VRAM before handing the rest
+ * to the memory manager.
+ */
+static int vbox_hw_init(struct vbox_private *vbox)
+{
+	int ret = -ENOMEM;
+
+	vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
+	vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);
+
+	DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
+
+	/* Map guest-heap at end of vram */
+	vbox->guest_heap =
+	    pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
+			    GUEST_HEAP_SIZE);
+	if (!vbox->guest_heap)
+		return -ENOMEM;
+
+	/* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
+	vbox->guest_pool = gen_pool_create(4, -1);
+	if (!vbox->guest_pool)
+		goto err_unmap_guest_heap;
+
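+	/*
+	 * Register the heap with its offset from the start of VRAM as the
+	 * "physical" address, since that is how the host addresses HGSMI
+	 * buffers (see hgsmi_buffer_submit()).
+	 */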
+	ret = gen_pool_add_virt(vbox->guest_pool,
+				(unsigned long)vbox->guest_heap,
+				GUEST_HEAP_OFFSET(vbox),
+				GUEST_HEAP_USABLE_SIZE, -1);
+	if (ret)
+		goto err_destroy_guest_pool;
+
+	ret = hgsmi_test_query_conf(vbox->guest_pool);
+	if (ret) {
+		DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
+		goto err_destroy_guest_pool;
+	}
+
+	/* Reduce available VRAM size to reflect the guest heap. */
+	vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
+	/* Linux drm represents monitors as a 32-bit array. */
+	hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
+			 &vbox->num_crtcs);
+	vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);
+
+	if (!have_hgsmi_mode_hints(vbox)) {
+		ret = -ENOTSUPP;
+		goto err_destroy_guest_pool;
+	}
+
+	vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
+					     sizeof(struct vbva_modehint),
+					     GFP_KERNEL);
+	if (!vbox->last_mode_hints) {
+		ret = -ENOMEM;
+		goto err_destroy_guest_pool;
+	}
+
+	ret = vbox_accel_init(vbox);
+	if (ret)
+		goto err_destroy_guest_pool;
+
+	return 0;
+
+err_destroy_guest_pool:
+	gen_pool_destroy(vbox->guest_pool);
+err_unmap_guest_heap:
+	pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
+	return ret;
+}
+
+static void vbox_hw_fini(struct vbox_private *vbox)
+{
+	vbox_accel_fini(vbox);
+	gen_pool_destroy(vbox->guest_pool);
+	pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
+}
+
+int vbox_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct vbox_private *vbox;
+	int ret = 0;
+
+	if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
+		return -ENODEV;
+
+	vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
+	if (!vbox)
+		return -ENOMEM;
+
+	dev->dev_private = vbox;
+	vbox->dev = dev;
+
+	mutex_init(&vbox->hw_mutex);
+
+	ret = vbox_hw_init(vbox);
+	if (ret)
+		return ret;
+
+	ret = vbox_mm_init(vbox);
+	if (ret)
+		goto err_hw_fini;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.funcs = (void *)&vbox_mode_funcs;
+	dev->mode_config.min_width = 64;
+	dev->mode_config.min_height = 64;
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
+	dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
+
+	ret = vbox_mode_init(dev);
+	if (ret)
+		goto err_drm_mode_cleanup;
+
+	ret = vbox_irq_init(vbox);
+	if (ret)
+		goto err_mode_fini;
+
+	ret = vbox_fbdev_init(dev);
+	if (ret)
+		goto err_irq_fini;
+
+	return 0;
+
+err_irq_fini:
+	vbox_irq_fini(vbox);
+err_mode_fini:
+	vbox_mode_fini(dev);
+err_drm_mode_cleanup:
+	drm_mode_config_cleanup(dev);
+	vbox_mm_fini(vbox);
+err_hw_fini:
+	vbox_hw_fini(vbox);
+	return ret;
+}
+
+void vbox_driver_unload(struct drm_device *dev)
+{
+	struct vbox_private *vbox = dev->dev_private;
+
+	vbox_fbdev_fini(dev);
+	vbox_irq_fini(vbox);
+	vbox_mode_fini(dev);
+	drm_mode_config_cleanup(dev);
+	vbox_mm_fini(vbox);
+	vbox_hw_fini(vbox);
+}
+
+/**
+ * @note this is described in the DRM framework documentation.  AST does not
+ * have it, but we get an oops on driver unload if it is not present.
+ */
+void vbox_driver_lastclose(struct drm_device *dev)
+{
+	struct vbox_private *vbox = dev->dev_private;
+
+	if (vbox->fbdev)
+		drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
+}
+
+int vbox_gem_create(struct drm_device *dev,
+		    u32 size, bool iskernel, struct drm_gem_object **obj)
+{
+	struct vbox_bo *vboxbo;
+	int ret;
+
+	*obj = NULL;
+
+	size = roundup(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
+
+	ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("failed to allocate GEM object\n");
+		return ret;
+	}
+
+	*obj = &vboxbo->gem;
+
+	return 0;
+}
+
+int vbox_dumb_create(struct drm_file *file,
+		     struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+	int ret;
+	struct drm_gem_object *gobj;
+	u32 handle;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	ret = vbox_gem_create(dev, args->size, false, &gobj);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file, gobj, &handle);
+	drm_gem_object_unreference_unlocked(gobj);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+
+	return 0;
+}
+
+static void vbox_bo_unref(struct vbox_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+
+	tbo = &((*bo)->bo);
+	ttm_bo_unref(&tbo);
+	if (!tbo)
+		*bo = NULL;
+}
+
+void vbox_gem_free_object(struct drm_gem_object *obj)
+{
+	struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
+
+	vbox_bo_unref(&vbox_bo);
+}
+
+static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
+{
+	return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int
+vbox_dumb_mmap_offset(struct drm_file *file,
+		      struct drm_device *dev,
+		      u32 handle, u64 *offset)
+{
+	struct drm_gem_object *obj;
+	int ret;
+	struct vbox_bo *bo;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(file, handle);
+	if (!obj) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	bo = gem_to_vbox_bo(obj);
+	*offset = vbox_bo_mmap_offset(bo);
+
+	drm_gem_object_unreference(obj);
+	ret = 0;
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
new file mode 100644
index 000000000000..f2b85f3256fa
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -0,0 +1,877 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_mode.c
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Michael Thayer <michael.thayer@oracle.com>,
+ *          Hans de Goede <hdegoede@redhat.com>
+ */
+#include <linux/export.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+#include "hgsmi_channels.h"
+
+static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+			    u32 handle, u32 width, u32 height,
+			    s32 hot_x, s32 hot_y);
+static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y);
+
+/**
+ * Set a graphics mode.  Poke any required values into registers, do an HGSMI
+ * mode set and tell the host we support advanced graphics functions.
+ */
+static void vbox_do_modeset(struct drm_crtc *crtc,
+			    const struct drm_display_mode *mode)
+{
+	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+	struct vbox_private *vbox;
+	int width, height, bpp, pitch;
+	unsigned int crtc_id;
+	u16 flags;
+	s32 x_offset, y_offset;
+
+	vbox = crtc->dev->dev_private;
+	width = mode->hdisplay ? mode->hdisplay : 640;
+	height = mode->vdisplay ? mode->vdisplay : 480;
+	crtc_id = vbox_crtc->crtc_id;
+	bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32;
+	pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8;
+	x_offset = vbox->single_framebuffer ? crtc->x : vbox_crtc->x_hint;
+	y_offset = vbox->single_framebuffer ? crtc->y : vbox_crtc->y_hint;
+
+	/*
+	 * This is the old way of setting graphics modes.  It assumed one screen
+	 * and a frame-buffer at the start of video RAM.  On older versions of
+	 * VirtualBox, certain parts of the code still assume that the first
+	 * screen is programmed this way, so try to fake it.
+	 */
+	if (vbox_crtc->crtc_id == 0 && crtc->enabled &&
+	    vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
+	    vbox_crtc->fb_offset % (bpp / 8) == 0) {
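+		/*
+		 * The legacy VBE registers count in pixels, not bytes, so
+		 * VIRT_WIDTH and the offsets below convert the byte pitch
+		 * back to pixels via pitch * 8 / bpp.
+		 */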
+		vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
+		vbox_write_ioport(VBE_DISPI_INDEX_YRES, height);
+		vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
+		vbox_write_ioport(VBE_DISPI_INDEX_BPP,
+				  CRTC_FB(crtc)->format->cpp[0] * 8);
+		vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
+		vbox_write_ioport(
+			VBE_DISPI_INDEX_X_OFFSET,
+			vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x);
+		vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
+				  vbox_crtc->fb_offset / pitch + crtc->y);
+	}
+
+	flags = VBVA_SCREEN_F_ACTIVE;
+	flags |= (crtc->enabled && !vbox_crtc->blanked) ?
+		 0 : VBVA_SCREEN_F_BLANK;
+	flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
+	hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id,
+				   x_offset, y_offset,
+				   crtc->x * bpp / 8 + crtc->y * pitch,
+				   pitch, width, height,
+				   vbox_crtc->blanked ? 0 : bpp, flags);
+}
+
+static int vbox_set_view(struct drm_crtc *crtc)
+{
+	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+	struct vbox_private *vbox = crtc->dev->dev_private;
+	struct vbva_infoview *p;
+
+	/*
+	 * Tell the host about the view.  This design originally targeted the
+	 * Windows XP driver architecture and assumed that each screen would
+	 * have a dedicated frame buffer with the command buffer following it,
+	 * the whole being a "view".  The host works out which screen a command
+	 * buffer belongs to by checking whether it is in the first view, then
+	 * whether it is in the second and so on.  The first match wins.  We
+	 * cheat around this by making the first view be the managed memory
+	 * plus the first command buffer, the second the same plus the second
+	 * buffer and so on.
+	 */
+	p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
+			       HGSMI_CH_VBVA, VBVA_INFO_VIEW);
+	if (!p)
+		return -ENOMEM;
+
+	p->view_index = vbox_crtc->crtc_id;
+	p->view_offset = vbox_crtc->fb_offset;
+	p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset +
+		       vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
+	p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset;
+
+	hgsmi_buffer_submit(vbox->guest_pool, p);
+	hgsmi_buffer_free(vbox->guest_pool, p);
+
+	return 0;
+}
+
+static void vbox_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+	struct vbox_private *vbox = crtc->dev->dev_private;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		vbox_crtc->blanked = false;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		vbox_crtc->blanked = true;
+		break;
+	}
+
+	mutex_lock(&vbox->hw_mutex);
+	vbox_do_modeset(crtc, &crtc->hwmode);
+	mutex_unlock(&vbox->hw_mutex);
+}
+
+static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+/*
+ * Try to map the layout of virtual screens to the range of the input device.
+ * Return true if we need to re-set the crtc modes due to screen offset
+ * changes.
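+ *
+ * For example, two 1024x768 screens with separate frame-buffers placed side
+ * by side produce a 2048x768 input mapping.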
+ */
+static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
+{
+	struct drm_crtc *crtci;
+	struct drm_connector *connectori;
+	struct drm_framebuffer *fb1 = NULL;
+	bool single_framebuffer = true;
+	bool old_single_framebuffer = vbox->single_framebuffer;
+	u16 width = 0, height = 0;
+
+	/*
+	 * Are we using an X.Org-style single large frame-buffer for all crtcs?
+	 * If so then screen layout can be deduced from the crtc offsets.
	 * The same fall-back applies if this is the fbdev frame-buffer.
+	 */
+	list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
+		if (!fb1) {
+			fb1 = CRTC_FB(crtci);
+			if (to_vbox_framebuffer(fb1) == &vbox->fbdev->afb)
+				break;
+		} else if (CRTC_FB(crtci) && fb1 != CRTC_FB(crtci)) {
+			single_framebuffer = false;
+		}
+	}
+	if (single_framebuffer) {
+		list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+				    head) {
+			if (to_vbox_crtc(crtci)->crtc_id != 0)
+				continue;
+
+			vbox->single_framebuffer = true;
+			vbox->input_mapping_width = CRTC_FB(crtci)->width;
+			vbox->input_mapping_height = CRTC_FB(crtci)->height;
+			return old_single_framebuffer !=
+			       vbox->single_framebuffer;
+		}
+	}
+	/* Otherwise calculate the total span of all screens. */
+	list_for_each_entry(connectori, &vbox->dev->mode_config.connector_list,
+			    head) {
+		struct vbox_connector *vbox_connector =
+		    to_vbox_connector(connectori);
+		struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc;
+
+		width = max_t(u16, width, vbox_crtc->x_hint +
+					  vbox_connector->mode_hint.width);
+		height = max_t(u16, height, vbox_crtc->y_hint +
+					    vbox_connector->mode_hint.height);
+	}
+
+	vbox->single_framebuffer = false;
+	vbox->input_mapping_width = width;
+	vbox->input_mapping_height = height;
+
+	return old_single_framebuffer != vbox->single_framebuffer;
+}
+
+static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
+				 struct drm_framebuffer *old_fb, int x, int y)
+{
+	struct vbox_private *vbox = crtc->dev->dev_private;
+	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+	struct drm_gem_object *obj;
+	struct vbox_framebuffer *vbox_fb;
+	struct vbox_bo *bo;
+	int ret;
+	u64 gpu_addr;
+
+	/* Unpin the previous fb. */
+	if (old_fb) {
+		vbox_fb = to_vbox_framebuffer(old_fb);
+		obj = vbox_fb->obj;
+		bo = gem_to_vbox_bo(obj);
+		ret = vbox_bo_reserve(bo, false);
+		if (ret)
+			return ret;
+
+		vbox_bo_unpin(bo);
+		vbox_bo_unreserve(bo);
+	}
+
+	vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc));
+	obj = vbox_fb->obj;
+	bo = gem_to_vbox_bo(obj);
+
+	ret = vbox_bo_reserve(bo, false);
+	if (ret)
+		return ret;
+
+	ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+	if (ret) {
+		vbox_bo_unreserve(bo);
+		return ret;
+	}
+
+	if (&vbox->fbdev->afb == vbox_fb)
+		vbox_fbdev_set_base(vbox, gpu_addr);
+	vbox_bo_unreserve(bo);
+
+	/* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */
+	vbox_crtc->fb_offset = gpu_addr;
+	if (vbox_set_up_input_mapping(vbox)) {
+		struct drm_crtc *crtci;
+
+		list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+				    head) {
+			vbox_set_view(crtc);
+			vbox_do_modeset(crtci, &crtci->mode);
+		}
+	}
+
+	return 0;
+}
+
+static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+				   struct drm_framebuffer *old_fb)
+{
+	return vbox_crtc_do_set_base(crtc, old_fb, x, y);
+}
+
+static int vbox_crtc_mode_set(struct drm_crtc *crtc,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode,
+			      int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct vbox_private *vbox = crtc->dev->dev_private;
+	int ret;
+
+	vbox_crtc_mode_set_base(crtc, x, y, old_fb);
+
+	mutex_lock(&vbox->hw_mutex);
+	ret = vbox_set_view(crtc);
+	if (!ret)
+		vbox_do_modeset(crtc, mode);
+	hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
+				   vbox->input_mapping_width,
+				   vbox->input_mapping_height);
+	mutex_unlock(&vbox->hw_mutex);
+
+	return ret;
+}
+
+static void vbox_crtc_disable(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
+	.dpms = vbox_crtc_dpms,
+	.mode_fixup = vbox_crtc_mode_fixup,
+	.mode_set = vbox_crtc_mode_set,
+	/* .mode_set_base = vbox_crtc_mode_set_base, */
+	.disable = vbox_crtc_disable,
+	.load_lut = vbox_crtc_load_lut,
+	.prepare = vbox_crtc_prepare,
+	.commit = vbox_crtc_commit,
+};
+
+static void vbox_crtc_reset(struct drm_crtc *crtc)
+{
+}
+
+static void vbox_crtc_destroy(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	kfree(crtc);
+}
+
+static const struct drm_crtc_funcs vbox_crtc_funcs = {
+	.cursor_move = vbox_cursor_move,
+	.cursor_set2 = vbox_cursor_set2,
+	.reset = vbox_crtc_reset,
+	.set_config = drm_crtc_helper_set_config,
+	/* .gamma_set = vbox_crtc_gamma_set, */
+	.destroy = vbox_crtc_destroy,
+};
+
+static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
+{
+	struct vbox_crtc *vbox_crtc;
+
+	vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL);
+	if (!vbox_crtc)
+		return NULL;
+
+	vbox_crtc->crtc_id = i;
+
+	drm_crtc_init(dev, &vbox_crtc->base, &vbox_crtc_funcs);
+	drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
+	drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
+
+	return vbox_crtc;
+}
+
+static void vbox_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
+						    *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+
+	/* pick the first encoder id */
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
+
+	return NULL;
+}
+
+static const struct drm_encoder_funcs vbox_enc_funcs = {
+	.destroy = vbox_encoder_destroy,
+};
+
+static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool vbox_mode_fixup(struct drm_encoder *encoder,
+			    const struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void vbox_encoder_mode_set(struct drm_encoder *encoder,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void vbox_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void vbox_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = {
+	.dpms = vbox_encoder_dpms,
+	.mode_fixup = vbox_mode_fixup,
+	.prepare = vbox_encoder_prepare,
+	.commit = vbox_encoder_commit,
+	.mode_set = vbox_encoder_mode_set,
+};
+
+static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
+					     unsigned int i)
+{
+	struct vbox_encoder *vbox_encoder;
+
+	vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL);
+	if (!vbox_encoder)
+		return NULL;
+
+	drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
+			 DRM_MODE_ENCODER_DAC, NULL);
+	drm_encoder_helper_add(&vbox_encoder->base, &vbox_enc_helper_funcs);
+
+	vbox_encoder->base.possible_crtcs = 1 << i;
+	return &vbox_encoder->base;
+}
+
+/**
+ * Generate EDID data with a mode-unique serial number for the virtual
+ * monitor to try to persuade Unity that different modes correspond to
+ * different monitors and it should not try to force the same resolution on
+ * them.
+ */
+static void vbox_set_edid(struct drm_connector *connector, int width,
+			  int height)
+{
+	enum { EDID_SIZE = 128 };
+	unsigned char edid[EDID_SIZE] = {
+		0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,	/* header */
+		0x58, 0x58,	/* manufacturer (VBX) */
+		0x00, 0x00,	/* product code */
+		0x00, 0x00, 0x00, 0x00,	/* serial number goes here */
+		0x01,		/* week of manufacture */
+		0x00,		/* year of manufacture */
+		0x01, 0x03,	/* EDID version */
+		0x80,		/* capabilities - digital */
+		0x00,		/* horiz. res in cm, zero for projectors */
+		0x00,		/* vert. res in cm */
+		0x78,		/* display gamma (120 == 2.2). */
+		0xEE,		/* features (standby, suspend, off, RGB, std */
+				/* colour space, preferred timing mode) */
+		0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
+		/* chromaticity for standard colour space. */
+		0x00, 0x00, 0x00,	/* no default timings */
+		0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+		    0x01, 0x01,
+		0x01, 0x01, 0x01, 0x01,	/* no standard timings */
+		0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02,
+		    0x02, 0x02,
+		/* descriptor block 1 goes below */
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		/* descriptor block 2, monitor ranges */
+		0x00, 0x00, 0x00, 0xFD, 0x00,
+		0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20,
+		    0x20, 0x20,
+		/* 0-200 Hz vertical, 0-200 kHz horizontal, 1000 MHz pixel clock */
+		0x20,
+		/* descriptor block 3, monitor name */
+		0x00, 0x00, 0x00, 0xFC, 0x00,
+		'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r',
+		'\n',
+		/* descriptor block 4: dummy data */
+		0x00, 0x00, 0x00, 0x10, 0x00,
+		0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+		0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+		0x20,
+		0x00,		/* number of extensions */
+		0x00		/* checksum goes here */
+	};
+	int clock = (width + 6) * (height + 6) * 60 / 10000;
+	unsigned int i, sum = 0;
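+
+	/*
+	 * EDID detailed timings store the pixel clock in units of 10 kHz.
+	 * With the 6 pixel/6 line margin added above, 1024x768 at 60 Hz
+	 * gives 1030 * 774 * 60 / 10000 = 4783, i.e. 47.83 MHz.
+	 */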
+
+	edid[12] = width & 0xff;
+	edid[13] = width >> 8;
+	edid[14] = height & 0xff;
+	edid[15] = height >> 8;
+	edid[54] = clock & 0xff;
+	edid[55] = clock >> 8;
+	edid[56] = width & 0xff;
+	edid[58] = (width >> 4) & 0xf0;
+	edid[59] = height & 0xff;
+	edid[61] = (height >> 4) & 0xf0;
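+
+	/* The 128-byte EDID block must sum to 0 mod 256; set the checksum byte. */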
+	for (i = 0; i < EDID_SIZE - 1; ++i)
+		sum += edid[i];
+	edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
+	drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
+}
+
+static int vbox_get_modes(struct drm_connector *connector)
+{
+	struct vbox_connector *vbox_connector = NULL;
+	struct drm_display_mode *mode = NULL;
+	struct vbox_private *vbox = NULL;
+	unsigned int num_modes = 0;
+	int preferred_width, preferred_height;
+
+	vbox_connector = to_vbox_connector(connector);
+	vbox = connector->dev->dev_private;
+	/*
+	 * Heuristic: we do not want to tell the host that we support dynamic
+	 * resizing unless we feel confident that the user space client using
+	 * the video driver can handle hot-plug events.  So the first time modes
+	 * are queried after a "master" switch we tell the host that we do not,
+	 * and immediately after we send the client a hot-plug notification as
+	 * a test to see if they will respond and query again.
+	 * That is also the reason why capabilities are reported to the host at
+	 * this place in the code rather than elsewhere.
+	 * We need to report the flags location before reporting the IRQ
+	 * capability.
+	 */
+	hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
+				    HOST_FLAGS_OFFSET);
+	if (vbox_connector->vbox_crtc->crtc_id == 0)
+		vbox_report_caps(vbox);
+	if (!vbox->initial_mode_queried) {
+		if (vbox_connector->vbox_crtc->crtc_id == 0) {
+			vbox->initial_mode_queried = true;
+			vbox_report_hotplug(vbox);
+		}
+		return drm_add_modes_noedid(connector, 800, 600);
+	}
+	num_modes = drm_add_modes_noedid(connector, 2560, 1600);
+	preferred_width = vbox_connector->mode_hint.width ?
+			  vbox_connector->mode_hint.width : 1024;
+	preferred_height = vbox_connector->mode_hint.height ?
+			   vbox_connector->mode_hint.height : 768;
+	mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height,
+			    60, false, false, false);
+	if (mode) {
+		mode->type |= DRM_MODE_TYPE_PREFERRED;
+		drm_mode_probed_add(connector, mode);
+		++num_modes;
+	}
+	vbox_set_edid(connector, preferred_width, preferred_height);
+	drm_object_property_set_value(
+		&connector->base, vbox->dev->mode_config.suggested_x_property,
+		vbox_connector->vbox_crtc->x_hint);
+	drm_object_property_set_value(
+		&connector->base, vbox->dev->mode_config.suggested_y_property,
+		vbox_connector->vbox_crtc->y_hint);
+
+	return num_modes;
+}
+
+static int vbox_mode_valid(struct drm_connector *connector,
+			   struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static void vbox_connector_destroy(struct drm_connector *connector)
+{
+	struct vbox_connector *vbox_connector;
+
+	vbox_connector = to_vbox_connector(connector);
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static enum drm_connector_status
+vbox_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct vbox_connector *vbox_connector;
+
+	vbox_connector = to_vbox_connector(connector);
+
+	return vbox_connector->mode_hint.disconnected ?
+	    connector_status_disconnected : connector_status_connected;
+}
+
+static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
+			   u32 max_y)
+{
+	struct vbox_connector *vbox_connector;
+	struct drm_device *dev;
+	struct drm_display_mode *mode, *iterator;
+
+	vbox_connector = to_vbox_connector(connector);
+	dev = vbox_connector->base.dev;
+	list_for_each_entry_safe(mode, iterator, &connector->modes, head) {
+		list_del(&mode->head);
+		drm_mode_destroy(dev, mode);
+	}
+
+	return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
+}
+
+static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
+	.mode_valid = vbox_mode_valid,
+	.get_modes = vbox_get_modes,
+	.best_encoder = vbox_best_single_encoder,
+};
+
+static const struct drm_connector_funcs vbox_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = vbox_connector_detect,
+	.fill_modes = vbox_fill_modes,
+	.destroy = vbox_connector_destroy,
+};
+
+static int vbox_connector_init(struct drm_device *dev,
+			       struct vbox_crtc *vbox_crtc,
+			       struct drm_encoder *encoder)
+{
+	struct vbox_connector *vbox_connector;
+	struct drm_connector *connector;
+
+	vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL);
+	if (!vbox_connector)
+		return -ENOMEM;
+
+	connector = &vbox_connector->base;
+	vbox_connector->vbox_crtc = vbox_crtc;
+
+	drm_connector_init(dev, connector, &vbox_connector_funcs,
+			   DRM_MODE_CONNECTOR_VGA);
+	drm_connector_helper_add(connector, &vbox_connector_helper_funcs);
+
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	drm_mode_create_suggested_offset_properties(dev);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_x_property, -1);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_y_property, -1);
+	drm_connector_register(connector);
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	return 0;
+}
+
+int vbox_mode_init(struct drm_device *dev)
+{
+	struct vbox_private *vbox = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct vbox_crtc *vbox_crtc;
+	unsigned int i;
+	int ret;
+
+	/* vbox_cursor_init(dev); */
+	for (i = 0; i < vbox->num_crtcs; ++i) {
+		vbox_crtc = vbox_crtc_init(dev, i);
+		if (!vbox_crtc)
+			return -ENOMEM;
+		encoder = vbox_encoder_init(dev, i);
+		if (!encoder)
+			return -ENOMEM;
+		ret = vbox_connector_init(dev, vbox_crtc, encoder);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void vbox_mode_fini(struct drm_device *dev)
+{
+	/* vbox_cursor_fini(dev); */
+}
+
+/**
+ * Copy the ARGB image and generate the mask, which is needed in case the host
+ * does not support ARGB cursors.  The mask is a 1BPP bitmap with the bit set
+ * if the corresponding alpha value in the ARGB image is greater than 0xF0.
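+ *
+ * For example, a 64x64 cursor gives line_size = (64 + 7) / 8 = 8 bytes, so
+ * the 1BPP mask occupies 8 * 64 = 512 bytes and the ARGB pixel data starts
+ * at dst + mask_size.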
+ */
+static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
+			      size_t mask_size)
+{
+	size_t line_size = (width + 7) / 8;
+	u32 i, j;
+
+	memcpy(dst + mask_size, src, width * height * 4);
+	for (i = 0; i < height; ++i)
+		for (j = 0; j < width; ++j)
+			if (((u32 *)src)[i * width + j] > 0xf0000000)
+				dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
+}
+
+static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
+			    u32 handle, u32 width, u32 height,
+			    s32 hot_x, s32 hot_y)
+{
+	struct vbox_private *vbox = crtc->dev->dev_private;
+	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+	struct ttm_bo_kmap_obj uobj_map;
+	size_t data_size, mask_size;
+	struct drm_gem_object *obj;
+	u32 flags, caps = 0;
+	struct vbox_bo *bo;
+	bool src_isiomem;
+	u8 *dst = NULL;
+	u8 *src;
+	int ret;
+
+	/*
+	 * Re-set this regularly as in 5.0.20 and earlier the information was
+	 * lost on save and restore.
+	 */
+	hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
+				   vbox->input_mapping_width,
+				   vbox->input_mapping_height);
+	if (!handle) {
+		bool cursor_enabled = false;
+		struct drm_crtc *crtci;
+
+		/* Hide cursor. */
+		vbox_crtc->cursor_enabled = false;
+		list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
+				    head) {
+			if (to_vbox_crtc(crtci)->cursor_enabled)
+				cursor_enabled = true;
+		}
+
+		if (!cursor_enabled)
+			hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
+						   0, 0, NULL, 0);
+		return 0;
+	}
+
+	vbox_crtc->cursor_enabled = true;
+
+	if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
+	    width == 0 || height == 0)
+		return -EINVAL;
+
+	ret = hgsmi_query_conf(vbox->guest_pool,
+			       VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
+	if (ret)
+		return ret;
+
+	if (!(caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
+		/*
+		 * -EINVAL means cursor_set2() not supported, -EAGAIN means
+		 * retry at once.
+		 */
+		return -EBUSY;
+	}
+
+	obj = drm_gem_object_lookup(file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+		return -ENOENT;
+	}
+
+	bo = gem_to_vbox_bo(obj);
+	ret = vbox_bo_reserve(bo, false);
+	if (ret)
+		goto out_unref_obj;
+
+	/*
+	 * The mask must be calculated based on the alpha
+	 * channel, one bit per ARGB word, and must be 32-bit
+	 * padded.
+	 */
+	mask_size = ((width + 7) / 8 * height + 3) & ~3;
+	data_size = width * height * 4 + mask_size;
+	vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width);
+	vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height);
+	vbox->cursor_width = width;
+	vbox->cursor_height = height;
+	vbox->cursor_data_size = data_size;
+	dst = vbox->cursor_data;
+
+	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
+	if (ret) {
+		vbox->cursor_data_size = 0;
+		goto out_unreserve_bo;
+	}
+
+	src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
+	if (src_isiomem) {
+		DRM_ERROR("src cursor bo not in main memory\n");
+		ret = -EIO;
+		goto out_unmap_bo;
+	}
+
+	copy_cursor_image(src, dst, width, height, mask_size);
+
+	flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
+		VBOX_MOUSE_POINTER_ALPHA;
+	ret = hgsmi_update_pointer_shape(vbox->guest_pool, flags,
+					 vbox->cursor_hot_x, vbox->cursor_hot_y,
+					 width, height, dst, data_size);
+out_unmap_bo:
+	ttm_bo_kunmap(&uobj_map);
+out_unreserve_bo:
+	vbox_bo_unreserve(bo);
+out_unref_obj:
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct vbox_private *vbox = crtc->dev->dev_private;
+	u32 flags = VBOX_MOUSE_POINTER_VISIBLE |
+	    VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA;
+	s32 crtc_x =
+	    vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint;
+	s32 crtc_y =
+	    vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint;
+	u32 host_x, host_y;
+	u32 hot_x = 0;
+	u32 hot_y = 0;
+	int ret;
+
+	/*
+	 * We compare these to unsigned values later and don't
+	 * need to handle negative values.
+	 */
+	if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0)
+		return 0;
+
+	ret = hgsmi_cursor_position(vbox->guest_pool, true, x + crtc_x,
+				    y + crtc_y, &host_x, &host_y);
+
+	/*
+	 * The only reason we have vbox_cursor_move() is that some older clients
+	 * might use DRM_IOCTL_MODE_CURSOR instead of DRM_IOCTL_MODE_CURSOR2 and
+	 * use DRM_MODE_CURSOR_MOVE to set the hot-spot.
+	 *
+	 * However VirtualBox 5.0.20 and earlier has a bug causing it to return
+	 * 0,0 as host cursor location after a save and restore.
+	 *
+	 * To work around this we ignore a 0, 0 return, since missing the odd
+	 * time when it legitimately happens is not going to hurt much.
+	 */
+	if (ret || (host_x == 0 && host_y == 0))
+		return ret;
+
+	if (x + crtc_x < host_x)
+		hot_x = min(host_x - x - crtc_x, vbox->cursor_width);
+	if (y + crtc_y < host_y)
+		hot_y = min(host_y - y - crtc_y, vbox->cursor_height);
+
+	if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y)
+		return 0;
+
+	vbox->cursor_hot_x = hot_x;
+	vbox->cursor_hot_y = hot_y;
+
+	return hgsmi_update_pointer_shape(vbox->guest_pool, flags,
+			hot_x, hot_y, vbox->cursor_width, vbox->cursor_height,
+			vbox->cursor_data, vbox->cursor_data_size);
+}
diff --git a/drivers/staging/vboxvideo/vbox_prime.c b/drivers/staging/vboxvideo/vbox_prime.c
new file mode 100644
index 000000000000..b7453e427a1d
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_prime.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ * Copyright 2017 Canonical
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andreas Pokorny
+ */
+
+#include "vbox_drv.h"
+
+/*
+ * Based on qxl_prime.c:
+ * Empty Implementations as there should not be any other driver for a virtual
+ * device that might share buffers with vboxvideo
+ */
+
+int vbox_gem_prime_pin(struct drm_gem_object *obj)
+{
+	WARN_ONCE(1, "not implemented");
+	return -ENOSYS;
+}
+
+void vbox_gem_prime_unpin(struct drm_gem_object *obj)
+{
+	WARN_ONCE(1, "not implemented");
+}
+
+struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	WARN_ONCE(1, "not implemented");
+	return ERR_PTR(-ENOSYS);
+}
+
+struct drm_gem_object *vbox_gem_prime_import_sg_table(
+	struct drm_device *dev, struct dma_buf_attachment *attach,
+	struct sg_table *table)
+{
+	WARN_ONCE(1, "not implemented");
+	return ERR_PTR(-ENOSYS);
+}
+
+void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	WARN_ONCE(1, "not implemented");
+	return ERR_PTR(-ENOSYS);
+}
+
+void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	WARN_ONCE(1, "not implemented");
+}
+
+int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
+{
+	WARN_ONCE(1, "not implemented");
+	return -ENOSYS;
+}
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
new file mode 100644
index 000000000000..34a905d40735
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_ttm.c
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Michael Thayer <michael.thayer@oracle.com>
+ */
+#include "vbox_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct vbox_private, ttm.bdev);
+}
+
+static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+/**
+ * Adds the vbox memory manager object/structures to the global memory manager.
+ */
+static int vbox_ttm_global_init(struct vbox_private *vbox)
+{
+	struct drm_global_reference *global_ref;
+	int ret;
+
+	global_ref = &vbox->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &vbox_ttm_mem_global_init;
+	global_ref->release = &vbox_ttm_mem_global_release;
+	ret = drm_global_item_ref(global_ref);
+	if (ret) {
+		DRM_ERROR("Failed setting up TTM memory subsystem.\n");
+		return ret;
+	}
+
+	vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
+	global_ref = &vbox->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+
+	ret = drm_global_item_ref(global_ref);
+	if (ret) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&vbox->ttm.mem_global_ref);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * Removes the vbox memory manager object from the global memory manager.
+ */
+static void vbox_ttm_global_release(struct vbox_private *vbox)
+{
+	drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&vbox->ttm.mem_global_ref);
+}
+
+static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+	struct vbox_bo *bo;
+
+	bo = container_of(tbo, struct vbox_bo, bo);
+
+	drm_gem_object_release(&bo->gem);
+	kfree(bo);
+}
+
+static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &vbox_bo_ttm_destroy)
+		return true;
+
+	return false;
+}
+
+static int
+vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
+		      struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct vbox_bo *vboxbo = vbox_bo(bo);
+
+	if (!vbox_ttm_bo_is_vbox_bo(bo))
+		return;
+
+	vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
+	*pl = vboxbo->placement;
+}
+
+static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
+				 struct file *filp)
+{
+	return 0;
+}
+
+static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				   struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct vbox_private *vbox = vbox_bdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
+				 struct ttm_mem_reg *mem)
+{
+}
+
+static int vbox_bo_move(struct ttm_buffer_object *bo,
+			bool evict, bool interruptible,
+			bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+	return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
+}
+
+static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
+{
+	ttm_tt_fini(tt);
+	kfree(tt);
+}
+
+static struct ttm_backend_func vbox_tt_backend_func = {
+	.destroy = &vbox_ttm_backend_destroy,
+};
+
+static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
+					 unsigned long size,
+					 u32 page_flags,
+					 struct page *dummy_read_page)
+{
+	struct ttm_tt *tt;
+
+	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+	if (!tt)
+		return NULL;
+
+	tt->func = &vbox_tt_backend_func;
+	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+		kfree(tt);
+		return NULL;
+	}
+
+	return tt;
+}
+
+static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	return ttm_pool_populate(ttm);
+}
+
+static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver vbox_bo_driver = {
+	.ttm_tt_create = vbox_ttm_tt_create,
+	.ttm_tt_populate = vbox_ttm_tt_populate,
+	.ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
+	.init_mem_type = vbox_bo_init_mem_type,
+	.eviction_valuable = ttm_bo_eviction_valuable,
+	.evict_flags = vbox_bo_evict_flags,
+	.move = vbox_bo_move,
+	.verify_access = vbox_bo_verify_access,
+	.io_mem_reserve = &vbox_ttm_io_mem_reserve,
+	.io_mem_free = &vbox_ttm_io_mem_free,
+	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
+};
+
+int vbox_mm_init(struct vbox_private *vbox)
+{
+	int ret;
+	struct drm_device *dev = vbox->dev;
+	struct ttm_bo_device *bdev = &vbox->ttm.bdev;
+
+	ret = vbox_ttm_global_init(vbox);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&vbox->ttm.bdev,
+				 vbox->ttm.bo_global_ref.ref.object,
+				 &vbox_bo_driver,
+				 dev->anon_inode->i_mapping,
+				 DRM_FILE_PAGE_OFFSET, true);
+	if (ret) {
+		DRM_ERROR("Error initialising bo driver; %d\n", ret);
+		goto err_ttm_global_release;
+	}
+
+	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+			     vbox->available_vram_size >> PAGE_SHIFT);
+	if (ret) {
+		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+		goto err_device_release;
+	}
+
+#ifdef DRM_MTRR_WC
+	vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+				     pci_resource_len(dev->pdev, 0),
+				     DRM_MTRR_WC);
+#else
+	vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+					 pci_resource_len(dev->pdev, 0));
+#endif
+	return 0;
+
+err_device_release:
+	ttm_bo_device_release(&vbox->ttm.bdev);
+err_ttm_global_release:
+	vbox_ttm_global_release(vbox);
+	return ret;
+}
+
+void vbox_mm_fini(struct vbox_private *vbox)
+{
+#ifdef DRM_MTRR_WC
+	drm_mtrr_del(vbox->fb_mtrr,
+		     pci_resource_start(vbox->dev->pdev, 0),
+		     pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC);
+#else
+	arch_phys_wc_del(vbox->fb_mtrr);
+#endif
+	ttm_bo_device_release(&vbox->ttm.bdev);
+	vbox_ttm_global_release(vbox);
+}
+
+void vbox_ttm_placement(struct vbox_bo *bo, int domain)
+{
+	unsigned int i;
+	u32 c = 0;
+
+	bo->placement.placement = bo->placements;
+	bo->placement.busy_placement = bo->placements;
+
+	if (domain & TTM_PL_FLAG_VRAM)
+		bo->placements[c++].flags =
+		    TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+	if (domain & TTM_PL_FLAG_SYSTEM)
+		bo->placements[c++].flags =
+		    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (!c)
+		bo->placements[c++].flags =
+		    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+	bo->placement.num_placement = c;
+	bo->placement.num_busy_placement = c;
+
+	for (i = 0; i < c; ++i) {
+		bo->placements[i].fpfn = 0;
+		bo->placements[i].lpfn = 0;
+	}
+}
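+
+/*
+ * For example, vbox_bo_create() below passes TTM_PL_FLAG_VRAM |
+ * TTM_PL_FLAG_SYSTEM so that a new buffer may live in either domain, while
+ * vbox_bo_pin() re-runs this with a single domain to keep the buffer there.
+ */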
+
+int vbox_bo_create(struct drm_device *dev, int size, int align,
+		   u32 flags, struct vbox_bo **pvboxbo)
+{
+	struct vbox_private *vbox = dev->dev_private;
+	struct vbox_bo *vboxbo;
+	size_t acc_size;
+	int ret;
+
+	vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
+	if (!vboxbo)
+		return -ENOMEM;
+
+	ret = drm_gem_object_init(dev, &vboxbo->gem, size);
+	if (ret)
+		goto err_free_vboxbo;
+
+	vboxbo->bo.bdev = &vbox->ttm.bdev;
+
+	vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+	acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
+				       sizeof(struct vbox_bo));
+
+	ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
+			  ttm_bo_type_device, &vboxbo->placement,
+			  align >> PAGE_SHIFT, false, NULL, acc_size,
+			  NULL, NULL, vbox_bo_ttm_destroy);
+	if (ret)
+		goto err_free_vboxbo;
+
+	*pvboxbo = vboxbo;
+
+	return 0;
+
+err_free_vboxbo:
+	kfree(vboxbo);
+	return ret;
+}
+
+static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
+{
+	return bo->bo.offset;
+}
+
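+/*
+ * Pinning is reference counted: only the first pin validates the buffer into
+ * the requested domain with TTM_PL_FLAG_NO_EVICT set; later calls just
+ * increment pin_count.
+ */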
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+	int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = vbox_bo_gpu_offset(bo);
+
+		return 0;
+	}
+
+	vbox_ttm_placement(bo, pl_flag);
+
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	bo->pin_count = 1;
+
+	if (gpu_addr)
+		*gpu_addr = vbox_bo_gpu_offset(bo);
+
+	return 0;
+}
+
+int vbox_bo_unpin(struct vbox_bo *bo)
+{
+	int i, ret;
+
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Move a vbox-owned buffer object to system memory if no one else has it
+ * pinned.  The caller must have pinned it previously, and this call will
+ * release the caller's pin.
+ */
+int vbox_bo_push_sysram(struct vbox_bo *bo)
+{
+	int i, ret;
+
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	if (bo->kmap.virtual)
+		ttm_bo_kunmap(&bo->kmap);
+
+	vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret) {
+		DRM_ERROR("pushing to VRAM failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct vbox_private *vbox;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return -EINVAL;
+
+	file_priv = filp->private_data;
+	vbox = file_priv->minor->dev->dev_private;
+
+	return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
+}
diff --git a/drivers/staging/vboxvideo/vboxvideo.h b/drivers/staging/vboxvideo/vboxvideo.h
new file mode 100644
index 000000000000..d835d75d761c
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo.h
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ */
+
+#ifndef __VBOXVIDEO_H__
+#define __VBOXVIDEO_H__
+
+/*
+ * This should be in sync with monitorCount <xsd:maxInclusive value="64"/> in
+ * src/VBox/Main/xml/VirtualBox-settings-common.xsd
+ */
+#define VBOX_VIDEO_MAX_SCREENS 64
+
+/*
+ * The last 4096 bytes of the guest VRAM contain the generic info for all
+ * DualView chunks: sizes and offsets of chunks. This is filled in by the
+ * miniport.
+ *
+ * The last 4096 bytes of each chunk contain chunk-specific data: framebuffer
+ * info, etc. This is used exclusively by the corresponding instance of a
+ * display driver.
+ *
+ * The VRAM layout:
+ *   Last 4096 bytes - Adapter information area.
+ *   4096 bytes aligned miniport heap (value specified in the config rounded up).
+ *   Slack - what is left after dividing the VRAM.
+ *   4096 bytes aligned framebuffers:
+ *     last 4096 bytes of each framebuffer is the display information area.
+ *
+ * The Virtual Graphics Adapter information in the guest VRAM is stored by the
+ * guest video driver using structures prepended by VBOXVIDEOINFOHDR.
+ *
+ * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO
+ * the host starts to process the info. The first element at the start of
+ * the 4096 bytes region should normally be a LINK that points to the
+ * actual information chain. That way the guest driver can have some
+ * fixed layout of the information memory block and just rewrite
+ * the link to point to the relevant memory chain.
+ *
+ * The processing stops at the END element.
+ *
+ * The host can access the memory only when the port IO is processed.
+ * All data that will be needed later must be copied from these 4096 bytes.
+ * But other VRAM can be used by the host until the mode is disabled.
+ *
+ * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO
+ * to disable the mode.
+ *
+ * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
+ * from the host and issue commands to the host.
+ *
+ * Once the guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, the
+ * following operations with the VBE data register can be performed:
+ *
+ * Operation            Result
+ * write 16 bit value   NOP
+ * read 16 bit value    count of monitors
+ * write 32 bit value   set the vbox cmd value; the cmd is processed by the host
+ * read 32 bit value    result of the last vbox command is returned
+ */
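+
+/*
+ * For example, the guest can query the monitor count by selecting the index
+ * and doing a 16 bit read of the data register.  A sketch, assuming the
+ * VBE_DISPI_IOPORT_* port numbers from the VBE register header:
+ *
+ *	outw(VBE_DISPI_INDEX_VBOX_VIDEO, VBE_DISPI_IOPORT_INDEX);
+ *	monitor_count = inw(VBE_DISPI_IOPORT_DATA);
+ */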
+
+/**
+ * VBVA command header.
+ *
+ * @todo Where does this fit in?
+ */
+struct vbva_cmd_hdr {
+	/** Coordinates of the affected rectangle. */
+	s16 x;
+	s16 y;
+	u16 w;
+	u16 h;
+} __packed;
+
+/** @name VBVA ring defines.
+ *
+ * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts of
+ * data, for example big bitmaps which do not fit in the buffer.
+ *
+ * The guest starts writing to the buffer by initializing a record entry in
+ * the records queue. VBVA_F_RECORD_PARTIAL indicates that the record is
+ * being written. As data is written to the ring buffer, the guest increases
+ * free_offset.
+ *
+ * The host reads the records on flushes and processes all completed records.
+ * When the host encounters a situation where only a partial record is
+ * present and len_and_flags & ~VBVA_F_RECORD_PARTIAL >=
+ * VBVA_RING_BUFFER_SIZE - VBVA_RING_BUFFER_THRESHOLD, the host fetches all
+ * record data and updates data_offset. After that, on each flush the host
+ * continues fetching the data until the record is completed.
+ *
+ */
+#define VBVA_RING_BUFFER_SIZE        (4194304 - 1024)
+#define VBVA_RING_BUFFER_THRESHOLD   (4096)
+
+#define VBVA_MAX_RECORDS (64)
+
+#define VBVA_F_MODE_ENABLED         0x00000001u
+#define VBVA_F_MODE_VRDP            0x00000002u
+#define VBVA_F_MODE_VRDP_RESET      0x00000004u
+#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u
+
+#define VBVA_F_STATE_PROCESSING     0x00010000u
+
+#define VBVA_F_RECORD_PARTIAL       0x80000000u
+
+/**
+ * VBVA record.
+ */
+struct vbva_record {
+	/** The length of the record. Changed by guest. */
+	u32 len_and_flags;
+} __packed;
+
+/*
+ * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of
+ * the runtime heapsimple API. Use a minimum of 2 pages here, because the info
+ * area may also contain other data (for example the hgsmi_host_flags
+ * structure).
+ */
+#define VBVA_ADAPTER_INFORMATION_SIZE 65536
+#define VBVA_MIN_BUFFER_SIZE          65536
+
+/* The value for port IO to tell the adapter to disable the adapter memory. */
+#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY        0xFFFFFFFF
+
+/* The value for port IO to let the adapter interpret the adapter memory. */
+#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY      0x00000000
+
+/* The value for port IO to let the adapter interpret the display memory.
+ * The display number is encoded in the low 16 bits.
+ */
+#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000
+
+struct vbva_host_flags {
+	u32 host_events;
+	u32 supported_orders;
+} __packed;
+
+struct vbva_buffer {
+	struct vbva_host_flags host_flags;
+
+	/* The offset where the data starts in the buffer. */
+	u32 data_offset;
+	/* The offset where the next data must be placed in the buffer. */
+	u32 free_offset;
+
+	/* The queue of record descriptions. */
+	struct vbva_record records[VBVA_MAX_RECORDS];
+	u32 record_first_index;
+	u32 record_free_index;
+
+	/* Space to leave free when large partial records are transferred. */
+	u32 partial_write_tresh;
+
+	u32 data_len;
+	/* variable size for the rest of the vbva_buffer area in VRAM. */
+	u8 data[0];
+} __packed;
+
+#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
+
+/* guest->host commands */
+#define VBVA_QUERY_CONF32			 1
+#define VBVA_SET_CONF32				 2
+#define VBVA_INFO_VIEW				 3
+#define VBVA_INFO_HEAP				 4
+#define VBVA_FLUSH				 5
+#define VBVA_INFO_SCREEN			 6
+#define VBVA_ENABLE				 7
+#define VBVA_MOUSE_POINTER_SHAPE		 8
+/* informs host about HGSMI caps, see vbva_caps below */
+#define VBVA_INFO_CAPS				12
+/* configures scanline, see VBVASCANLINECFG below */
+#define VBVA_SCANLINE_CFG			13
+/* requests scanline info, see VBVASCANLINEINFO below */
+#define VBVA_SCANLINE_INFO			14
+/* inform host about VBVA command submission */
+#define VBVA_CMDVBVA_SUBMIT			16
+/* inform host about VBVA command flush */
+#define VBVA_CMDVBVA_FLUSH			17
+/* G->H DMA command */
+#define VBVA_CMDVBVA_CTL			18
+/* Query most recent mode hints sent */
+#define VBVA_QUERY_MODE_HINTS			19
+/**
+ * Report the guest virtual desktop position and size for mapping host and
+ * guest pointer positions.
+ */
+#define VBVA_REPORT_INPUT_MAPPING		20
+/** Report the guest cursor position and query the host position. */
+#define VBVA_CURSOR_POSITION			21
+
+/* host->guest commands */
+#define VBVAHG_EVENT				1
+#define VBVAHG_DISPLAY_CUSTOM			2
+
+/* vbva_conf32::index */
+#define VBOX_VBVA_CONF32_MONITOR_COUNT		0
+#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE		1
+/**
+ * Returns VINF_SUCCESS if the host can report mode hints via VBVA.
+ * Set value to VERR_NOT_SUPPORTED before calling.
+ */
+#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING	2
+/**
+ * Returns VINF_SUCCESS if the host can report guest cursor enabled status via
+ * VBVA.  Set value to VERR_NOT_SUPPORTED before calling.
+ */
+#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING	3
+/**
+ * Returns the currently available host cursor capabilities.  Available if
+ * vbva_conf32::VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success.
+ * @see VMMDevReqMouseStatus::mouseFeatures.
+ */
+#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES	4
+/** Returns the supported flags in vbva_infoscreen::flags. */
+#define VBOX_VBVA_CONF32_SCREEN_FLAGS		5
+/** Returns the max size of VBVA record. */
+#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE	6
+
+struct vbva_conf32 {
+	u32 index;
+	u32 value;
+} __packed;
+
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0   BIT(0)
+/**
+ * Guest cursor capability: can the host show a hardware cursor at the host
+ * pointer location?
+ */
+#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE    BIT(1)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2   BIT(2)
+/** Reserved for historical reasons.  Must always be unset. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3   BIT(3)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4   BIT(4)
+/** Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5   BIT(5)
+
+struct vbva_infoview {
+	/* Index of the screen, assigned by the guest. */
+	u32 view_index;
+
+	/* The screen offset in VRAM, the framebuffer starts here. */
+	u32 view_offset;
+
+	/* The size of the VRAM memory that can be used for the view. */
+	u32 view_size;
+
+	/* The recommended maximum size of the VRAM memory for the screen. */
+	u32 max_screen_size;
+} __packed;
+
+struct vbva_flush {
+	u32 reserved;
+} __packed;
+
+/* vbva_infoscreen::flags */
+#define VBVA_SCREEN_F_NONE			0x0000
+#define VBVA_SCREEN_F_ACTIVE			0x0001
+/**
+ * The virtual monitor has been disabled by the guest and should be removed
+ * by the host and ignored for purposes of pointer position calculation.
+ */
+#define VBVA_SCREEN_F_DISABLED			0x0002
+/**
+ * The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using width, height, etc values from the vbva_infoscreen
+ * request.
+ */
+#define VBVA_SCREEN_F_BLANK			0x0004
+/**
+ * The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using the previous mode values for width, height, etc.
+ */
+#define VBVA_SCREEN_F_BLANK2			0x0008
+
+struct vbva_infoscreen {
+	/* Which view contains the screen. */
+	u32 view_index;
+
+	/* Physical X origin relative to the primary screen. */
+	s32 origin_x;
+
+	/* Physical Y origin relative to the primary screen. */
+	s32 origin_y;
+
+	/* Offset of visible framebuffer relative to the framebuffer start. */
+	u32 start_offset;
+
+	/* The scan line size in bytes. */
+	u32 line_size;
+
+	/* Width of the screen. */
+	u32 width;
+
+	/* Height of the screen. */
+	u32 height;
+
+	/* Color depth. */
+	u16 bits_per_pixel;
+
+	/* VBVA_SCREEN_F_* */
+	u16 flags;
+} __packed;
+
+/* vbva_enable::flags */
+#define VBVA_F_NONE				0x00000000
+#define VBVA_F_ENABLE				0x00000001
+#define VBVA_F_DISABLE				0x00000002
+/* extended VBVA to be used with WDDM */
+#define VBVA_F_EXTENDED				0x00000004
+/* vbva offset is absolute VRAM offset */
+#define VBVA_F_ABSOFFSET			0x00000008
+
+struct vbva_enable {
+	u32 flags;
+	u32 offset;
+	s32 result;
+} __packed;
+
+struct vbva_enable_ex {
+	struct vbva_enable base;
+	u32 screen_id;
+} __packed;
+
+struct vbva_mouse_pointer_shape {
+	/* The host result. */
+	s32 result;
+
+	/* VBOX_MOUSE_POINTER_* bit flags. */
+	u32 flags;
+
+	/* X coordinate of the hot spot. */
+	u32 hot_X;
+
+	/* Y coordinate of the hot spot. */
+	u32 hot_y;
+
+	/* Width of the pointer in pixels. */
+	u32 width;
+
+	/* Height of the pointer in scanlines. */
+	u32 height;
+
+	/* Pointer data.
+	 *
+	 ****
+	 * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color)
+	 * mask.
+	 *
+	 * For pointers without alpha channel the XOR mask pixels are 32 bit
+	 * values: (lsb)BGR0(msb). For pointers with alpha channel the XOR mask
+	 * consists of (lsb)BGRA(msb) 32 bit values.
+	 *
+	 * The guest driver must create the AND mask for pointers with an
+	 * alpha channel, so that if the host does not support alpha the
+	 * pointer can be displayed as a normal color pointer. The AND mask
+	 * can be constructed from the alpha values. For example, an alpha
+	 * value >= 0xf0 means bit 0 in the AND mask.
+	 *
+	 * The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND
+	 * mask, therefore, is and_len = (width + 7) / 8 * height. The padding
+	 * bits at the end of any scanline are undefined.
+	 *
+	 * The XOR mask follows the AND mask on the next 4 bytes aligned offset:
+	 * u8 *xor = and + ((and_len + 3) & ~3)
+	 * Bytes in the gap between the AND and the XOR mask are undefined.
+	 * XOR mask scanlines have no gap between them and size of XOR mask is:
+	 * xor_len = width * 4 * height.
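+	 *
+	 * For example, a 32x32 pointer has and_len = (32 + 7) / 8 * 32 = 128
+	 * bytes; 128 is already 4 byte aligned, so the XOR mask starts at
+	 * and + 128 and takes xor_len = 32 * 4 * 32 = 4096 bytes.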
+	 ****
+	 *
+	 * Preallocate 4 bytes for accessing actual data as p->data.
+	 */
+	u8 data[4];
+} __packed;
+
+/**
+ * @name vbva_mouse_pointer_shape::flags
+ * @note The VBOX_MOUSE_POINTER_* flags are used in the guest video driver,
+ *       values must be <= 0x8000 and must not be changed. (Try to make more
+ *       sense of this, please.)
+ * @{
+ */
+
+/** pointer is visible */
+#define VBOX_MOUSE_POINTER_VISIBLE		0x0001
+/** pointer has alpha channel */
+#define VBOX_MOUSE_POINTER_ALPHA		0x0002
+/** pointerData contains new pointer shape */
+#define VBOX_MOUSE_POINTER_SHAPE		0x0004
+
+/** @} */
+
+/*
+ * The guest driver can handle asynchronous guest command completion by
+ * reading the command offset from an I/O port.
+ */
+#define VBVACAPS_COMPLETEGCMD_BY_IOREAD		0x00000001
+/* the guest driver can handle video adapter IRQs */
+#define VBVACAPS_IRQ				0x00000002
+/** The guest can read video mode hints sent via VBVA. */
+#define VBVACAPS_VIDEO_MODE_HINTS		0x00000004
+/** The guest can switch to a software cursor on demand. */
+#define VBVACAPS_DISABLE_CURSOR_INTEGRATION	0x00000008
+/** The guest does not depend on host handling the VBE registers. */
+#define VBVACAPS_USE_VBVA_ONLY			0x00000010
+
+struct vbva_caps {
+	s32 rc;
+	u32 caps;
+} __packed;
+
+/** Query the most recent mode hints received from the host. */
+struct vbva_query_mode_hints {
+	/** The maximum number of screens to return hints for. */
+	u16 hints_queried_count;
+	/** The size of the mode hint structures directly following this one. */
+	u16 hint_structure_guest_size;
+	/** Return code for the operation. Initialise to VERR_NOT_SUPPORTED. */
+	s32 rc;
+} __packed;
+
+/**
+ * Structure in which a mode hint is returned. The guest allocates an array
+ * of these immediately after the vbva_query_mode_hints structure.
+ * To accommodate future extensions, the vbva_query_mode_hints structure
+ * specifies the size of the vbva_modehint structures allocated by the guest,
+ * and the host only fills out structure elements which fit into that size. The
+ * host should fill any unused members (e.g. dx, dy) or structure space at the
+ * end with ~0. The whole structure can legally be set to ~0 to skip a screen.
+ */
+struct vbva_modehint {
+	u32 magic;
+	u32 cx;
+	u32 cy;
+	u32 bpp;		/* Which has never been used... */
+	u32 display;
+	u32 dx;			/**< X offset into the virtual frame-buffer. */
+	u32 dy;			/**< Y offset into the virtual frame-buffer. */
+	u32 enabled;		/* Not flags. Add new members for new flags. */
+} __packed;
+
+#define VBVAMODEHINT_MAGIC 0x0801add9u
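
A minimal hedged sketch of the query sequence this implies, assuming a
two-screen guest; hgsmi_get_mode_hints() is declared in vboxvideo_guest.h
later in this patch, and the error handling is illustrative:

	static int example_query_hints(struct gen_pool *guest_pool)
	{
		struct vbva_modehint hints[2];
		int ret;

		ret = hgsmi_get_mode_hints(guest_pool, 2, hints);
		if (ret)
			return ret;

		/* A screen the host skipped is filled entirely with ~0. */
		if (hints[0].magic != VBVAMODEHINT_MAGIC)
			return -EINVAL;

		return 0;
	}
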
+
+/**
+ * Report the rectangle relative to which absolute pointer events should be
+ * expressed. This information remains valid until the next VBVA resize event
+ * for any screen, at which time it is reset to the bounding rectangle of all
+ * virtual screens and must be re-set.
+ * @see VBVA_REPORT_INPUT_MAPPING.
+ */
+struct vbva_report_input_mapping {
+	s32 x;	/**< Upper left X co-ordinate relative to the first screen. */
+	s32 y;	/**< Upper left Y co-ordinate relative to the first screen. */
+	u32 cx;	/**< Rectangle width. */
+	u32 cy;	/**< Rectangle height. */
+} __packed;
+
+/**
+ * Report the guest cursor position and query the host one. The host may wish
+ * to use the guest information to re-position its own cursor (though this is
+ * currently unlikely).
+ * @see VBVA_CURSOR_POSITION
+ */
+struct vbva_cursor_position {
+	u32 report_position;	/**< Are we reporting a position? */
+	u32 x;			/**< Guest cursor X position */
+	u32 y;			/**< Guest cursor Y position */
+} __packed;
+
+#endif
diff --git a/drivers/staging/vboxvideo/vboxvideo_guest.h b/drivers/staging/vboxvideo/vboxvideo_guest.h
new file mode 100644
index 000000000000..d09da841711a
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo_guest.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __VBOXVIDEO_GUEST_H__
+#define __VBOXVIDEO_GUEST_H__
+
+#include <linux/genalloc.h>
+#include "vboxvideo.h"
+
+/**
+ * Structure grouping the context needed for sending graphics acceleration
+ * information to the host via VBVA.  Each screen has its own VBVA buffer.
+ */
+struct vbva_buf_ctx {
+	/** Offset of the buffer in the VRAM section for the screen */
+	u32 buffer_offset;
+	/** Length of the buffer in bytes */
+	u32 buffer_length;
+	/** Set if we wrote to the buffer faster than the host could read it */
+	bool buffer_overflow;
+	/** VBVA record that we are currently preparing for the host, or NULL */
+	struct vbva_record *record;
+	/**
+	 * Pointer to the VBVA buffer mapped into the current address space.
+	 * Will be NULL if VBVA is not enabled.
+	 */
+	struct vbva_buffer *vbva;
+};
+
+/**
+ * @name Base HGSMI APIs
+ * @{
+ */
+int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location);
+int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps);
+int hgsmi_test_query_conf(struct gen_pool *ctx);
+int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret);
+int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
+			       u32 hot_x, u32 hot_y, u32 width, u32 height,
+			       u8 *pixels, u32 len);
+int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
+			  u32 x, u32 y, u32 *x_host, u32 *y_host);
+/** @}  */
+
+/**
+ * @name VBVA APIs
+ * @{
+ */
+bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+		 struct vbva_buffer *vbva, s32 screen);
+void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+		  s32 screen);
+bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
+			      struct gen_pool *ctx);
+void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx);
+bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+		const void *p, u32 len);
+void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
+			       u32 buffer_offset, u32 buffer_length);
+/** @}  */
+
+/**
+ * @name Modesetting APIs
+ * @{
+ */
+void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
+				s32 origin_x, s32 origin_y, u32 start_offset,
+				u32 pitch, u32 width, u32 height,
+				u16 bpp, u16 flags);
+int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
+			       u32 width, u32 height);
+int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
+			 struct vbva_modehint *hints);
+/** @}  */
+
+#endif
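
Taken together, the modesetting group above implies a per-screen flow along
these lines; a hedged sketch with invented values (1024x768, 32 bpp, screen 0,
no flags), not a definitive call sequence:

	static void example_modeset(struct gen_pool *guest_pool,
				    struct vbva_buf_ctx *vbva_ctx,
				    struct vbva_buffer *vbva)
	{
		u32 pitch = 1024 * 4;	/* bytes per scanline */

		hgsmi_process_display_info(guest_pool, 0, 0, 0, 0,
					   pitch, 1024, 768, 32, 0);

		if (!vbva_enable(vbva_ctx, guest_pool, vbva, 0))
			return;	/* fall back to unaccelerated operation */
	}
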
diff --git a/drivers/staging/vboxvideo/vboxvideo_vbe.h b/drivers/staging/vboxvideo/vboxvideo_vbe.h
new file mode 100644
index 000000000000..f842f4d9c80a
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo_vbe.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __VBOXVIDEO_VBE_H__
+#define __VBOXVIDEO_VBE_H__
+
+/* GUEST <-> HOST Communication API */
+
+/**
+ * @todo FIXME: Either dynamically ask the host for this or put it somewhere
+ *              high in physical memory like 0xE0000000.
+ */
+
+#define VBE_DISPI_BANK_ADDRESS          0xA0000
+#define VBE_DISPI_BANK_SIZE_KB          64
+
+#define VBE_DISPI_MAX_XRES              16384
+#define VBE_DISPI_MAX_YRES              16384
+#define VBE_DISPI_MAX_BPP               32
+
+#define VBE_DISPI_IOPORT_INDEX          0x01CE
+#define VBE_DISPI_IOPORT_DATA           0x01CF
+
+#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX  0x03C8
+#define VBE_DISPI_IOPORT_DAC_DATA         0x03C9
+
+#define VBE_DISPI_INDEX_ID              0x0
+#define VBE_DISPI_INDEX_XRES            0x1
+#define VBE_DISPI_INDEX_YRES            0x2
+#define VBE_DISPI_INDEX_BPP             0x3
+#define VBE_DISPI_INDEX_ENABLE          0x4
+#define VBE_DISPI_INDEX_BANK            0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH      0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT     0x7
+#define VBE_DISPI_INDEX_X_OFFSET        0x8
+#define VBE_DISPI_INDEX_Y_OFFSET        0x9
+#define VBE_DISPI_INDEX_VBOX_VIDEO      0xa
+#define VBE_DISPI_INDEX_FB_BASE_HI      0xb
+
+#define VBE_DISPI_ID0                   0xB0C0
+#define VBE_DISPI_ID1                   0xB0C1
+#define VBE_DISPI_ID2                   0xB0C2
+#define VBE_DISPI_ID3                   0xB0C3
+#define VBE_DISPI_ID4                   0xB0C4
+
+#define VBE_DISPI_ID_VBOX_VIDEO         0xBE00
+/* The VBOX interface id. Indicates support for VBVA shared memory interface. */
+#define VBE_DISPI_ID_HGSMI              0xBE01
+#define VBE_DISPI_ID_ANYX               0xBE02
+
+#define VBE_DISPI_DISABLED              0x00
+#define VBE_DISPI_ENABLED               0x01
+#define VBE_DISPI_GETCAPS               0x02
+#define VBE_DISPI_8BIT_DAC              0x20
+/**
+ * @note This definition is a BOCHS legacy, used only in the video BIOS
+ *       code and ignored by the emulated hardware.
+ */
+#define VBE_DISPI_LFB_ENABLED           0x40
+#define VBE_DISPI_NOCLEARMEM            0x80
+
+#define VGA_PORT_HGSMI_HOST             0x3b0
+#define VGA_PORT_HGSMI_GUEST            0x3d0
+
+#endif
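
The VBE_DISPI registers above are reached through the classic index/data port
pair: write the register index to VBE_DISPI_IOPORT_INDEX, then access the
value through VBE_DISPI_IOPORT_DATA. A hedged sketch of the conventional
helpers (names are illustrative; outw()/inw() assume x86 port I/O):

	#include <asm/io.h>

	static void vbe_dispi_write(u16 index, u16 value)
	{
		outw(index, VBE_DISPI_IOPORT_INDEX);
		outw(value, VBE_DISPI_IOPORT_DATA);
	}

	static u16 vbe_dispi_read(u16 index)
	{
		outw(index, VBE_DISPI_IOPORT_INDEX);
		return inw(VBE_DISPI_IOPORT_DATA);
	}

For example, probing for HGSMI support would write VBE_DISPI_ID_HGSMI to
VBE_DISPI_INDEX_ID and check whether the same value reads back.
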
diff --git a/drivers/staging/vboxvideo/vbva_base.c b/drivers/staging/vboxvideo/vbva_base.c
new file mode 100644
index 000000000000..c10c782f94e1
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbva_base.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "hgsmi_channels.h"
+
+/*
+ * There is a hardware ring buffer in the graphics device video RAM, formerly
+ * in the VBox VMMDev PCI memory space.
+ * All graphics commands go there, serialized by vbva_buffer_begin_update
+ * and vbva_buffer_end_update.
+ *
+ * free_offset is the write position and data_offset is the read position;
+ * free_offset == data_offset means the buffer is empty. There must always be
+ * a gap between data_offset and free_offset while data are in the buffer.
+ * Only the guest changes free_offset and only the host changes data_offset.
+ */
+
+static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
+{
+	s32 diff = vbva->data_offset - vbva->free_offset;
+
+	return diff > 0 ? diff : vbva->data_len + diff;
+}
+
+static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
+				      const void *p, u32 len, u32 offset)
+{
+	struct vbva_buffer *vbva = vbva_ctx->vbva;
+	u32 bytes_till_boundary = vbva->data_len - offset;
+	u8 *dst = &vbva->data[offset];
+	s32 diff = len - bytes_till_boundary;
+
+	if (diff <= 0) {
+		/* Chunk will not cross buffer boundary. */
+		memcpy(dst, p, len);
+	} else {
+		/* Chunk crosses buffer boundary. */
+		memcpy(dst, p, bytes_till_boundary);
+		memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
+	}
+}
+
+static void vbva_buffer_flush(struct gen_pool *ctx)
+{
+	struct vbva_flush *p;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
+	if (!p)
+		return;
+
+	p->reserved = 0;
+
+	hgsmi_buffer_submit(ctx, p);
+	hgsmi_buffer_free(ctx, p);
+}
+
+bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+		const void *p, u32 len)
+{
+	struct vbva_record *record;
+	struct vbva_buffer *vbva;
+	u32 available;
+
+	vbva = vbva_ctx->vbva;
+	record = vbva_ctx->record;
+
+	if (!vbva || vbva_ctx->buffer_overflow ||
+	    !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
+		return false;
+
+	available = vbva_buffer_available(vbva);
+
+	while (len > 0) {
+		u32 chunk = len;
+
+		if (chunk >= available) {
+			vbva_buffer_flush(ctx);
+			available = vbva_buffer_available(vbva);
+		}
+
+		if (chunk >= available) {
+			if (WARN_ON(available <= vbva->partial_write_tresh)) {
+				vbva_ctx->buffer_overflow = true;
+				return false;
+			}
+			chunk = available - vbva->partial_write_tresh;
+		}
+
+		vbva_buffer_place_data_at(vbva_ctx, p, chunk,
+					  vbva->free_offset);
+
+		vbva->free_offset = (vbva->free_offset + chunk) %
+				    vbva->data_len;
+		record->len_and_flags += chunk;
+		available -= chunk;
+		len -= chunk;
+		p += chunk;
+	}
+
+	return true;
+}
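
Tying the functions in this file together, a minimal hedged sketch of how a
caller pushes one command into the ring (the wrapper name and the opaque
command buffer are assumptions for illustration):

	static bool example_send_cmd(struct vbva_buf_ctx *vbva_ctx,
				     struct gen_pool *guest_pool,
				     const void *cmd, u32 len)
	{
		bool ret;

		if (!vbva_buffer_begin_update(vbva_ctx, guest_pool))
			return false;

		/* May flush and wrap around the ring internally. */
		ret = vbva_write(vbva_ctx, guest_pool, cmd, len);

		vbva_buffer_end_update(vbva_ctx);

		return ret;
	}
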
+
+static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
+			     struct gen_pool *ctx, s32 screen, bool enable)
+{
+	struct vbva_enable_ex *p;
+	bool ret;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
+	if (!p)
+		return false;
+
+	p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
+	p->base.offset = vbva_ctx->buffer_offset;
+	p->base.result = VERR_NOT_SUPPORTED;
+	if (screen >= 0) {
+		p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
+		p->screen_id = screen;
+	}
+
+	hgsmi_buffer_submit(ctx, p);
+
+	if (enable)
+		ret = RT_SUCCESS(p->base.result);
+	else
+		ret = true;
+
+	hgsmi_buffer_free(ctx, p);
+
+	return ret;
+}
+
+bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+		 struct vbva_buffer *vbva, s32 screen)
+{
+	bool ret = false;
+
+	memset(vbva, 0, sizeof(*vbva));
+	vbva->partial_write_tresh = 256;
+	vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
+	vbva_ctx->vbva = vbva;
+
+	ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
+	if (!ret)
+		vbva_disable(vbva_ctx, ctx, screen);
+
+	return ret;
+}
+
+void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+		  s32 screen)
+{
+	vbva_ctx->buffer_overflow = false;
+	vbva_ctx->record = NULL;
+	vbva_ctx->vbva = NULL;
+
+	vbva_inform_host(vbva_ctx, ctx, screen, false);
+}
+
+bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
+			      struct gen_pool *ctx)
+{
+	struct vbva_record *record;
+	u32 next;
+
+	if (!vbva_ctx->vbva ||
+	    !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
+		return false;
+
+	WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);
+
+	next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;
+
+	/* Flush if all slots in the records queue are used */
+	if (next == vbva_ctx->vbva->record_first_index)
+		vbva_buffer_flush(ctx);
+
+	/* If there is still no free slot after the flush, fail the request */
+	if (next == vbva_ctx->vbva->record_first_index)
+		return false;
+
+	record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
+	record->len_and_flags = VBVA_F_RECORD_PARTIAL;
+	vbva_ctx->vbva->record_free_index = next;
+	/* Remember which record we are using. */
+	vbva_ctx->record = record;
+
+	return true;
+}
+
+void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
+{
+	struct vbva_record *record = vbva_ctx->record;
+
+	WARN_ON(!vbva_ctx->vbva || !record ||
+		!(record->len_and_flags & VBVA_F_RECORD_PARTIAL));
+
+	/* Mark the record completed. */
+	record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
+
+	vbva_ctx->buffer_overflow = false;
+	vbva_ctx->record = NULL;
+}
+
+void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
+			       u32 buffer_offset, u32 buffer_length)
+{
+	vbva_ctx->buffer_offset = buffer_offset;
+	vbva_ctx->buffer_length = buffer_length;
+}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 030bec855d86..314ffac50bb8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3391,7 +3391,6 @@ static int vchiq_probe(struct platform_device *pdev)
 	struct device_node *fw_node;
 	struct rpi_firmware *fw;
 	int err;
-	void *ptr_err;
 
 	fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
 	if (!fw_node) {
@@ -3427,14 +3426,14 @@ static int vchiq_probe(struct platform_device *pdev)
 
 	/* create sysfs entries */
 	vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
-	ptr_err = vchiq_class;
-	if (IS_ERR(ptr_err))
+	err = PTR_ERR(vchiq_class);
+	if (IS_ERR(vchiq_class))
 		goto failed_class_create;
 
 	vchiq_dev = device_create(vchiq_class, NULL,
 		vchiq_devid, NULL, "vchiq");
-	ptr_err = vchiq_dev;
-	if (IS_ERR(ptr_err))
+	err = PTR_ERR(vchiq_dev);
+	if (IS_ERR(vchiq_dev))
 		goto failed_device_create;
 
 	/* create debugfs entries */
@@ -3455,7 +3454,6 @@ failed_device_create:
 	class_destroy(vchiq_class);
 failed_class_create:
 	cdev_del(&vchiq_cdev);
-	err = PTR_ERR(ptr_err);
 failed_cdev_add:
 	unregister_chrdev_region(vchiq_devid, 1);
 failed_platform_init:
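
The rewrite above follows the standard kernel idiom for pointer-returning
constructors: compute PTR_ERR() up front but only trust it once IS_ERR()
confirms the pointer encodes an error. A generic hedged sketch of that shape
(names illustrative):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/module.h>

	static int example_create_class(struct class **out)
	{
		struct class *cls;
		int err;

		cls = class_create(THIS_MODULE, "example");
		err = PTR_ERR(cls);	/* meaningless unless IS_ERR(cls) */
		if (IS_ERR(cls))
			return err;

		*out = cls;
		return 0;
	}
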
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ab3e8f410444..40219a706309 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -281,9 +281,11 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
 	if (active) {
 		config.name = "nvm_active";
 		config.reg_read = tb_switch_nvm_read;
+		config.read_only = true;
 	} else {
 		config.name = "nvm_non_active";
 		config.reg_write = tb_switch_nvm_write;
+		config.root_only = true;
 	}
 
 	config.id = id;
@@ -292,7 +294,6 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
 	config.size = size;
 	config.dev = &sw->dev;
 	config.owner = THIS_MODULE;
-	config.root_only = true;
 	config.priv = sw;
 
 	return nvmem_register(&config);
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index d1399aac05a1..284749fb0f6b 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -448,48 +448,6 @@ err:
 	return retval;
 }
 
-/**
- *	pty_open_peer - open the peer of a pty
- *	@tty: the peer of the pty being opened
- *
- *	Open the cached dentry in tty->link, providing a safe way for userspace
- *	to get the slave end of a pty (where they have the master fd and cannot
- *	access or trust the mount namespace /dev/pts was mounted inside).
- */
-static struct file *pty_open_peer(struct tty_struct *tty, int flags)
-{
-	if (tty->driver->subtype != PTY_TYPE_MASTER)
-		return ERR_PTR(-EIO);
-	return dentry_open(tty->link->driver_data, flags, current_cred());
-}
-
-static int pty_get_peer(struct tty_struct *tty, int flags)
-{
-	int fd = -1;
-	struct file *filp = NULL;
-	int retval = -EINVAL;
-
-	fd = get_unused_fd_flags(0);
-	if (fd < 0) {
-		retval = fd;
-		goto err;
-	}
-
-	filp = pty_open_peer(tty, flags);
-	if (IS_ERR(filp)) {
-		retval = PTR_ERR(filp);
-		goto err_put;
-	}
-
-	fd_install(fd, filp);
-	return fd;
-
-err_put:
-	put_unused_fd(fd);
-err:
-	return retval;
-}
-
 static void pty_cleanup(struct tty_struct *tty)
 {
 	tty_port_put(tty->port);
@@ -646,9 +604,50 @@ static inline void legacy_pty_init(void) { }
 
 /* Unix98 devices */
 #ifdef CONFIG_UNIX98_PTYS
-
 static struct cdev ptmx_cdev;
 
+/**
+ *	pty_open_peer - open the peer of a pty
+ *	@tty: the peer of the pty being opened
+ *
+ *	Open the cached dentry in tty->link, providing a safe way for userspace
+ *	to get the slave end of a pty (where they have the master fd and cannot
+ *	access or trust the mount namespace /dev/pts was mounted inside).
+ */
+static struct file *pty_open_peer(struct tty_struct *tty, int flags)
+{
+	if (tty->driver->subtype != PTY_TYPE_MASTER)
+		return ERR_PTR(-EIO);
+	return dentry_open(tty->link->driver_data, flags, current_cred());
+}
+
+static int pty_get_peer(struct tty_struct *tty, int flags)
+{
+	int fd = -1;
+	struct file *filp = NULL;
+	int retval = -EINVAL;
+
+	fd = get_unused_fd_flags(0);
+	if (fd < 0) {
+		retval = fd;
+		goto err;
+	}
+
+	filp = pty_open_peer(tty, flags);
+	if (IS_ERR(filp)) {
+		retval = PTR_ERR(filp);
+		goto err_put;
+	}
+
+	fd_install(fd, filp);
+	return fd;
+
+err_put:
+	put_unused_fd(fd);
+err:
+	return retval;
+}
+
 static int pty_unix98_ioctl(struct tty_struct *tty,
 			    unsigned int cmd, unsigned long arg)
 {
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 343de8c384b0..898dcb091a27 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -619,6 +619,12 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
 		TIOCSER_TEMT : 0;
 }
 
+static bool lpuart_is_32(struct lpuart_port *sport)
+{
+	return sport->port.iotype == UPIO_MEM32 ||
+	       sport->port.iotype == UPIO_MEM32BE;
+}
+
 static irqreturn_t lpuart_txint(int irq, void *dev_id)
 {
 	struct lpuart_port *sport = dev_id;
@@ -627,7 +633,7 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id)
 
 	spin_lock_irqsave(&sport->port.lock, flags);
 	if (sport->port.x_char) {
-		if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+		if (lpuart_is_32(sport))
 			lpuart32_write(&sport->port, sport->port.x_char, UARTDATA);
 		else
 			writeb(sport->port.x_char, sport->port.membase + UARTDR);
@@ -635,14 +641,14 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id)
 	}
 
 	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
-		if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+		if (lpuart_is_32(sport))
 			lpuart32_stop_tx(&sport->port);
 		else
 			lpuart_stop_tx(&sport->port);
 		goto out;
 	}
 
-	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+	if (lpuart_is_32(sport))
 		lpuart32_transmit_buffer(sport);
 	else
 		lpuart_transmit_buffer(sport);
@@ -1978,12 +1984,12 @@ static int __init lpuart_console_setup(struct console *co, char *options)
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 	else
-		if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+		if (lpuart_is_32(sport))
 			lpuart32_console_get_options(sport, &baud, &parity, &bits);
 		else
 			lpuart_console_get_options(sport, &baud, &parity, &bits);
 
-	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+	if (lpuart_is_32(sport))
 		lpuart32_setup_watermark(sport);
 	else
 		lpuart_setup_watermark(sport);
@@ -2118,7 +2124,7 @@ static int lpuart_probe(struct platform_device *pdev)
 	}
 	sport->port.irq = ret;
 	sport->port.iotype = sdata->iotype;
-	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+	if (lpuart_is_32(sport))
 		sport->port.ops = &lpuart32_pops;
 	else
 		sport->port.ops = &lpuart_pops;
@@ -2145,7 +2151,7 @@ static int lpuart_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, &sport->port);
 
-	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+	if (lpuart_is_32(sport))
 		lpuart_reg.cons = LPUART32_CONSOLE;
 	else
 		lpuart_reg.cons = LPUART_CONSOLE;
@@ -2198,7 +2204,7 @@ static int lpuart_suspend(struct device *dev)
 	struct lpuart_port *sport = dev_get_drvdata(dev);
 	unsigned long temp;
 
-	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) {
+	if (lpuart_is_32(sport)) {
 		/* disable Rx/Tx and interrupts */
 		temp = lpuart32_read(&sport->port, UARTCTRL);
 		temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE);
@@ -2249,7 +2255,7 @@ static int lpuart_resume(struct device *dev)
 	if (sport->port.suspended && !sport->port.irq_wake)
 		clk_prepare_enable(sport->clk);
 
-	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) {
+	if (lpuart_is_32(sport)) {
 		lpuart32_setup_watermark(sport);
 		temp = lpuart32_read(&sport->port, UARTCTRL);
 		temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE |
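
The `iotype & (UPIO_MEM32 | UPIO_MEM32BE)` tests that lpuart_is_32() replaces
were subtly wrong: the UPIO_* constants are enum values, not bit flags, so the
bitwise AND also matched unrelated iotypes. A hedged userspace illustration
(enum values as in this generation's serial headers):

	#include <stdio.h>

	enum { UPIO_PORT, UPIO_HUB6, UPIO_MEM, UPIO_MEM32, UPIO_AU,
	       UPIO_TSI, UPIO_MEM32BE };

	int main(void)
	{
		int iotype = UPIO_MEM;	/* plain 8-bit MMIO, value 2 */

		/* Old test: 2 & (3 | 6) == 2, i.e. wrongly true. */
		printf("buggy:   %d\n",
		       !!(iotype & (UPIO_MEM32 | UPIO_MEM32BE)));
		/* The new helper compares for equality instead. */
		printf("correct: %d\n",
		       iotype == UPIO_MEM32 || iotype == UPIO_MEM32BE);
		return 0;
	}
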
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9e3162bf3bd1..80934e7bd67f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -186,11 +186,6 @@
 
 #define UART_NR 8
 
-/* RX DMA buffer periods */
-#define RX_DMA_PERIODS 4
-#define RX_BUF_SIZE	(PAGE_SIZE)
-
-
 /* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
 enum imx_uart_type {
 	IMX1_UART,
@@ -226,7 +221,6 @@ struct imx_port {
 	struct dma_chan		*dma_chan_rx, *dma_chan_tx;
 	struct scatterlist	rx_sgl, tx_sgl[2];
 	void			*rx_buf;
-	unsigned int		rx_buf_size;
 	struct circ_buf		rx_ring;
 	unsigned int		rx_periods;
 	dma_cookie_t		rx_cookie;
@@ -464,7 +458,7 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
 		}
 	}
 
-	while (!uart_circ_empty(xmit) &&
+	while (!uart_circ_empty(xmit) && !sport->dma_is_txing &&
 	       !(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) {
 		/* send xmit->buf[xmit->tail]
 		 * out the port here */
@@ -967,6 +961,8 @@ static void imx_timeout(unsigned long data)
 	}
 }
 
+#define RX_BUF_SIZE	(PAGE_SIZE)
+
 /*
  * There are two kinds of RX DMA interrupts (such as in the MX6Q):
  *   [1] the RX DMA buffer is full.
@@ -1049,6 +1045,9 @@ static void dma_rx_callback(void *data)
 	}
 }
 
+/* RX DMA buffer periods */
+#define RX_DMA_PERIODS 4
+
 static int start_rx_dma(struct imx_port *sport)
 {
 	struct scatterlist *sgl = &sport->rx_sgl;
@@ -1059,8 +1058,9 @@ static int start_rx_dma(struct imx_port *sport)
 
 	sport->rx_ring.head = 0;
 	sport->rx_ring.tail = 0;
+	sport->rx_periods = RX_DMA_PERIODS;
 
-	sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size);
+	sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
 	ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
 	if (ret == 0) {
 		dev_err(dev, "DMA mapping error for RX.\n");
@@ -1171,7 +1171,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
 		goto err;
 	}
 
-	sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL);
+	sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!sport->rx_buf) {
 		ret = -ENOMEM;
 		goto err;
@@ -2036,7 +2036,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
 {
 	struct device_node *np = pdev->dev.of_node;
 	int ret;
-	u32 dma_buf_size[2];
 
 	sport->devdata = of_device_get_match_data(&pdev->dev);
 	if (!sport->devdata)
@@ -2060,14 +2059,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
 	if (of_get_property(np, "rts-gpios", NULL))
 		sport->have_rtsgpio = 1;
 
-	if (!of_property_read_u32_array(np, "fsl,dma-size", dma_buf_size, 2)) {
-		sport->rx_buf_size = dma_buf_size[0] * dma_buf_size[1];
-		sport->rx_periods = dma_buf_size[1];
-	} else {
-		sport->rx_buf_size = RX_BUF_SIZE;
-		sport->rx_periods = RX_DMA_PERIODS;
-	}
-
 	return 0;
 }
 #else
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index da5ddfc14778..e08b16b070c0 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1085,10 +1085,12 @@ static ssize_t rx_trigger_store(struct device *dev,
 {
 	struct uart_port *port = dev_get_drvdata(dev);
 	struct sci_port *sci = to_sci_port(port);
+	int ret;
 	long r;
 
-	if (kstrtol(buf, 0, &r) == -EINVAL)
-		return -EINVAL;
+	ret = kstrtol(buf, 0, &r);
+	if (ret)
+		return ret;
 
 	sci->rx_trigger = scif_set_rtrg(port, r);
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
@@ -1116,10 +1118,12 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
 {
 	struct uart_port *port = dev_get_drvdata(dev);
 	struct sci_port *sci = to_sci_port(port);
+	int ret;
 	long r;
 
-	if (kstrtol(buf, 0, &r) == -EINVAL)
-		return -EINVAL;
+	ret = kstrtol(buf, 0, &r);
+	if (ret)
+		return ret;
 	sci->rx_fifo_timeout = r;
 	scif_set_rtrg(port, 1);
 	if (r > 0)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index f5335be344f6..6b0ca65027d0 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -758,6 +758,7 @@ static int asc_init_port(struct asc_port *ascport,
 	if (IS_ERR(ascport->pinctrl)) {
 		ret = PTR_ERR(ascport->pinctrl);
 		dev_err(&pdev->dev, "Failed to get Pinctrl: %d\n", ret);
+		return ret;
 	}
 
 	ascport->states[DEFAULT] =
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5357d83bbda2..5e056064259c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1829,6 +1829,9 @@ static const struct usb_device_id acm_ids[] = {
 	{ USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
 	},
+	{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
+	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+	},
 
 	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
 	.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index bc3b3fda5000..c4066cd77e47 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3573,6 +3573,9 @@ irq_retry:
 		/* Report disconnection if it is not already done. */
 		dwc2_hsotg_disconnect(hsotg);
 
+		/* Reset device address to zero */
+		__bic32(hsotg->regs + DCFG, DCFG_DEVADDR_MASK);
+
 		if (usb_status & GOTGCTL_BSESVLD && connected)
 			dwc2_hsotg_core_init_disconnected(hsotg, true);
 	}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 326b302fc440..03474d3575ab 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -766,15 +766,15 @@ static int dwc3_core_init(struct dwc3 *dwc)
 			dwc->maximum_speed = USB_SPEED_HIGH;
 	}
 
-	ret = dwc3_core_soft_reset(dwc);
+	ret = dwc3_core_get_phy(dwc);
 	if (ret)
 		goto err0;
 
-	ret = dwc3_phy_setup(dwc);
+	ret = dwc3_core_soft_reset(dwc);
 	if (ret)
 		goto err0;
 
-	ret = dwc3_core_get_phy(dwc);
+	ret = dwc3_phy_setup(dwc);
 	if (ret)
 		goto err0;
 
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 98926504b55b..f5aaa0cf3873 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -512,15 +512,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 
 	/* check the DMA Status */
 	reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
-	irq_set_status_flags(omap->irq, IRQ_NOAUTOEN);
-	ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
-					dwc3_omap_interrupt_thread, IRQF_SHARED,
-					"dwc3-omap", omap);
-	if (ret) {
-		dev_err(dev, "failed to request IRQ #%d --> %d\n",
-				omap->irq, ret);
-		goto err1;
-	}
 
 	ret = dwc3_omap_extcon_register(omap);
 	if (ret < 0)
@@ -532,8 +523,15 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 		goto err1;
 	}
 
+	ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
+					dwc3_omap_interrupt_thread, IRQF_SHARED,
+					"dwc3-omap", omap);
+	if (ret) {
+		dev_err(dev, "failed to request IRQ #%d --> %d\n",
+			omap->irq, ret);
+		goto err1;
+	}
 	dwc3_omap_enable_irqs(omap);
-	enable_irq(omap->irq);
 	return 0;
 
 err1:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9e41605a276b..6b299c7b7656 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -191,14 +191,16 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 
 	req->started = false;
 	list_del(&req->list);
-	req->trb = NULL;
 	req->remaining = 0;
 
 	if (req->request.status == -EINPROGRESS)
 		req->request.status = status;
 
-	usb_gadget_unmap_request_by_dev(dwc->sysdev,
-					&req->request, req->direction);
+	if (req->trb)
+		usb_gadget_unmap_request_by_dev(dwc->sysdev,
+						&req->request, req->direction);
+
+	req->trb = NULL;
 
 	trace_dwc3_gadget_giveback(req);
 
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index e80b9c123a9d..f95bddd6513f 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2490,7 +2490,7 @@ static int fsg_main_thread(void *common_)
 		int i;
 
 		down_write(&common->filesem);
-		for (i = 0; i < ARRAY_SIZE(common->luns); --i) {
+		for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
 			struct fsg_lun *curlun = common->luns[i];
 			if (!curlun || !fsg_lun_is_open(curlun))
 				continue;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 8656f84e17d9..29efbedc91f9 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -92,9 +92,9 @@ static struct uac_input_terminal_descriptor usb_out_it_desc = {
 	.bDescriptorType =	USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
 	.bTerminalID =		USB_OUT_IT_ID,
-	.wTerminalType =	UAC_TERMINAL_STREAMING,
+	.wTerminalType =	cpu_to_le16(UAC_TERMINAL_STREAMING),
 	.bAssocTerminal =	0,
-	.wChannelConfig =	0x3,
+	.wChannelConfig =	cpu_to_le16(0x3),
 };
 
 #define IO_OUT_OT_ID	2
@@ -103,7 +103,7 @@ static struct uac1_output_terminal_descriptor io_out_ot_desc = {
 	.bDescriptorType	= USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
 	.bTerminalID		= IO_OUT_OT_ID,
-	.wTerminalType		= UAC_OUTPUT_TERMINAL_SPEAKER,
+	.wTerminalType		= cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
 	.bAssocTerminal		= 0,
 	.bSourceID		= USB_OUT_IT_ID,
 };
@@ -114,9 +114,9 @@ static struct uac_input_terminal_descriptor io_in_it_desc = {
 	.bDescriptorType	= USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype	= UAC_INPUT_TERMINAL,
 	.bTerminalID		= IO_IN_IT_ID,
-	.wTerminalType		= UAC_INPUT_TERMINAL_MICROPHONE,
+	.wTerminalType		= cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
 	.bAssocTerminal		= 0,
-	.wChannelConfig		= 0x3,
+	.wChannelConfig		= cpu_to_le16(0x3),
 };
 
 #define USB_IN_OT_ID	4
@@ -125,7 +125,7 @@ static struct uac1_output_terminal_descriptor usb_in_ot_desc = {
 	.bDescriptorType =	USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype =	UAC_OUTPUT_TERMINAL,
 	.bTerminalID =		USB_IN_OT_ID,
-	.wTerminalType =	UAC_TERMINAL_STREAMING,
+	.wTerminalType =	cpu_to_le16(UAC_TERMINAL_STREAMING),
 	.bAssocTerminal =	0,
 	.bSourceID =		IO_IN_IT_ID,
 };
@@ -174,7 +174,7 @@ static struct uac1_as_header_descriptor as_out_header_desc = {
 	.bDescriptorSubtype =	UAC_AS_GENERAL,
 	.bTerminalLink =	USB_OUT_IT_ID,
 	.bDelay =		1,
-	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
+	.wFormatTag =		cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
 };
 
 static struct uac1_as_header_descriptor as_in_header_desc = {
@@ -183,7 +183,7 @@ static struct uac1_as_header_descriptor as_in_header_desc = {
 	.bDescriptorSubtype =	UAC_AS_GENERAL,
 	.bTerminalLink =	USB_IN_OT_ID,
 	.bDelay =		1,
-	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
+	.wFormatTag =		cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
 };
 
 DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
@@ -606,8 +606,8 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
 	if (status)
 		goto fail;
 
-	audio->out_ep_maxpsize = as_out_ep_desc.wMaxPacketSize;
-	audio->in_ep_maxpsize = as_in_ep_desc.wMaxPacketSize;
+	audio->out_ep_maxpsize = le16_to_cpu(as_out_ep_desc.wMaxPacketSize);
+	audio->in_ep_maxpsize = le16_to_cpu(as_in_ep_desc.wMaxPacketSize);
 	audio->params.c_chmask = audio_opts->c_chmask;
 	audio->params.c_srate = audio_opts->c_srate;
 	audio->params.c_ssize = audio_opts->c_ssize;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 9082ce261e70..f05c3f3e6103 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -168,7 +168,7 @@ static struct uac2_input_terminal_descriptor usb_out_it_desc = {
 	.bAssocTerminal = 0,
 	.bCSourceID = USB_OUT_CLK_ID,
 	.iChannelNames = 0,
-	.bmControls = (CONTROL_RDWR << COPY_CTRL),
+	.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
 };
 
 /* Input Terminal for I/O-In */
@@ -182,7 +182,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
 	.bAssocTerminal = 0,
 	.bCSourceID = USB_IN_CLK_ID,
 	.iChannelNames = 0,
-	.bmControls = (CONTROL_RDWR << COPY_CTRL),
+	.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
 };
 
 /* Output Terminal for USB_IN */
@@ -196,7 +196,7 @@ static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
 	.bAssocTerminal = 0,
 	.bSourceID = IO_IN_IT_ID,
 	.bCSourceID = USB_IN_CLK_ID,
-	.bmControls = (CONTROL_RDWR << COPY_CTRL),
+	.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
 };
 
 /* Output Terminal for I/O-Out */
@@ -210,7 +210,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
 	.bAssocTerminal = 0,
 	.bSourceID = USB_OUT_IT_ID,
 	.bCSourceID = USB_OUT_CLK_ID,
-	.bmControls = (CONTROL_RDWR << COPY_CTRL),
+	.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
 };
 
 static struct uac2_ac_header_descriptor ac_hdr_desc = {
@@ -220,9 +220,10 @@ static struct uac2_ac_header_descriptor ac_hdr_desc = {
 	.bDescriptorSubtype = UAC_MS_HEADER,
 	.bcdADC = cpu_to_le16(0x200),
 	.bCategory = UAC2_FUNCTION_IO_BOX,
-	.wTotalLength = sizeof in_clk_src_desc + sizeof out_clk_src_desc
-			 + sizeof usb_out_it_desc + sizeof io_in_it_desc
-			+ sizeof usb_in_ot_desc + sizeof io_out_ot_desc,
+	.wTotalLength = cpu_to_le16(sizeof in_clk_src_desc
+			+ sizeof out_clk_src_desc + sizeof usb_out_it_desc
+			+ sizeof io_in_it_desc + sizeof usb_in_ot_desc
+			+ sizeof io_out_ot_desc),
 	.bmControls = 0,
 };
 
@@ -569,10 +570,12 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 		return ret;
 	}
 
-	agdev->in_ep_maxpsize = max(fs_epin_desc.wMaxPacketSize,
-					hs_epin_desc.wMaxPacketSize);
-	agdev->out_ep_maxpsize = max(fs_epout_desc.wMaxPacketSize,
-					hs_epout_desc.wMaxPacketSize);
+	agdev->in_ep_maxpsize = max_t(u16,
+				le16_to_cpu(fs_epin_desc.wMaxPacketSize),
+				le16_to_cpu(hs_epin_desc.wMaxPacketSize));
+	agdev->out_ep_maxpsize = max_t(u16,
+				le16_to_cpu(fs_epout_desc.wMaxPacketSize),
+				le16_to_cpu(hs_epout_desc.wMaxPacketSize));
 
 	hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
 	hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 9ffb11ec9ed9..7cd5c969fcbe 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -192,7 +192,7 @@ config USB_RENESAS_USBHS_UDC
 config USB_RENESAS_USB3
 	tristate 'Renesas USB3.0 Peripheral controller'
 	depends on ARCH_RENESAS || COMPILE_TEST
-	depends on EXTCON
+	depends on EXTCON && HAS_DMA
 	help
 	   Renesas USB3.0 Peripheral controller is a USB peripheral controller
 	   that supports super, high, and full speed USB 3.0 data transfers.
@@ -257,6 +257,7 @@ config USB_MV_U3D
 
 config USB_SNP_CORE
 	depends on (USB_AMD5536UDC || USB_SNP_UDC_PLAT)
+	depends on HAS_DMA
 	tristate
 	help
 	  This enables core driver support for Synopsys USB 2.0 Device
@@ -271,7 +272,7 @@ config USB_SNP_CORE
 
 config USB_SNP_UDC_PLAT
 	tristate "Synopsys USB 2.0 Device controller"
-	depends on (USB_GADGET && OF)
+	depends on USB_GADGET && OF && HAS_DMA
 	select USB_GADGET_DUALSPEED
 	select USB_SNP_CORE
 	default ARCH_BCM_IPROC
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index d8278322d5ac..62dc9c7798e7 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -89,6 +89,9 @@
 
 /* USB_COM_CON */
 #define USB_COM_CON_CONF		BIT(24)
+#define USB_COM_CON_PN_WDATAIF_NL	BIT(23)
+#define USB_COM_CON_PN_RDATAIF_NL	BIT(22)
+#define USB_COM_CON_PN_LSTTR_PP		BIT(21)
 #define USB_COM_CON_SPD_MODE		BIT(17)
 #define USB_COM_CON_EP0_EN		BIT(16)
 #define USB_COM_CON_DEV_ADDR_SHIFT	8
@@ -686,6 +689,9 @@ static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
 {
 	usb3_init_axi_bridge(usb3);
 	usb3_init_epc_registers(usb3);
+	usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL |
+		     USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP,
+		     USB3_USB_COM_CON);
 	usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_STA);
 	usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_ENA);
 
@@ -1369,7 +1375,7 @@ static int renesas_usb3_dma_free_prd(struct renesas_usb3 *usb3,
 
 	usb3_for_each_dma(usb3, dma, i) {
 		if (dma->prd) {
-			dma_free_coherent(dev, USB3_DMA_MAX_XFER_SIZE,
+			dma_free_coherent(dev, USB3_DMA_PRD_SIZE,
 					  dma->prd, dma->prd_dma);
 			dma->prd = NULL;
 		}
@@ -1409,12 +1415,12 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
 	int ret = -EAGAIN;
 	u32 enable_bits = 0;
 
+	spin_lock_irqsave(&usb3->lock, flags);
 	if (usb3_ep->halt || usb3_ep->started)
-		return;
+		goto out;
 	if (usb3_req != usb3_req_first)
-		return;
+		goto out;
 
-	spin_lock_irqsave(&usb3->lock, flags);
 	if (usb3_pn_change(usb3, usb3_ep->num) < 0)
 		goto out;
 
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 2e11f19e07ae..f7b4d0f159e4 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -28,7 +28,7 @@
 /* description */
 #define UDC_MOD_DESCRIPTION     "Synopsys UDC platform driver"
 
-void start_udc(struct udc *udc)
+static void start_udc(struct udc *udc)
 {
 	if (udc->driver) {
 		dev_info(udc->dev, "Connecting...\n");
@@ -38,7 +38,7 @@ void start_udc(struct udc *udc)
 	}
 }
 
-void stop_udc(struct udc *udc)
+static void stop_udc(struct udc *udc)
 {
 	int tmp;
 	u32 reg;
@@ -76,7 +76,7 @@ void stop_udc(struct udc *udc)
 	dev_info(udc->dev, "Device disconnected\n");
 }
 
-void udc_drd_work(struct work_struct *work)
+static void udc_drd_work(struct work_struct *work)
 {
 	struct udc *udc;
 
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a9a1e4c40480..c8989c62a262 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -77,6 +77,16 @@
 #define USB_INTEL_USB3_PSSEN   0xD8
 #define USB_INTEL_USB3PRM      0xDC
 
+/* Registers and values used by the ASMedia flow control quirk */
+#define ASMT_DATA_WRITE0_REG	0xF8
+#define ASMT_DATA_WRITE1_REG	0xFC
+#define ASMT_CONTROL_REG	0xE0
+#define ASMT_CONTROL_WRITE_BIT	0x02
+#define ASMT_WRITEREG_CMD	0x10423
+#define ASMT_FLOWCTL_ADDR	0xFA30
+#define ASMT_FLOWCTL_DATA	0xBA
+#define ASMT_PSEUDO_DATA	0
+
 /*
  * amd_chipset_gen values represent AMD different chipset generations
  */
@@ -412,6 +422,50 @@ void usb_amd_quirk_pll_disable(void)
 }
 EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
 
+static int usb_asmedia_wait_write(struct pci_dev *pdev)
+{
+	unsigned long retry_count;
+	unsigned char value;
+
+	for (retry_count = 1000; retry_count > 0; --retry_count) {
+		pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
+
+		if (value == 0xff) {
+			dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
+			return -EIO;
+		}
+
+		if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
+			return 0;
+
+		usleep_range(40, 60);
+	}
+
+	dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
+	return -ETIMEDOUT;
+}
+
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
+{
+	if (usb_asmedia_wait_write(pdev) != 0)
+		return;
+
+	/* send command and address to device */
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
+	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+
+	if (usb_asmedia_wait_write(pdev) != 0)
+		return;
+
+	/* send data to device */
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
+	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+}
+EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
+
 void usb_amd_quirk_pll_enable(void)
 {
 	usb_amd_quirk_pll(0);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 0222195bd5b0..655994480198 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -11,6 +11,7 @@ bool usb_amd_prefetch_quirk(void);
 void usb_amd_dev_put(void);
 void usb_amd_quirk_pll_disable(void);
 void usb_amd_quirk_pll_enable(void);
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
 void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 void sb800_prefetch(struct device *dev, int on);
@@ -18,6 +19,7 @@ void sb800_prefetch(struct device *dev, int on);
 struct pci_dev;
 static inline void usb_amd_quirk_pll_disable(void) {}
 static inline void usb_amd_quirk_pll_enable(void) {}
+static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
 static inline void usb_amd_dev_put(void) {}
 static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
 static inline void sb800_prefetch(struct device *dev, int on) {}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1adae9eab831..00721e8807ab 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -398,14 +398,21 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 	spin_lock_irqsave(&xhci->lock, flags);
 	for (i = LAST_EP_INDEX; i > 0; i--) {
 		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
+			struct xhci_ep_ctx *ep_ctx;
 			struct xhci_command *command;
+
+			ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i);
+
+			/* Check ep is running, required by AMD SNPS 3.1 xHC */
+			if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING)
+				continue;
+
 			command = xhci_alloc_command(xhci, false, false,
 						     GFP_NOWAIT);
 			if (!command) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
 				xhci_free_command(xhci, cmd);
 				return -ENOMEM;
-
 			}
 			xhci_queue_stop_endpoint(xhci, command, slot_id, i,
 						 suspend);
@@ -603,12 +610,14 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
 
 	/* Disable all Device Slots */
 	xhci_dbg(xhci, "Disable all slots\n");
+	spin_unlock_irqrestore(&xhci->lock, *flags);
 	for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
 		retval = xhci_disable_slot(xhci, NULL, i);
 		if (retval)
 			xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
 				 i, retval);
 	}
+	spin_lock_irqsave(&xhci->lock, *flags);
 	/* Put all ports to the Disable state by clear PP */
 	xhci_dbg(xhci, "Disable all port (PP = 0)\n");
 	/* Power off USB3 ports */
@@ -897,6 +906,9 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
 			clear_bit(wIndex, &bus_state->resuming_ports);
 
 			set_bit(wIndex, &bus_state->rexit_ports);
+
+			xhci_test_and_clear_bit(xhci, port_array, wIndex,
+						PORT_PLC);
 			xhci_set_link_state(xhci, port_array, wIndex,
 					XDEV_U0);
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 53882e2babbb..5b0fa553c8bc 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,8 @@
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_2			0x43bb
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_1			0x43bc
 
+#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI		0x1142
+
 static const char hcd_name[] = "xhci_hcd";
 
 static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -217,6 +219,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 			pdev->device == 0x1142)
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
+	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
+		xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL;
+
 	if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
 		xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c50c902d009e..cc368ad2b51e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -864,13 +864,16 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
 			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
 		int stream_id;
 
-		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
 				stream_id++) {
+			ring = ep->stream_info->stream_rings[stream_id];
+			if (!ring)
+				continue;
+
 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 					"Killing URBs for slot ID %u, ep index %u, stream %u",
-					slot_id, ep_index, stream_id + 1);
-			xhci_kill_ring_urbs(xhci,
-					ep->stream_info->stream_rings[stream_id]);
+					slot_id, ep_index, stream_id);
+			xhci_kill_ring_urbs(xhci, ring);
 		}
 	} else {
 		ring = ep->ring;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 56f85df013db..b2ff1ff1a02f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -198,6 +198,9 @@ int xhci_reset(struct xhci_hcd *xhci)
 	if (ret)
 		return ret;
 
+	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
+
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			 "Wait for controller to be ready for doorbell rings");
 	/*
@@ -622,8 +625,10 @@ int xhci_run(struct usb_hcd *hcd)
 		if (!command)
 			return -ENOMEM;
 
-		xhci_queue_vendor_command(xhci, command, 0, 0, 0,
+		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
 				TRB_TYPE(TRB_NEC_GET_FW));
+		if (ret)
+			xhci_free_command(xhci, command);
 	}
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"Finished xhci_run for USB2 roothub");
@@ -1085,6 +1090,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
 		compliance_mode_recovery_timer_init(xhci);
 
+	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
+
 	/* Re-enable port polling. */
 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3c6da1f93c84..e3e935291ed6 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1820,6 +1820,7 @@ struct xhci_hcd {
 #define XHCI_BROKEN_PORT_PED	(1 << 25)
 #define XHCI_LIMIT_ENDPOINT_INTERVAL_7	(1 << 26)
 #define XHCI_U2_DISABLE_WAKE	(1 << 27)
+#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL	(1 << 28)
 
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 623c51300393..f0ce304c5aaf 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -752,8 +752,10 @@ static int usbhsc_resume(struct device *dev)
 	struct usbhs_priv *priv = dev_get_drvdata(dev);
 	struct platform_device *pdev = usbhs_priv_to_pdev(priv);
 
-	if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
+	if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
 		usbhsc_power_ctrl(priv, 1);
+		usbhs_mod_autonomy_mode(priv);
+	}
 
 	usbhs_platform_call(priv, phy_reset, pdev);
 
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 5bc7a6138855..93fba9033b00 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -37,6 +37,7 @@ struct usbhsg_gpriv;
 struct usbhsg_uep {
 	struct usb_ep		 ep;
 	struct usbhs_pipe	*pipe;
+	spinlock_t		lock;	/* protect the pipe */
 
 	char ep_name[EP_NAME_SIZE];
 
@@ -636,10 +637,16 @@ usbhsg_ep_enable_end:
 static int usbhsg_ep_disable(struct usb_ep *ep)
 {
 	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
-	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+	struct usbhs_pipe *pipe;
+	unsigned long flags;
+	int ret = 0;
 
-	if (!pipe)
-		return -EINVAL;
+	spin_lock_irqsave(&uep->lock, flags);
+	pipe = usbhsg_uep_to_pipe(uep);
+	if (!pipe) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	usbhsg_pipe_disable(uep);
 	usbhs_pipe_free(pipe);
@@ -647,6 +654,9 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
 	uep->pipe->mod_private	= NULL;
 	uep->pipe		= NULL;
 
+out:
+	spin_unlock_irqrestore(&uep->lock, flags);
+
 	return ret;
 }
 
@@ -696,8 +706,11 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 {
 	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
 	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
-	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+	struct usbhs_pipe *pipe;
+	unsigned long flags;
 
+	spin_lock_irqsave(&uep->lock, flags);
+	pipe = usbhsg_uep_to_pipe(uep);
 	if (pipe)
 		usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
 
@@ -706,6 +719,7 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 	 * even if the pipe is NULL.
 	 */
 	usbhsg_queue_pop(uep, ureq, -ECONNRESET);
+	spin_unlock_irqrestore(&uep->lock, flags);
 
 	return 0;
 }
@@ -852,10 +866,10 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
 {
 	struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
-	struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
+	struct usbhsg_uep *uep;
 	struct device *dev = usbhs_priv_to_dev(priv);
 	unsigned long flags;
-	int ret = 0;
+	int ret = 0, i;
 
 	/********************  spin lock ********************/
 	usbhs_lock(priv, flags);
@@ -887,7 +901,9 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
 	usbhs_sys_set_test_mode(priv, 0);
 	usbhs_sys_function_ctrl(priv, 0);
 
-	usbhsg_ep_disable(&dcp->ep);
+	/* disable all eps */
+	usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
+		usbhsg_ep_disable(&uep->ep);
 
 	dev_dbg(dev, "stop gadget\n");
 
@@ -1069,6 +1085,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 		ret = -ENOMEM;
 		goto usbhs_mod_gadget_probe_err_gpriv;
 	}
+	spin_lock_init(&uep->lock);
 
 	gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
 	dev_info(dev, "%stransceiver found\n",
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index fba4005dd737..6a7720e66595 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1529,8 +1529,11 @@ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)
 
 	/* Make sure driver was initialized */
 
-	if (us->extra == NULL)
+	if (us->extra == NULL) {
 		usb_stor_dbg(us, "ERROR Driver not initialized\n");
+		srb->result = DID_ERROR << 16;
+		return;
+	}
 
 	scsi_set_resid(srb, 0);
 	/* scsi_bufflen might change in protocol translation to ata */
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 6b0d2f0918c6..8a88f45822e3 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -3,6 +3,7 @@
 #define __DRIVER_USB_TYPEC_UCSI_H
 
 #include <linux/bitops.h>
+#include <linux/device.h>
 #include <linux/types.h>
 
 /* -------------------------------------------------------------------------- */
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3612542b6044..83fc9aab34e8 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -704,7 +704,8 @@ static int omap_hdq_probe(struct platform_device *pdev)
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq	< 0) {
-		ret = -ENXIO;
+		dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+		ret = irq;
 		goto err_irq;
 	}
 
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 95ea7e6b1d99..74471e7aa5cc 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -728,6 +728,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
 	memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
 	atomic_set(&sl->refcnt, 1);
 	atomic_inc(&sl->master->refcnt);
+	dev->slave_count++;
 
 	/* slave modules need to be loaded in a context with unlocked mutex */
 	mutex_unlock(&dev->mutex);
@@ -747,11 +748,11 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
 
 	sl->family = f;
 
-
 	err = __w1_attach_slave_device(sl);
 	if (err < 0) {
 		dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__,
 			 sl->name);
+		dev->slave_count--;
 		w1_family_put(sl->family);
 		atomic_dec(&sl->master->refcnt);
 		kfree(sl);
@@ -759,7 +760,6 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
 	}
 
 	sl->ttl = dev->slave_ttl;
-	dev->slave_count++;
 
 	memcpy(msg.id.id, rn, sizeof(msg.id));
 	msg.type = W1_SLAVE_ADD;
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 50dcb68d8070..ab609255a0f3 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -780,6 +780,9 @@ static int __init balloon_init(void)
 	}
 #endif
 
+	/* Init the xen-balloon driver. */
+	xen_balloon_init();
+
 	return 0;
 }
 subsys_initcall(balloon_init);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index d6786b87e13b..2c6a9114d332 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -42,6 +42,7 @@
 #include <linux/delay.h>
 #include <linux/hardirq.h>
 #include <linux/workqueue.h>
+#include <linux/ratelimit.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -1072,8 +1073,14 @@ static int gnttab_expand(unsigned int req_entries)
 	cur = nr_grant_frames;
 	extra = ((req_entries + (grefs_per_grant_frame-1)) /
 		 grefs_per_grant_frame);
-	if (cur + extra > gnttab_max_grant_frames())
+	if (cur + extra > gnttab_max_grant_frames()) {
+		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
+				    " cur=%u extra=%u limit=%u"
+				    " gnttab_free_count=%u req_entries=%u\n",
+				    cur, extra, gnttab_max_grant_frames(),
+				    gnttab_free_count, req_entries);
 		return -ENOSPC;
+	}
 
 	rc = gnttab_map(cur, cur + extra - 1);
 	if (rc == 0)
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e7715cb62eef..e89136ab851e 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -59,6 +59,8 @@ static void watch_target(struct xenbus_watch *watch,
 {
 	unsigned long long new_target;
 	int err;
+	static bool watch_fired;
+	static long target_diff;
 
 	err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
 	if (err != 1) {
@@ -69,7 +71,14 @@ static void watch_target(struct xenbus_watch *watch,
 	/* The given memory/target value is in KiB, so it needs converting to
 	 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
 	 */
-	balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+	new_target >>= PAGE_SHIFT - 10;
+	if (watch_fired) {
+		balloon_set_new_target(new_target - target_diff);
+		return;
+	}
+
+	watch_fired = true;
+	target_diff = new_target - balloon_stats.target_pages;
 }
 static struct xenbus_watch target_watch = {
 	.node = "memory/target",
@@ -94,22 +103,15 @@ static struct notifier_block xenstore_notifier = {
 	.notifier_call = balloon_init_watcher,
 };
 
-static int __init balloon_init(void)
+void xen_balloon_init(void)
 {
-	if (!xen_domain())
-		return -ENODEV;
-
-	pr_info("Initialising balloon driver\n");
-
 	register_balloon(&balloon_dev);
 
 	register_xen_selfballooning(&balloon_dev);
 
 	register_xenstore_notifier(&xenstore_notifier);
-
-	return 0;
 }
-subsys_initcall(balloon_init);
+EXPORT_SYMBOL_GPL(xen_balloon_init);
 
 #define BALLOON_SHOW(name, format, args...)				\
 	static ssize_t show_##name(struct device *dev,			\