Diffstat (limited to 'drivers/memory')
-rw-r--r--  drivers/memory/Kconfig                         |    6
-rw-r--r--  drivers/memory/brcmstb_dpfe.c                  |    7
-rw-r--r--  drivers/memory/bt1-l2-ctl.c                    |    2
-rw-r--r--  drivers/memory/da8xx-ddrctl.c                  |    2
-rw-r--r--  drivers/memory/emif-asm-offsets.c              |   10
-rw-r--r--  drivers/memory/emif.c                          |   23
-rw-r--r--  drivers/memory/fsl_ifc.c                       |   30
-rw-r--r--  drivers/memory/jz4780-nemc.c                   |   17
-rw-r--r--  drivers/memory/mtk-smi.c                       |    2
-rw-r--r--  drivers/memory/mvebu-devbus.c                  |   20
-rw-r--r--  drivers/memory/of_memory.c                     |   32
-rw-r--r--  drivers/memory/of_memory.h                     |   21
-rw-r--r--  drivers/memory/omap-gpmc.c                     |   66
-rw-r--r--  drivers/memory/pl172.c                         |   19
-rw-r--r--  drivers/memory/samsung/Kconfig                 |    7
-rw-r--r--  drivers/memory/samsung/exynos-srom.c           |   22
-rw-r--r--  drivers/memory/samsung/exynos5422-dmc.c        |   15
-rw-r--r--  drivers/memory/tegra/Kconfig                   |   14
-rw-r--r--  drivers/memory/tegra/Makefile                  |    4
-rw-r--r--  drivers/memory/tegra/mc.h                      |    1
-rw-r--r--  drivers/memory/tegra/tegra124-emc.c            |    7
-rw-r--r--  drivers/memory/tegra/tegra186-emc.c            |   25
-rw-r--r--  drivers/memory/tegra/tegra186.c                |    4
-rw-r--r--  drivers/memory/tegra/tegra20-emc.c             |   34
-rw-r--r--  drivers/memory/tegra/tegra210-emc-cc-r21021.c  | 1775
-rw-r--r--  drivers/memory/tegra/tegra210-emc-core.c       | 2100
-rw-r--r--  drivers/memory/tegra/tegra210-emc-table.c      |   90
-rw-r--r--  drivers/memory/tegra/tegra210-emc.h            | 1016
-rw-r--r--  drivers/memory/tegra/tegra210-mc.h             |   50
-rw-r--r--  drivers/memory/tegra/tegra30-emc.c             |  122
-rw-r--r--  drivers/memory/ti-aemif.c                      |   16
-rw-r--r--  drivers/memory/ti-emif-pm.c                    |    2
32 files changed, 5294 insertions, 267 deletions
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index e438d79857da..00060bdbf574 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -5,6 +5,12 @@
 
 menuconfig MEMORY
 	bool "Memory Controller drivers"
+	help
+	  This option allows to enable specific memory controller drivers,
+	  useful mostly on embedded systems.  These could be controllers
+	  for DRAM (SDR, DDR), ROM, SRAM and others.  The drivers features
+	  vary from memory tuning and frequency scaling to enabling
+	  access to attached peripherals through memory bus.
 
 if MEMORY
 
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 82b415be18d1..60e8633b1175 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -23,7 +23,7 @@
  *     - BE kernel + LE firmware image
  *     - BE kernel + BE firmware image
  *
- * The DPCU always runs in big endian mode. The firwmare image, however, can
+ * The DPCU always runs in big endian mode. The firmware image, however, can
  * be in either format. Also, communication between host CPU and DCPU is
  * always in little endian.
  */
@@ -188,7 +188,7 @@ struct brcmstb_dpfe_priv {
 	struct mutex lock;
 };
 
-static const char *error_text[] = {
+static const char * const error_text[] = {
 	"Success", "Header code incorrect", "Unknown command or argument",
 	"Incorrect checksum", "Malformed command", "Timed out",
 };
@@ -379,9 +379,8 @@ static void __iomem *get_msg_ptr(struct brcmstb_dpfe_priv *priv, u32 response,
 	void __iomem *ptr = NULL;
 
 	/* There is no need to use this function for API v3 or later. */
-	if (unlikely(priv->dpfe_api->version >= 3)) {
+	if (unlikely(priv->dpfe_api->version >= 3))
 		return NULL;
-	}
 
 	msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
 	offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
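
A quick note on the error_text change above: with a single const only the strings themselves are read-only, while the array of pointers stays writable; the second const makes the pointer table immutable too, so the whole thing can live in .rodata. A minimal sketch with hypothetical array names, not taken from the driver:

	static const char *msgs[] = { "ok", "bad" };            /* msgs[0] = "x"; still compiles */
	static const char * const msgs_ro[] = { "ok", "bad" };  /* pointer table is read-only as well */
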
diff --git a/drivers/memory/bt1-l2-ctl.c b/drivers/memory/bt1-l2-ctl.c
index 633fea6a4edf..85965fa26e0b 100644
--- a/drivers/memory/bt1-l2-ctl.c
+++ b/drivers/memory/bt1-l2-ctl.c
@@ -66,6 +66,7 @@ struct l2_ctl_device_attribute {
 	struct device_attribute dev_attr;
 	enum l2_ctl_stall id;
 };
+
 #define to_l2_ctl_dev_attr(_dev_attr) \
 	container_of(_dev_attr, struct l2_ctl_device_attribute, dev_attr)
 
@@ -242,6 +243,7 @@ static ssize_t l2_ctl_latency_store(struct device *dev,
 
 	return count;
 }
+
 static L2_CTL_ATTR_RW(l2_ws_latency, l2_ctl_latency, L2_WS_STALL);
 static L2_CTL_ATTR_RW(l2_tag_latency, l2_ctl_latency, L2_TAG_STALL);
 static L2_CTL_ATTR_RW(l2_data_latency, l2_ctl_latency, L2_DATA_STALL);
diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c
index e8f9b3f461f5..872addd0ec60 100644
--- a/drivers/memory/da8xx-ddrctl.c
+++ b/drivers/memory/da8xx-ddrctl.c
@@ -102,14 +102,12 @@ static int da8xx_ddrctl_probe(struct platform_device *pdev)
 {
 	const struct da8xx_ddrctl_config_knob *knob;
 	const struct da8xx_ddrctl_setting *setting;
-	struct device_node *node;
 	struct resource *res;
 	void __iomem *ddrctl;
 	struct device *dev;
 	u32 reg;
 
 	dev = &pdev->dev;
-	node = dev->of_node;
 
 	setting = da8xx_ddrctl_get_board_settings();
 	if (!setting) {
diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c
index db8043019ec6..4b98d1854cd7 100644
--- a/drivers/memory/emif-asm-offsets.c
+++ b/drivers/memory/emif-asm-offsets.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * TI AM33XX EMIF PM Assembly Offsets
  *
  * Copyright (C) 2016-2017 Texas Instruments Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 #include <linux/ti-emif-sram.h>
 
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 9d9127bf2a59..bb6a71d26798 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -282,10 +282,9 @@ static void set_lpmode(struct emif_data *emif, u8 lpmode)
 	 * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
 	 */
 	if ((emif->plat_data->ip_rev == EMIF_4D) &&
-	    (EMIF_LP_MODE_PWR_DN == lpmode)) {
+	    (lpmode == EMIF_LP_MODE_PWR_DN)) {
 		WARN_ONCE(1,
-			  "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by"
-			  "erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
+			  "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
 		/* rollback LP_MODE to Self-refresh mode */
 		lpmode = EMIF_LP_MODE_SELF_REFRESH;
 	}
@@ -714,7 +713,7 @@ static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void)
 	u32 fifo_we_slave_ratio;
 
 	fifo_we_slave_ratio =  DIV_ROUND_CLOSEST(
-		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
 
 	return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
 		fifo_we_slave_ratio << 22;
@@ -725,7 +724,7 @@ static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void)
 	u32 fifo_we_slave_ratio;
 
 	fifo_we_slave_ratio =  DIV_ROUND_CLOSEST(
-		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
 
 	return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
 		fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
@@ -736,7 +735,7 @@ static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void)
 	u32 fifo_we_slave_ratio;
 
 	fifo_we_slave_ratio =  DIV_ROUND_CLOSEST(
-		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+		EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
 
 	return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
 		fifo_we_slave_ratio << 13;
@@ -975,8 +974,7 @@ static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
 				EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
 		if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
 			dev_err(emif->dev,
-				"%s:NOT Extended temperature capable memory."
-				"Converting MR4=0x%02x as shutdown event\n",
+				"%s:NOT Extended temperature capable memory. Converting MR4=0x%02x as shutdown event\n",
 				__func__, emif->temperature_level);
 			/*
 			 * Temperature far too high - do kernel_power_off()
@@ -1318,9 +1316,9 @@ static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
 	if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
 		dev_info->cal_resistors_per_cs = true;
 
-	if (of_device_is_compatible(np_ddr , "jedec,lpddr2-s4"))
+	if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
 		dev_info->type = DDR_TYPE_LPDDR2_S4;
-	else if (of_device_is_compatible(np_ddr , "jedec,lpddr2-s2"))
+	else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
 		dev_info->type = DDR_TYPE_LPDDR2_S2;
 
 	of_property_read_u32(np_ddr, "density", &density);
@@ -1563,11 +1561,8 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
 		goto error;
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(emif->dev, "%s: error getting IRQ resource - %d\n",
-			__func__, irq);
+	if (irq < 0)
 		goto error;
-	}
 
 	emif_onetime_settings(emif);
 	emif_debugfs_init(emif);
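
The dev_err() dropped around platform_get_irq() above reflects that the helper already prints a diagnostic when no IRQ can be found, so callers only need to propagate the error. A minimal sketch of the resulting pattern in a generic probe function (names are placeholders, not the emif code):

	irq = platform_get_irq(pdev, 0);	/* logs on failure, except for -EPROBE_DEFER */
	if (irq < 0)
		return irq;
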
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index a2c971743ffe..89f99b5b6450 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -53,6 +53,7 @@ int fsl_ifc_find(phys_addr_t addr_base)
 
 	for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
 		u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr);
+
 		if (cspr & CSPR_V && (cspr & CSPR_BA) ==
 				convert_ifc_address(addr_base))
 			return i;
@@ -153,8 +154,8 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
 	/* read for chip select error */
 	cs_err = ifc_in32(&ifc->cm_evter_stat);
 	if (cs_err) {
-		dev_err(ctrl->dev, "transaction sent to IFC is not mapped to"
-				"any memory bank 0x%08X\n", cs_err);
+		dev_err(ctrl->dev, "transaction sent to IFC is not mapped to any memory bank 0x%08X\n",
+			cs_err);
 		/* clear the chip select error */
 		ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
 
@@ -163,24 +164,24 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
 		err_addr = ifc_in32(&ifc->cm_erattr1);
 
 		if (status & IFC_CM_ERATTR0_ERTYP_READ)
-			dev_err(ctrl->dev, "Read transaction error"
-				"CM_ERATTR0 0x%08X\n", status);
+			dev_err(ctrl->dev, "Read transaction error CM_ERATTR0 0x%08X\n",
+				status);
 		else
-			dev_err(ctrl->dev, "Write transaction error"
-				"CM_ERATTR0 0x%08X\n", status);
+			dev_err(ctrl->dev, "Write transaction error CM_ERATTR0 0x%08X\n",
+				status);
 
 		err_axiid = (status & IFC_CM_ERATTR0_ERAID) >>
 					IFC_CM_ERATTR0_ERAID_SHIFT;
-		dev_err(ctrl->dev, "AXI ID of the error"
-					"transaction 0x%08X\n", err_axiid);
+		dev_err(ctrl->dev, "AXI ID of the error transaction 0x%08X\n",
+			err_axiid);
 
 		err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >>
 					IFC_CM_ERATTR0_ESRCID_SHIFT;
-		dev_err(ctrl->dev, "SRC ID of the error"
-					"transaction 0x%08X\n", err_srcid);
+		dev_err(ctrl->dev, "SRC ID of the error transaction 0x%08X\n",
+			err_srcid);
 
-		dev_err(ctrl->dev, "Transaction Address corresponding to error"
-					"ERADDR 0x%08X\n", err_addr);
+		dev_err(ctrl->dev, "Transaction Address corresponding to error ERADDR 0x%08X\n",
+			err_addr);
 
 		ret = IRQ_HANDLED;
 	}
@@ -199,7 +200,7 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
  * the resources needed for the controller only.  The
  * resources for the NAND banks themselves are allocated
  * in the chip probe function.
-*/
+ */
 static int fsl_ifc_ctrl_probe(struct platform_device *dev)
 {
 	int ret = 0;
@@ -250,8 +251,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
 	/* get the Controller level irq */
 	fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
 	if (fsl_ifc_ctrl_dev->irq == 0) {
-		dev_err(&dev->dev, "failed to get irq resource "
-							"for IFC\n");
+		dev_err(&dev->dev, "failed to get irq resource for IFC\n");
 		ret = -ENODEV;
 		goto err;
 	}
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
index b232ed279fc3..3ec5cb0fce1e 100644
--- a/drivers/memory/jz4780-nemc.c
+++ b/drivers/memory/jz4780-nemc.c
@@ -8,6 +8,7 @@
 
 #include <linux/clk.h>
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/math64.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -22,6 +23,8 @@
 #define NEMC_SMCRn(n)		(0x14 + (((n) - 1) * 4))
 #define NEMC_NFCSR		0x50
 
+#define NEMC_REG_LEN		0x54
+
 #define NEMC_SMCR_SMT		BIT(0)
 #define NEMC_SMCR_BW_SHIFT	6
 #define NEMC_SMCR_BW_MASK	(0x3 << NEMC_SMCR_BW_SHIFT)
@@ -288,7 +291,19 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
 	nemc->dev = dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	nemc->base = devm_ioremap_resource(dev, res);
+
+	/*
+	 * The driver currently only uses the registers up to offset
+	 * NEMC_REG_LEN. Since the EFUSE registers are in the middle of the
+	 * NEMC registers, we only request the registers we will use for now;
+	 * that way the EFUSE driver can probe too.
+	 */
+	if (!devm_request_mem_region(dev, res->start, NEMC_REG_LEN, dev_name(dev))) {
+		dev_err(dev, "unable to request I/O memory region\n");
+		return -EBUSY;
+	}
+
+	nemc->base = devm_ioremap(dev, res->start, NEMC_REG_LEN);
 	if (IS_ERR(nemc->base)) {
 		dev_err(dev, "failed to get I/O memory\n");
 		return PTR_ERR(nemc->base);
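
For context on the jz4780-nemc change above: devm_ioremap_resource() claims the entire register window, which is exactly what had to be avoided. Roughly, that helper behaves like the sketch below, and the exclusive request over the full resource is what would block the EFUSE driver whose registers sit inside the same physical range:

	/* approximate behaviour of devm_ioremap_resource(dev, res) */
	if (!devm_request_mem_region(dev, res->start, resource_size(res), dev_name(dev)))
		return IOMEM_ERR_PTR(-EBUSY);	/* whole range becomes busy for everyone else */
	return devm_ioremap(dev, res->start, resource_size(res));

Requesting and mapping only the first NEMC_REG_LEN bytes, as the patch does, leaves the EFUSE registers free to be claimed by their own driver.
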
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index a113e811faab..e154bea3cf14 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -60,7 +60,7 @@ struct mtk_smi_common_plat {
 
 struct mtk_smi_larb_gen {
 	int port_in_larb[MTK_LARB_NR_MAX + 1];
-	void (*config_port)(struct device *);
+	void (*config_port)(struct device *dev);
 	unsigned int			larb_direct_to_common_mask;
 	bool				has_gals;
 };
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index 886aea587276..8450638e8670 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -124,32 +124,32 @@ static int devbus_get_timing_params(struct devbus *devbus,
 	 * The bus width is encoded into the register as 0 for 8 bits,
 	 * and 1 for 16 bits, so we do the necessary conversion here.
 	 */
-	if (r->bus_width == 8)
+	if (r->bus_width == 8) {
 		r->bus_width = 0;
-	else if (r->bus_width == 16)
+	} else if (r->bus_width == 16) {
 		r->bus_width = 1;
-	else {
+	} else {
 		dev_err(devbus->dev, "invalid bus width %d\n", r->bus_width);
 		return -EINVAL;
 	}
 
 	err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
-				 &r->badr_skew);
+				  &r->badr_skew);
 	if (err < 0)
 		return err;
 
 	err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps",
-				 &r->turn_off);
+				  &r->turn_off);
 	if (err < 0)
 		return err;
 
 	err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps",
-				 &r->acc_first);
+				  &r->acc_first);
 	if (err < 0)
 		return err;
 
 	err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps",
-				 &r->acc_next);
+				  &r->acc_next);
 	if (err < 0)
 		return err;
 
@@ -175,17 +175,17 @@ static int devbus_get_timing_params(struct devbus *devbus,
 	}
 
 	err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps",
-				 &w->ale_wr);
+				  &w->ale_wr);
 	if (err < 0)
 		return err;
 
 	err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps",
-				 &w->wr_low);
+				  &w->wr_low);
 	if (err < 0)
 		return err;
 
 	err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps",
-				 &w->wr_high);
+				  &w->wr_high);
 	if (err < 0)
 		return err;
 
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index 71f26eac7350..d9f5437d3bce 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -4,11 +4,10 @@
  *
  * Copyright (C) 2012 Texas Instruments, Inc.
  * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2020 Krzysztof Kozlowski <krzk@kernel.org>
  */
 
 #include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/list.h>
 #include <linux/of.h>
 #include <linux/gfp.h>
 #include <linux/export.h>
@@ -19,7 +18,7 @@
 /**
  * of_get_min_tck() - extract min timing values for ddr
  * @np: pointer to ddr device tree node
- * @device: device requesting for min timing values
+ * @dev: device requesting for min timing values
  *
  * Populates the lpddr2_min_tck structure by extracting data
  * from device tree node. Returns a pointer to the populated
@@ -27,7 +26,7 @@
  * default min timings provided by JEDEC.
  */
 const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
-		struct device *dev)
+					    struct device *dev)
 {
 	int			ret = 0;
 	struct lpddr2_min_tck	*min;
@@ -56,13 +55,13 @@ const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
 	return min;
 
 default_min_tck:
-	dev_warn(dev, "%s: using default min-tck values\n", __func__);
+	dev_warn(dev, "Using default min-tck values\n");
 	return &lpddr2_jedec_min_tck;
 }
 EXPORT_SYMBOL(of_get_min_tck);
 
 static int of_do_get_timings(struct device_node *np,
-		struct lpddr2_timings *tim)
+			     struct lpddr2_timings *tim)
 {
 	int ret;
 
@@ -84,7 +83,7 @@ static int of_do_get_timings(struct device_node *np,
 	ret |= of_property_read_u32(np, "tZQinit", &tim->tZQinit);
 	ret |= of_property_read_u32(np, "tRAS-max-ns", &tim->tRAS_max_ns);
 	ret |= of_property_read_u32(np, "tDQSCK-max-derated",
-		&tim->tDQSCK_max_derated);
+				    &tim->tDQSCK_max_derated);
 
 	return ret;
 }
@@ -103,7 +102,9 @@ static int of_do_get_timings(struct device_node *np,
  * while populating, returns default timings provided by JEDEC.
  */
 const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
-		struct device *dev, u32 device_type, u32 *nr_frequencies)
+						struct device *dev,
+						u32 device_type,
+						u32 *nr_frequencies)
 {
 	struct lpddr2_timings	*timings = NULL;
 	u32			arr_sz = 0, i = 0;
@@ -116,7 +117,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
 		tim_compat = "jedec,lpddr2-timings";
 		break;
 	default:
-		dev_warn(dev, "%s: un-supported memory type\n", __func__);
+		dev_warn(dev, "Unsupported memory type\n");
 	}
 
 	for_each_child_of_node(np_ddr, np_tim)
@@ -145,7 +146,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
 	return timings;
 
 default_timings:
-	dev_warn(dev, "%s: using default timings\n", __func__);
+	dev_warn(dev, "Using default memory timings\n");
 	*nr_frequencies = ARRAY_SIZE(lpddr2_jedec_timings);
 	return lpddr2_jedec_timings;
 }
@@ -154,7 +155,7 @@ EXPORT_SYMBOL(of_get_ddr_timings);
 /**
  * of_lpddr3_get_min_tck() - extract min timing values for lpddr3
  * @np: pointer to ddr device tree node
- * @device: device requesting for min timing values
+ * @dev: device requesting for min timing values
  *
  * Populates the lpddr3_min_tck structure by extracting data
  * from device tree node. Returns a pointer to the populated
@@ -193,8 +194,7 @@ const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
 	ret |= of_property_read_u32(np, "tMRD-min-tck", &min->tMRD);
 
 	if (ret) {
-		dev_warn(dev, "%s: errors while parsing min-tck values\n",
-			 __func__);
+		dev_warn(dev, "Errors while parsing min-tck values\n");
 		devm_kfree(dev, min);
 		goto default_min_tck;
 	}
@@ -202,7 +202,7 @@ const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
 	return min;
 
 default_min_tck:
-	dev_warn(dev, "%s: using default min-tck values\n", __func__);
+	dev_warn(dev, "Using default min-tck values\n");
 	return NULL;
 }
 EXPORT_SYMBOL(of_lpddr3_get_min_tck);
@@ -264,7 +264,7 @@ const struct lpddr3_timings
 		tim_compat = "jedec,lpddr3-timings";
 		break;
 	default:
-		dev_warn(dev, "%s: un-supported memory type\n", __func__);
+		dev_warn(dev, "Unsupported memory type\n");
 	}
 
 	for_each_child_of_node(np_ddr, np_tim)
@@ -293,7 +293,7 @@ const struct lpddr3_timings
 	return timings;
 
 default_timings:
-	dev_warn(dev, "%s: failed to get timings\n", __func__);
+	dev_warn(dev, "Failed to get timings\n");
 	*nr_frequencies = 0;
 	return NULL;
 }
diff --git a/drivers/memory/of_memory.h b/drivers/memory/of_memory.h
index e39ecc4c733d..4a99b232ab0a 100644
--- a/drivers/memory/of_memory.h
+++ b/drivers/memory/of_memory.h
@@ -3,22 +3,23 @@
  * OpenFirmware helpers for memory drivers
  *
  * Copyright (C) 2012 Texas Instruments, Inc.
+ * Copyright (C) 2020 Krzysztof Kozlowski <krzk@kernel.org>
  */
 
 #ifndef __LINUX_MEMORY_OF_REG_H
 #define __LINUX_MEMORY_OF_REG_H
 
 #if defined(CONFIG_OF) && defined(CONFIG_DDR)
-extern const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
-		struct device *dev);
-extern const struct lpddr2_timings
-	*of_get_ddr_timings(struct device_node *np_ddr, struct device *dev,
-	u32 device_type, u32 *nr_frequencies);
-extern const struct lpddr3_min_tck
-	*of_lpddr3_get_min_tck(struct device_node *np, struct device *dev);
-extern const struct lpddr3_timings
-	*of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
-	struct device *dev, u32 device_type, u32 *nr_frequencies);
+const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
+					    struct device *dev);
+const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
+						struct device *dev,
+						u32 device_type, u32 *nr_frequencies);
+const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
+						   struct device *dev);
+const struct lpddr3_timings *
+of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
+			  struct device *dev, u32 device_type, u32 *nr_frequencies);
 #else
 static inline const struct lpddr2_min_tck
 	*of_get_min_tck(struct device_node *np, struct device *dev)
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index eff26c1b1394..f512cbc7a36c 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -29,6 +29,7 @@
 #include <linux/of_platform.h>
 #include <linux/omap-gpmc.h>
 #include <linux/pm_runtime.h>
+#include <linux/sizes.h>
 
 #include <linux/platform_data/mtd-nand-omap2.h>
 
@@ -108,8 +109,8 @@
 #define ENABLE_PREFETCH		(0x1 << 7)
 #define DMA_MPU_MODE		2
 
-#define	GPMC_REVISION_MAJOR(l)		((l >> 4) & 0xf)
-#define	GPMC_REVISION_MINOR(l)		(l & 0xf)
+#define	GPMC_REVISION_MAJOR(l)		(((l) >> 4) & 0xf)
+#define	GPMC_REVISION_MINOR(l)		((l) & 0xf)
 
 #define	GPMC_HAS_WR_ACCESS		0x1
 #define	GPMC_HAS_WR_DATA_MUX_BUS	0x2
@@ -140,27 +141,27 @@
 #define GPMC_CONFIG1_WRITEMULTIPLE_SUPP (1 << 28)
 #define GPMC_CONFIG1_WRITETYPE_ASYNC    (0 << 27)
 #define GPMC_CONFIG1_WRITETYPE_SYNC     (1 << 27)
-#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25)
+#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) (((val) & 3) << 25)
 /** CLKACTIVATIONTIME Max Ticks */
 #define GPMC_CONFIG1_CLKACTIVATIONTIME_MAX 2
-#define GPMC_CONFIG1_PAGE_LEN(val)      ((val & 3) << 23)
+#define GPMC_CONFIG1_PAGE_LEN(val)      (((val) & 3) << 23)
 /** ATTACHEDDEVICEPAGELENGTH Max Value */
 #define GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX 2
 #define GPMC_CONFIG1_WAIT_READ_MON      (1 << 22)
 #define GPMC_CONFIG1_WAIT_WRITE_MON     (1 << 21)
-#define GPMC_CONFIG1_WAIT_MON_TIME(val) ((val & 3) << 18)
+#define GPMC_CONFIG1_WAIT_MON_TIME(val) (((val) & 3) << 18)
 /** WAITMONITORINGTIME Max Ticks */
 #define GPMC_CONFIG1_WAITMONITORINGTIME_MAX  2
-#define GPMC_CONFIG1_WAIT_PIN_SEL(val)  ((val & 3) << 16)
-#define GPMC_CONFIG1_DEVICESIZE(val)    ((val & 3) << 12)
+#define GPMC_CONFIG1_WAIT_PIN_SEL(val)  (((val) & 3) << 16)
+#define GPMC_CONFIG1_DEVICESIZE(val)    (((val) & 3) << 12)
 #define GPMC_CONFIG1_DEVICESIZE_16      GPMC_CONFIG1_DEVICESIZE(1)
 /** DEVICESIZE Max Value */
 #define GPMC_CONFIG1_DEVICESIZE_MAX     1
-#define GPMC_CONFIG1_DEVICETYPE(val)    ((val & 3) << 10)
+#define GPMC_CONFIG1_DEVICETYPE(val)    (((val) & 3) << 10)
 #define GPMC_CONFIG1_DEVICETYPE_NOR     GPMC_CONFIG1_DEVICETYPE(0)
-#define GPMC_CONFIG1_MUXTYPE(val)       ((val & 3) << 8)
+#define GPMC_CONFIG1_MUXTYPE(val)       (((val) & 3) << 8)
 #define GPMC_CONFIG1_TIME_PARA_GRAN     (1 << 4)
-#define GPMC_CONFIG1_FCLK_DIV(val)      (val & 3)
+#define GPMC_CONFIG1_FCLK_DIV(val)      ((val) & 3)
 #define GPMC_CONFIG1_FCLK_DIV2          (GPMC_CONFIG1_FCLK_DIV(1))
 #define GPMC_CONFIG1_FCLK_DIV3          (GPMC_CONFIG1_FCLK_DIV(2))
 #define GPMC_CONFIG1_FCLK_DIV4          (GPMC_CONFIG1_FCLK_DIV(3))
@@ -245,7 +246,7 @@ static DEFINE_SPINLOCK(gpmc_mem_lock);
 static unsigned int gpmc_cs_num = GPMC_CS_NUM;
 static unsigned int gpmc_nr_waitpins;
 static resource_size_t phys_base, mem_size;
-static unsigned gpmc_capability;
+static unsigned int gpmc_capability;
 static void __iomem *gpmc_base;
 
 static struct clk *gpmc_l3_clk;
@@ -291,15 +292,14 @@ static unsigned long gpmc_get_fclk_period(void)
 
 /**
  * gpmc_get_clk_period - get period of selected clock domain in ps
- * @cs Chip Select Region.
- * @cd Clock Domain.
+ * @cs: Chip Select Region.
+ * @cd: Clock Domain.
  *
  * GPMC_CS_CONFIG1 GPMCFCLKDIVIDER for cs has to be setup
  * prior to calling this function with GPMC_CD_CLK.
  */
 static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
 {
-
 	unsigned long tick_ps = gpmc_get_fclk_period();
 	u32 l;
 	int div;
@@ -319,7 +319,6 @@ static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
 	}
 
 	return tick_ps;
-
 }
 
 static unsigned int gpmc_ns_to_clk_ticks(unsigned int time_ns, int cs,
@@ -411,7 +410,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
  * @reg:     GPMC_CS_CONFIGn register offset.
  * @st_bit:  Start Bit
  * @end_bit: End Bit. Must be >= @st_bit.
- * @ma:x     Maximum parameter value (before optional @shift).
+ * @max:     Maximum parameter value (before optional @shift).
  *           If 0, maximum is as high as @st_bit and @end_bit allow.
  * @name:    DTS node name, w/o "gpmc,"
  * @cd:      Clock Domain of timing parameter.
@@ -511,7 +510,7 @@ static void gpmc_cs_show_timings(int cs, const char *desc)
 	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1,  4,  4, "time-para-granularity");
 	GPMC_GET_RAW(GPMC_CS_CONFIG1,  8,  9, "mux-add-data");
 	GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 12, 13, 1,
-			 GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
+			       GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
 	GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
 	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
 	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read");
@@ -625,9 +624,8 @@ static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max
 
 	l = gpmc_cs_read_reg(cs, reg);
 #ifdef CONFIG_OMAP_GPMC_DEBUG
-	pr_info(
-		"GPMC CS%d: %-17s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
-	       cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
+	pr_info("GPMC CS%d: %-17s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
+		cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
 			(l >> st_bit) & mask, time);
 #endif
 	l &= ~(mask << st_bit);
@@ -662,7 +660,6 @@ static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max
  */
 static int gpmc_calc_waitmonitoring_divider(unsigned int wait_monitoring)
 {
-
 	int div = gpmc_ns_to_ticks(wait_monitoring);
 
 	div += GPMC_CONFIG1_WAITMONITORINGTIME_MAX - 1;
@@ -674,7 +671,6 @@ static int gpmc_calc_waitmonitoring_divider(unsigned int wait_monitoring)
 		div = 1;
 
 	return div;
-
 }
 
 /**
@@ -728,7 +724,6 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
 	if (!s->sync_read && !s->sync_write &&
 	    (s->wait_on_read || s->wait_on_write)
 	   ) {
-
 		div = gpmc_calc_waitmonitoring_divider(t->wait_monitoring);
 		if (div < 0) {
 			pr_err("%s: waitmonitoringtime %3d ns too large for greatest gpmcfclkdivider.\n",
@@ -958,7 +953,7 @@ static int gpmc_cs_remap(int cs, u32 base)
 	 * Make sure we ignore any device offsets from the GPMC partition
 	 * allocated for the chip select and that the new base confirms
 	 * to the GPMC 16MB minimum granularity.
-	 */ 
+	 */
 	base &= ~(SZ_16M - 1);
 
 	gpmc_cs_get_memconf(cs, &old_base, &size);
@@ -1087,7 +1082,7 @@ static struct gpmc_nand_ops nand_ops = {
 
 /**
  * gpmc_omap_get_nand_ops - Get the GPMC NAND interface
- * @regs: the GPMC NAND register map exclusive for NAND use.
+ * @reg: the GPMC NAND register map exclusive for NAND use.
  * @cs: GPMC chip select number on which the NAND sits. The
  *      register map returned will be specific to this chip select.
  *
@@ -1242,7 +1237,7 @@ int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
 }
 EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
 
-int gpmc_get_client_irq(unsigned irq_config)
+int gpmc_get_client_irq(unsigned int irq_config)
 {
 	if (!gpmc_irq_domain) {
 		pr_warn("%s called before GPMC IRQ domain available\n",
@@ -1465,7 +1460,6 @@ static void gpmc_mem_exit(void)
 			continue;
 		gpmc_cs_delete_mem(cs);
 	}
-
 }
 
 static void gpmc_mem_init(void)
@@ -1634,17 +1628,14 @@ static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
 	/* oe_on */
 	temp = dev_t->t_oeasu;
 	if (mux)
-		temp = max_t(u32, temp,
-			gpmc_t->adv_rd_off + dev_t->t_aavdh);
+		temp = max_t(u32, temp, gpmc_t->adv_rd_off + dev_t->t_aavdh);
 	gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
 
 	/* access */
 	temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
-				gpmc_t->oe_on + dev_t->t_oe);
-	temp = max_t(u32, temp,
-				gpmc_t->cs_on + dev_t->t_ce);
-	temp = max_t(u32, temp,
-				gpmc_t->adv_on + dev_t->t_aa);
+		     gpmc_t->oe_on + dev_t->t_oe);
+	temp = max_t(u32, temp, gpmc_t->cs_on + dev_t->t_ce);
+	temp = max_t(u32, temp, gpmc_t->adv_on + dev_t->t_aa);
 	gpmc_t->access = gpmc_round_ps_to_ticks(temp);
 
 	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
@@ -1753,10 +1744,11 @@ static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
 	return 0;
 }
 
-/* TODO: remove this function once all peripherals are confirmed to
+/*
+ * TODO: remove this function once all peripherals are confirmed to
  * work with generic timing. Simultaneously gpmc_cs_set_timings()
  * has to be modified to handle timings in ps instead of ns
-*/
+ */
 static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
 {
 	t->cs_on /= 1000;
@@ -2089,7 +2081,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
 	gpmc_cs_disable_mem(cs);
 
 	/*
-	 * FIXME: gpmc_cs_request() will map the CS to an arbitary
+	 * FIXME: gpmc_cs_request() will map the CS to an arbitrary
 	 * location in the gpmc address space. When booting with
 	 * device-tree we want the NOR flash to be mapped to the
 	 * location specified in the device-tree blob. So remap the
diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c
index ff57195b4e37..575fadbffa30 100644
--- a/drivers/memory/pl172.c
+++ b/drivers/memory/pl172.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Memory controller driver for ARM PrimeCell PL172
  * PrimeCell MultiPort Memory Controller (PL172)
@@ -6,10 +7,6 @@
  *
  * Based on:
  * TI AEMIF driver, Copyright (C) 2010 - 2013 Texas Instruments Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <linux/amba/bus.h>
@@ -24,7 +21,7 @@
 #include <linux/of_platform.h>
 #include <linux/time.h>
 
-#define MPMC_STATIC_CFG(n)		(0x200 + 0x20 * n)
+#define MPMC_STATIC_CFG(n)		(0x200 + 0x20 * (n))
 #define  MPMC_STATIC_CFG_MW_8BIT	0x0
 #define  MPMC_STATIC_CFG_MW_16BIT	0x1
 #define  MPMC_STATIC_CFG_MW_32BIT	0x2
@@ -34,17 +31,17 @@
 #define  MPMC_STATIC_CFG_EW		BIT(8)
 #define  MPMC_STATIC_CFG_B		BIT(19)
 #define  MPMC_STATIC_CFG_P		BIT(20)
-#define MPMC_STATIC_WAIT_WEN(n)		(0x204 + 0x20 * n)
+#define MPMC_STATIC_WAIT_WEN(n)		(0x204 + 0x20 * (n))
 #define  MPMC_STATIC_WAIT_WEN_MAX	0x0f
-#define MPMC_STATIC_WAIT_OEN(n)		(0x208 + 0x20 * n)
+#define MPMC_STATIC_WAIT_OEN(n)		(0x208 + 0x20 * (n))
 #define  MPMC_STATIC_WAIT_OEN_MAX	0x0f
-#define MPMC_STATIC_WAIT_RD(n)		(0x20c + 0x20 * n)
+#define MPMC_STATIC_WAIT_RD(n)		(0x20c + 0x20 * (n))
 #define  MPMC_STATIC_WAIT_RD_MAX	0x1f
-#define MPMC_STATIC_WAIT_PAGE(n)	(0x210 + 0x20 * n)
+#define MPMC_STATIC_WAIT_PAGE(n)	(0x210 + 0x20 * (n))
 #define  MPMC_STATIC_WAIT_PAGE_MAX	0x1f
-#define MPMC_STATIC_WAIT_WR(n)		(0x214 + 0x20 * n)
+#define MPMC_STATIC_WAIT_WR(n)		(0x214 + 0x20 * (n))
 #define  MPMC_STATIC_WAIT_WR_MAX	0x1f
-#define MPMC_STATIC_WAIT_TURN(n)	(0x218 + 0x20 * n)
+#define MPMC_STATIC_WAIT_TURN(n)	(0x218 + 0x20 * (n))
 #define  MPMC_STATIC_WAIT_TURN_MAX	0x0f
 
 /* Maximum number of static chip selects */
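
The parentheses added around the macro argument above only matter when a caller passes an expression rather than a literal; a hypothetical example of the difference:

	#define REG_BAD(n)	(0x200 + 0x20 * n)
	#define REG_GOOD(n)	(0x200 + 0x20 * (n))

	/* with cs == 1 */
	REG_BAD(cs + 1)		/* 0x200 + 0x20 * cs + 1   == 0x221, wrong register */
	REG_GOOD(cs + 1)	/* 0x200 + 0x20 * (cs + 1) == 0x240, as intended */
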
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
index 20a8406ce786..8e240f078afc 100644
--- a/drivers/memory/samsung/Kconfig
+++ b/drivers/memory/samsung/Kconfig
@@ -23,5 +23,12 @@ config EXYNOS5422_DMC
 config EXYNOS_SROM
 	bool "Exynos SROM controller driver" if COMPILE_TEST
 	depends on (ARM && ARCH_EXYNOS) || (COMPILE_TEST && HAS_IOMEM)
+	help
+	  This adds driver for Samsung Exynos SoC SROM controller.  The driver
+	  in basic operation mode only saves and restores SROM registers
+	  during suspend.  If however appropriate device tree configuration
+	  is provided, the driver enables support for external memory
+	  or external devices.
+	  If unsure, say Y on devices with Samsung Exynos SoCs.
 
 endif
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
index 6510d7bab217..e73dd330af47 100644
--- a/drivers/memory/samsung/exynos-srom.c
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -47,9 +47,9 @@ struct exynos_srom {
 	struct exynos_srom_reg_dump *reg_offset;
 };
 
-static struct exynos_srom_reg_dump *exynos_srom_alloc_reg_dump(
-		const unsigned long *rdump,
-		unsigned long nr_rdump)
+static struct exynos_srom_reg_dump *
+exynos_srom_alloc_reg_dump(const unsigned long *rdump,
+			   unsigned long nr_rdump)
 {
 	struct exynos_srom_reg_dump *rd;
 	unsigned int i;
@@ -116,7 +116,7 @@ static int exynos_srom_probe(struct platform_device *pdev)
 	}
 
 	srom = devm_kzalloc(&pdev->dev,
-			sizeof(struct exynos_srom), GFP_KERNEL);
+			    sizeof(struct exynos_srom), GFP_KERNEL);
 	if (!srom)
 		return -ENOMEM;
 
@@ -130,7 +130,7 @@ static int exynos_srom_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, srom);
 
 	srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
-			ARRAY_SIZE(exynos_srom_offsets));
+						      ARRAY_SIZE(exynos_srom_offsets));
 	if (!srom->reg_offset) {
 		iounmap(srom->reg_base);
 		return -ENOMEM;
@@ -157,16 +157,16 @@ static int exynos_srom_probe(struct platform_device *pdev)
 
 #ifdef CONFIG_PM_SLEEP
 static void exynos_srom_save(void __iomem *base,
-				    struct exynos_srom_reg_dump *rd,
-				    unsigned int num_regs)
+			     struct exynos_srom_reg_dump *rd,
+			     unsigned int num_regs)
 {
 	for (; num_regs > 0; --num_regs, ++rd)
 		rd->value = readl(base + rd->offset);
 }
 
 static void exynos_srom_restore(void __iomem *base,
-				      const struct exynos_srom_reg_dump *rd,
-				      unsigned int num_regs)
+				const struct exynos_srom_reg_dump *rd,
+				unsigned int num_regs)
 {
 	for (; num_regs > 0; --num_regs, ++rd)
 		writel(rd->value, base + rd->offset);
@@ -177,7 +177,7 @@ static int exynos_srom_suspend(struct device *dev)
 	struct exynos_srom *srom = dev_get_drvdata(dev);
 
 	exynos_srom_save(srom->reg_base, srom->reg_offset,
-				ARRAY_SIZE(exynos_srom_offsets));
+			 ARRAY_SIZE(exynos_srom_offsets));
 	return 0;
 }
 
@@ -186,7 +186,7 @@ static int exynos_srom_resume(struct device *dev)
 	struct exynos_srom *srom = dev_get_drvdata(dev);
 
 	exynos_srom_restore(srom->reg_base, srom->reg_offset,
-				ARRAY_SIZE(exynos_srom_offsets));
+			    ARRAY_SIZE(exynos_srom_offsets));
 	return 0;
 }
 #endif
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 25196d6268e2..4312233e8e0a 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -270,12 +270,14 @@ static int find_target_freq_idx(struct exynos5_dmc *dmc,
  * This function switches between these banks according to the
  * currently used clock source.
  */
-static void exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
+static int exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
 {
 	unsigned int reg;
 	int ret;
 
 	ret = regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, &reg);
+	if (ret)
+		return ret;
 
 	if (set)
 		reg |= EXYNOS5_TIMING_SET_SWI;
@@ -283,6 +285,8 @@ static void exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
 		reg &= ~EXYNOS5_TIMING_SET_SWI;
 
 	regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, reg);
+
+	return 0;
 }
 
 /**
@@ -516,7 +520,7 @@ exynos5_dmc_switch_to_bypass_configuration(struct exynos5_dmc *dmc,
 	/*
 	 * Delays are long enough, so use them for the new coming clock.
 	 */
-	exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
+	ret = exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
 
 	return ret;
 }
@@ -577,7 +581,9 @@ exynos5_dmc_change_freq_and_volt(struct exynos5_dmc *dmc,
 
 	clk_set_rate(dmc->fout_bpll, target_rate);
 
-	exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
+	ret = exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
+	if (ret)
+		goto disable_clocks;
 
 	ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_bpll);
 	if (ret)
@@ -1392,7 +1398,7 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
 		return PTR_ERR(dmc->base_drexi1);
 
 	dmc->clk_regmap = syscon_regmap_lookup_by_phandle(np,
-				"samsung,syscon-clk");
+							  "samsung,syscon-clk");
 	if (IS_ERR(dmc->clk_regmap))
 		return PTR_ERR(dmc->clk_regmap);
 
@@ -1471,7 +1477,6 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
 		exynos5_dmc_df_profile.polling_ms = 500;
 	}
 
-
 	dmc->df = devm_devfreq_add_device(dev, &exynos5_dmc_df_profile,
 					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
 					  &dmc->gov_data);
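
The exynos5422-dmc change above turns exynos5_switch_timing_regs() into a function that can fail, because a regmap read-modify-write is only safe when the read actually succeeded. A minimal sketch of the general pattern, with placeholder names (map, REG, FLAG):

	ret = regmap_read(map, REG, &val);
	if (ret)
		return ret;		/* don't write back a value we never read */
	val |= FLAG;
	return regmap_write(map, REG, val);
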
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index fbfbaada61a2..9f0a96bf9ccc 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -36,3 +36,17 @@ config TEGRA124_EMC
 	  Tegra124 chips. The EMC controls the external DRAM on the board.
 	  This driver is required to change memory timings / clock rate for
 	  external memory.
+
+config TEGRA210_EMC_TABLE
+	bool
+	depends on ARCH_TEGRA_210_SOC
+
+config TEGRA210_EMC
+	tristate "NVIDIA Tegra210 External Memory Controller driver"
+	depends on TEGRA_MC && ARCH_TEGRA_210_SOC
+	select TEGRA210_EMC_TABLE
+	help
+	  This driver is for the External Memory Controller (EMC) found on
+	  Tegra210 chips. The EMC controls the external DRAM on the board.
+	  This driver is required to change memory timings / clock rate for
+	  external memory.
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index 529d10bc5650..6c1a2ecc6628 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -13,5 +13,9 @@ obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
 obj-$(CONFIG_TEGRA20_EMC)  += tegra20-emc.o
 obj-$(CONFIG_TEGRA30_EMC)  += tegra30-emc.o
 obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o
+obj-$(CONFIG_TEGRA210_EMC_TABLE) += tegra210-emc-table.o
+obj-$(CONFIG_TEGRA210_EMC) += tegra210-emc.o
 obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o tegra186-emc.o
 obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra186-emc.o
+
+tegra210-emc-y := tegra210-emc-core.o tegra210-emc-cc-r21021.o
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index 957c6eb74ff9..afa3ba45c9e6 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -34,6 +34,7 @@
 #define MC_EMEM_ARB_TIMING_W2W				0xbc
 #define MC_EMEM_ARB_TIMING_R2W				0xc0
 #define MC_EMEM_ARB_TIMING_W2R				0xc4
+#define MC_EMEM_ARB_MISC2				0xc8
 #define MC_EMEM_ARB_DA_TURNS				0xd0
 #define MC_EMEM_ARB_DA_COVERS				0xd4
 #define MC_EMEM_ARB_MISC0				0xd8
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 33b8216bac30..ba5cb1f4dfc2 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -984,6 +984,7 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
 
 static const struct of_device_id tegra_emc_of_match[] = {
 	{ .compatible = "nvidia,tegra124-emc" },
+	{ .compatible = "nvidia,tegra132-emc" },
 	{}
 };
 
@@ -1178,11 +1179,11 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
 		return;
 	}
 
-	debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root, emc,
+	debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
 			    &tegra_emc_debug_available_rates_fops);
-	debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+	debugfs_create_file("min_rate", 0644, emc->debugfs.root,
 			    emc, &tegra_emc_debug_min_rate_fops);
-	debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+	debugfs_create_file("max_rate", 0644, emc->debugfs.root,
 			    emc, &tegra_emc_debug_max_rate_fops);
 }
 
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index 97f26bc77ad4..8478f59db432 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -185,7 +185,7 @@ static int tegra186_emc_probe(struct platform_device *pdev)
 	if (IS_ERR(emc->clk)) {
 		err = PTR_ERR(emc->clk);
 		dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
-		return err;
+		goto put_bpmp;
 	}
 
 	platform_set_drvdata(pdev, emc);
@@ -201,7 +201,7 @@ static int tegra186_emc_probe(struct platform_device *pdev)
 	err = tegra_bpmp_transfer(emc->bpmp, &msg);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to EMC DVFS pairs: %d\n", err);
-		return err;
+		goto put_bpmp;
 	}
 
 	emc->debugfs.min_rate = ULONG_MAX;
@@ -211,8 +211,10 @@ static int tegra186_emc_probe(struct platform_device *pdev)
 
 	emc->dvfs = devm_kmalloc_array(&pdev->dev, emc->num_dvfs,
 				       sizeof(*emc->dvfs), GFP_KERNEL);
-	if (!emc->dvfs)
-		return -ENOMEM;
+	if (!emc->dvfs) {
+		err = -ENOMEM;
+		goto put_bpmp;
+	}
 
 	dev_dbg(&pdev->dev, "%u DVFS pairs:\n", emc->num_dvfs);
 
@@ -237,15 +239,10 @@ static int tegra186_emc_probe(struct platform_device *pdev)
 			"failed to set rate range [%lu-%lu] for %pC\n",
 			emc->debugfs.min_rate, emc->debugfs.max_rate,
 			emc->clk);
-		return err;
+		goto put_bpmp;
 	}
 
 	emc->debugfs.root = debugfs_create_dir("emc", NULL);
-	if (!emc->debugfs.root) {
-		dev_err(&pdev->dev, "failed to create debugfs directory\n");
-		return 0;
-	}
-
 	debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
 			    emc, &tegra186_emc_debug_available_rates_fops);
 	debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
@@ -254,6 +251,10 @@ static int tegra186_emc_probe(struct platform_device *pdev)
 			    emc, &tegra186_emc_debug_max_rate_fops);
 
 	return 0;
+
+put_bpmp:
+	tegra_bpmp_put(emc->bpmp);
+	return err;
 }
 
 static int tegra186_emc_remove(struct platform_device *pdev)
@@ -267,10 +268,10 @@ static int tegra186_emc_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id tegra186_emc_of_match[] = {
-#if defined(CONFIG_ARCH_TEGRA186_SOC)
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
 	{ .compatible = "nvidia,tegra186-emc" },
 #endif
-#if defined(CONFIG_ARCH_TEGRA194_SOC)
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
 	{ .compatible = "nvidia,tegra194-emc" },
 #endif
 	{ /* sentinel */ }
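
The goto-based unwinding added to tegra186_emc_probe() above exists because tegra_bpmp_get() takes a reference that every failure after it must drop again. A condensed sketch of that probe shape (do_more_setup() is a stand-in for the real steps):

	emc->bpmp = tegra_bpmp_get(&pdev->dev);
	if (IS_ERR(emc->bpmp))
		return PTR_ERR(emc->bpmp);	/* nothing to undo yet */

	err = do_more_setup(emc);
	if (err < 0)
		goto put_bpmp;			/* undo the reference taken above */

	return 0;

put_bpmp:
	tegra_bpmp_put(emc->bpmp);
	return err;
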
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
index 5d53f11ca7b6..e25c954dde2e 100644
--- a/drivers/memory/tegra/tegra186.c
+++ b/drivers/memory/tegra/tegra186.c
@@ -1570,12 +1570,12 @@ static const struct of_device_id tegra186_mc_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, tegra186_mc_of_match);
 
-static int tegra186_mc_suspend(struct device *dev)
+static int __maybe_unused tegra186_mc_suspend(struct device *dev)
 {
 	return 0;
 }
 
-static int tegra186_mc_resume(struct device *dev)
+static int __maybe_unused tegra186_mc_resume(struct device *dev)
 {
 	struct tegra186_mc *mc = dev_get_drvdata(dev);
 
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index b16715e9515d..027f46287dbf 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -7,11 +7,11 @@
 
 #include <linux/clk.h>
 #include <linux/clk/tegra.h>
-#include <linux/completion.h>
 #include <linux/debugfs.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -144,7 +144,6 @@ struct emc_timing {
 
 struct tegra_emc {
 	struct device *dev;
-	struct completion clk_handshake_complete;
 	struct notifier_block clk_nb;
 	struct clk *clk;
 	void __iomem *regs;
@@ -162,17 +161,13 @@ struct tegra_emc {
 static irqreturn_t tegra_emc_isr(int irq, void *data)
 {
 	struct tegra_emc *emc = data;
-	u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
 	u32 status;
 
 	status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
 	if (!status)
 		return IRQ_NONE;
 
-	/* notify about EMC-CAR handshake completion */
-	if (status & EMC_CLKCHANGE_COMPLETE_INT)
-		complete(&emc->clk_handshake_complete);
-
 	/* notify about HW problem */
 	if (status & EMC_REFRESH_OVERFLOW_INT)
 		dev_err_ratelimited(emc->dev,
@@ -224,14 +219,13 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
 	/* wait until programming has settled */
 	readl_relaxed(emc->regs + emc_timing_registers[i - 1]);
 
-	reinit_completion(&emc->clk_handshake_complete);
-
 	return 0;
 }
 
 static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
 {
-	unsigned long timeout;
+	int err;
+	u32 v;
 
 	dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);
 
@@ -242,11 +236,12 @@ static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
 		return 0;
 	}
 
-	timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
-					      msecs_to_jiffies(100));
-	if (timeout == 0) {
-		dev_err(emc->dev, "EMC-CAR handshake failed\n");
-		return -EIO;
+	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+						v & EMC_CLKCHANGE_COMPLETE_INT,
+						1, 100);
+	if (err) {
+		dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+		return err;
 	}
 
 	return 0;
@@ -412,7 +407,7 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
 
 static int emc_setup_hw(struct tegra_emc *emc)
 {
-	u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
 	u32 emc_cfg, emc_dbg;
 
 	emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -647,11 +642,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
 		return;
 	}
 
-	debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+	debugfs_create_file("available_rates", 0444, emc->debugfs.root,
 			    emc, &tegra_emc_debug_available_rates_fops);
-	debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+	debugfs_create_file("min_rate", 0644, emc->debugfs.root,
 			    emc, &tegra_emc_debug_min_rate_fops);
-	debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+	debugfs_create_file("max_rate", 0644, emc->debugfs.root,
 			    emc, &tegra_emc_debug_max_rate_fops);
 }
 
@@ -686,7 +681,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	init_completion(&emc->clk_handshake_complete);
 	emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
 	emc->dev = &pdev->dev;
 
diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
new file mode 100644
index 000000000000..ff55a17896fa
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
@@ -0,0 +1,1775 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION.  All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#include <soc/tegra/mc.h>
+
+#include "tegra210-emc.h"
+#include "tegra210-mc.h"
+
+/*
+ * Enable flags for specifying verbosity.
+ */
+#define INFO            (1 << 0)
+#define STEPS           (1 << 1)
+#define SUB_STEPS       (1 << 2)
+#define PRELOCK         (1 << 3)
+#define PRELOCK_STEPS   (1 << 4)
+#define ACTIVE_EN       (1 << 5)
+#define PRAMP_UP        (1 << 6)
+#define PRAMP_DN        (1 << 7)
+#define EMA_WRITES      (1 << 10)
+#define EMA_UPDATES     (1 << 11)
+#define PER_TRAIN       (1 << 16)
+#define CC_PRINT        (1 << 17)
+#define CCFIFO          (1 << 29)
+#define REGS            (1 << 30)
+#define REG_LISTS       (1 << 31)
+
+#define emc_dbg(emc, flags, ...) dev_dbg(emc->dev, __VA_ARGS__)
+
+#define DVFS_CLOCK_CHANGE_VERSION	21021
+#define EMC_PRELOCK_VERSION		2101
+
+enum {
+	DVFS_SEQUENCE = 1,
+	WRITE_TRAINING_SEQUENCE = 2,
+	PERIODIC_TRAINING_SEQUENCE = 3,
+	DVFS_PT1 = 10,
+	DVFS_UPDATE = 11,
+	TRAINING_PT1 = 12,
+	TRAINING_UPDATE = 13,
+	PERIODIC_TRAINING_UPDATE = 14
+};
+
+/*
+ * PTFV defines - basically just indexes into the per table PTFV array.
+ */
+#define PTFV_DQSOSC_MOVAVG_C0D0U0_INDEX		0
+#define PTFV_DQSOSC_MOVAVG_C0D0U1_INDEX		1
+#define PTFV_DQSOSC_MOVAVG_C0D1U0_INDEX		2
+#define PTFV_DQSOSC_MOVAVG_C0D1U1_INDEX		3
+#define PTFV_DQSOSC_MOVAVG_C1D0U0_INDEX		4
+#define PTFV_DQSOSC_MOVAVG_C1D0U1_INDEX		5
+#define PTFV_DQSOSC_MOVAVG_C1D1U0_INDEX		6
+#define PTFV_DQSOSC_MOVAVG_C1D1U1_INDEX		7
+#define PTFV_DVFS_SAMPLES_INDEX			9
+#define PTFV_MOVAVG_WEIGHT_INDEX		10
+#define PTFV_CONFIG_CTRL_INDEX			11
+
+#define PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA	(1 << 0)
+
+/*
+ * Do arithmetic in fixed point.
+ */
+#define MOVAVG_PRECISION_FACTOR		100
+
+/*
+ * The division portion of the average operation.
+ */
+#define __AVERAGE_PTFV(dev)						\
+	({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] =	\
+	   next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] /	\
+	   next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
+
+/*
+ * Convert val to fixed point and add it to the temporary average.
+ */
+#define __INCREMENT_PTFV(dev, val)					\
+	({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] +=	\
+	   ((val) * MOVAVG_PRECISION_FACTOR); })
+
+/*
+ * Convert a moving average back to integral form and return the value.
+ */
+#define __MOVAVG_AC(timing, dev)					\
+	((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] /	\
+	 MOVAVG_PRECISION_FACTOR)
+
+/* Weighted update. */
+#define __WEIGHTED_UPDATE_PTFV(dev, nval)				\
+	do {								\
+		int w = PTFV_MOVAVG_WEIGHT_INDEX;			\
+		int dqs = PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX;		\
+									\
+		next->ptfv_list[dqs] =					\
+			((nval * MOVAVG_PRECISION_FACTOR) +		\
+			 (next->ptfv_list[dqs] *			\
+			  next->ptfv_list[w])) /			\
+			(next->ptfv_list[w] + 1);			\
+									\
+		emc_dbg(emc, EMA_UPDATES, "%s: (s=%lu) EMA: %u\n",	\
+			__stringify(dev), nval, next->ptfv_list[dqs]);	\
+	} while (0)
+
+/* Access a particular average. */
+#define __MOVAVG(timing, dev)                      \
+	((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX])
+
+static u32 update_clock_tree_delay(struct tegra210_emc *emc, int type)
+{
+	bool periodic_training_update = type == PERIODIC_TRAINING_UPDATE;
+	struct tegra210_emc_timing *last = emc->last;
+	struct tegra210_emc_timing *next = emc->next;
+	u32 last_timing_rate_mhz = last->rate / 1000;
+	u32 next_timing_rate_mhz = next->rate / 1000;
+	bool dvfs_update = type == DVFS_UPDATE;
+	s32 tdel = 0, tmdel = 0, adel = 0;
+	bool dvfs_pt1 = type == DVFS_PT1;
+	unsigned long cval = 0;
+	u32 temp[2][2], value;
+	unsigned int i;
+
+	/*
+	 * Dev0 MSB.
+	 */
+	if (dvfs_pt1 || periodic_training_update) {
+		value = tegra210_emc_mrr_read(emc, 2, 19);
+
+		for (i = 0; i < emc->num_channels; i++) {
+			temp[i][0] = (value & 0x00ff) << 8;
+			temp[i][1] = (value & 0xff00) << 0;
+			value >>= 16;
+		}
+
+		/*
+		 * Dev0 LSB.
+		 */
+		value = tegra210_emc_mrr_read(emc, 2, 18);
+
+		for (i = 0; i < emc->num_channels; i++) {
+			temp[i][0] |= (value & 0x00ff) >> 0;
+			temp[i][1] |= (value & 0xff00) >> 8;
+			value >>= 16;
+		}
+	}
+
+	if (dvfs_pt1 || periodic_training_update) {
+		cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+		cval *= 1000000;
+		cval /= last_timing_rate_mhz * 2 * temp[0][0];
+	}
+
+	if (dvfs_pt1)
+		__INCREMENT_PTFV(C0D0U0, cval);
+	else if (dvfs_update)
+		__AVERAGE_PTFV(C0D0U0);
+	else if (periodic_training_update)
+		__WEIGHTED_UPDATE_PTFV(C0D0U0, cval);
+
+	if (dvfs_update || periodic_training_update) {
+		tdel = next->current_dram_clktree[C0D0U0] -
+				__MOVAVG_AC(next, C0D0U0);
+		tmdel = (tdel < 0) ? -1 * tdel : tdel;
+		adel = tmdel;
+
+		if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+		    next->tree_margin)
+			next->current_dram_clktree[C0D0U0] =
+				__MOVAVG_AC(next, C0D0U0);
+	}
+
+	if (dvfs_pt1 || periodic_training_update) {
+		cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+		cval *= 1000000;
+		cval /= last_timing_rate_mhz * 2 * temp[0][1];
+	}
+
+	if (dvfs_pt1)
+		__INCREMENT_PTFV(C0D0U1, cval);
+	else if (dvfs_update)
+		__AVERAGE_PTFV(C0D0U1);
+	else if (periodic_training_update)
+		__WEIGHTED_UPDATE_PTFV(C0D0U1, cval);
+
+	if (dvfs_update || periodic_training_update) {
+		tdel = next->current_dram_clktree[C0D0U1] -
+				__MOVAVG_AC(next, C0D0U1);
+		tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+		if (tmdel > adel)
+			adel = tmdel;
+
+		if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+		    next->tree_margin)
+			next->current_dram_clktree[C0D0U1] =
+				__MOVAVG_AC(next, C0D0U1);
+	}
+
+	if (emc->num_channels > 1) {
+		if (dvfs_pt1 || periodic_training_update) {
+			cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+			cval *= 1000000;
+			cval /= last_timing_rate_mhz * 2 * temp[1][0];
+		}
+
+		if (dvfs_pt1)
+			__INCREMENT_PTFV(C1D0U0, cval);
+		else if (dvfs_update)
+			__AVERAGE_PTFV(C1D0U0);
+		else if (periodic_training_update)
+			__WEIGHTED_UPDATE_PTFV(C1D0U0, cval);
+
+		if (dvfs_update || periodic_training_update) {
+			tdel = next->current_dram_clktree[C1D0U0] -
+					__MOVAVG_AC(next, C1D0U0);
+			tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+			if (tmdel > adel)
+				adel = tmdel;
+
+			if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+			    next->tree_margin)
+				next->current_dram_clktree[C1D0U0] =
+					__MOVAVG_AC(next, C1D0U0);
+		}
+
+		if (dvfs_pt1 || periodic_training_update) {
+			cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+			cval *= 1000000;
+			cval /= last_timing_rate_mhz * 2 * temp[1][1];
+		}
+
+		if (dvfs_pt1)
+			__INCREMENT_PTFV(C1D0U1, cval);
+		else if (dvfs_update)
+			__AVERAGE_PTFV(C1D0U1);
+		else if (periodic_training_update)
+			__WEIGHTED_UPDATE_PTFV(C1D0U1, cval);
+
+		if (dvfs_update || periodic_training_update) {
+			tdel = next->current_dram_clktree[C1D0U1] -
+					__MOVAVG_AC(next, C1D0U1);
+			tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+			if (tmdel > adel)
+				adel = tmdel;
+
+			if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+			    next->tree_margin)
+				next->current_dram_clktree[C1D0U1] =
+					__MOVAVG_AC(next, C1D0U1);
+		}
+	}
+
+	if (emc->num_devices < 2)
+		goto done;
+
+	/*
+	 * Dev1 MSB.
+	 */
+	if (dvfs_pt1 || periodic_training_update) {
+		value = tegra210_emc_mrr_read(emc, 1, 19);
+
+		for (i = 0; i < emc->num_channels; i++) {
+			temp[i][0] = (value & 0x00ff) << 8;
+			temp[i][1] = (value & 0xff00) << 0;
+			value >>= 16;
+		}
+
+		/*
+		 * Dev1 LSB.
+		 */
+		value = tegra210_emc_mrr_read(emc, 2, 18);
+
+		for (i = 0; i < emc->num_channels; i++) {
+			temp[i][0] |= (value & 0x00ff) >> 0;
+			temp[i][1] |= (value & 0xff00) >> 8;
+			value >>= 16;
+		}
+	}
+
+	if (dvfs_pt1 || periodic_training_update) {
+		cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+		cval *= 1000000;
+		cval /= last_timing_rate_mhz * 2 * temp[0][0];
+	}
+
+	if (dvfs_pt1)
+		__INCREMENT_PTFV(C0D1U0, cval);
+	else if (dvfs_update)
+		__AVERAGE_PTFV(C0D1U0);
+	else if (periodic_training_update)
+		__WEIGHTED_UPDATE_PTFV(C0D1U0, cval);
+
+	if (dvfs_update || periodic_training_update) {
+		tdel = next->current_dram_clktree[C0D1U0] -
+				__MOVAVG_AC(next, C0D1U0);
+		tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+		if (tmdel > adel)
+			adel = tmdel;
+
+		if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+		    next->tree_margin)
+			next->current_dram_clktree[C0D1U0] =
+				__MOVAVG_AC(next, C0D1U0);
+	}
+
+	if (dvfs_pt1 || periodic_training_update) {
+		cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+		cval *= 1000000;
+		cval /= last_timing_rate_mhz * 2 * temp[0][1];
+	}
+
+	if (dvfs_pt1)
+		__INCREMENT_PTFV(C0D1U1, cval);
+	else if (dvfs_update)
+		__AVERAGE_PTFV(C0D1U1);
+	else if (periodic_training_update)
+		__WEIGHTED_UPDATE_PTFV(C0D1U1, cval);
+
+	if (dvfs_update || periodic_training_update) {
+		tdel = next->current_dram_clktree[C0D1U1] -
+				__MOVAVG_AC(next, C0D1U1);
+		tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+		if (tmdel > adel)
+			adel = tmdel;
+
+		if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+		    next->tree_margin)
+			next->current_dram_clktree[C0D1U1] =
+				__MOVAVG_AC(next, C0D1U1);
+	}
+
+	if (emc->num_channels > 1) {
+		if (dvfs_pt1 || periodic_training_update) {
+			cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+			cval *= 1000000;
+			cval /= last_timing_rate_mhz * 2 * temp[1][0];
+		}
+
+		if (dvfs_pt1)
+			__INCREMENT_PTFV(C1D1U0, cval);
+		else if (dvfs_update)
+			__AVERAGE_PTFV(C1D1U0);
+		else if (periodic_training_update)
+			__WEIGHTED_UPDATE_PTFV(C1D1U0, cval);
+
+		if (dvfs_update || periodic_training_update) {
+			tdel = next->current_dram_clktree[C1D1U0] -
+					__MOVAVG_AC(next, C1D1U0);
+			tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+			if (tmdel > adel)
+				adel = tmdel;
+
+			if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+			    next->tree_margin)
+				next->current_dram_clktree[C1D1U0] =
+					__MOVAVG_AC(next, C1D1U0);
+		}
+
+		if (dvfs_pt1 || periodic_training_update) {
+			cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+			cval *= 1000000;
+			cval /= last_timing_rate_mhz * 2 * temp[1][1];
+		}
+
+		if (dvfs_pt1)
+			__INCREMENT_PTFV(C1D1U1, cval);
+		else if (dvfs_update)
+			__AVERAGE_PTFV(C1D1U1);
+		else if (periodic_training_update)
+			__WEIGHTED_UPDATE_PTFV(C1D1U1, cval);
+
+		if (dvfs_update || periodic_training_update) {
+			tdel = next->current_dram_clktree[C1D1U1] -
+					__MOVAVG_AC(next, C1D1U1);
+			tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+			if (tmdel > adel)
+				adel = tmdel;
+
+			if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+			    next->tree_margin)
+				next->current_dram_clktree[C1D1U1] =
+					__MOVAVG_AC(next, C1D1U1);
+		}
+	}
+
+done:
+	return adel;
+}
+
+static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
+					 struct tegra210_emc_timing *last,
+					 struct tegra210_emc_timing *next)
+{
+#define __COPY_EMA(nt, lt, dev)						\
+	({ __MOVAVG(nt, dev) = __MOVAVG(lt, dev) *			\
+	   (nt)->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
+
+	u32 i, adel = 0, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX];
+	u32 delay;
+
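+	/*
+	 * Convert the oscillator run time into microseconds for udelay():
+	 * rate is in kHz, so run_clocks * 1000 / rate yields us, plus a
+	 * small fixed guard interval.
+	 */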
+	delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
+	delay *= 1000;
+	delay = 2 + (delay / last->rate);
+
+	if (!next->periodic_training)
+		return 0;
+
+	if (type == DVFS_SEQUENCE) {
+		if (last->periodic_training &&
+		    (next->ptfv_list[PTFV_CONFIG_CTRL_INDEX] &
+		     PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA)) {
+			/*
+			 * If the previous frequency was using periodic
+			 * calibration then we can reuse the previous
+			 * frequency's EMA data.
+			 */
+			__COPY_EMA(next, last, C0D0U0);
+			__COPY_EMA(next, last, C0D0U1);
+			__COPY_EMA(next, last, C1D0U0);
+			__COPY_EMA(next, last, C1D0U1);
+			__COPY_EMA(next, last, C0D1U0);
+			__COPY_EMA(next, last, C0D1U1);
+			__COPY_EMA(next, last, C1D1U0);
+			__COPY_EMA(next, last, C1D1U1);
+		} else {
+			/* Reset the EMA. */
+			__MOVAVG(next, C0D0U0) = 0;
+			__MOVAVG(next, C0D0U1) = 0;
+			__MOVAVG(next, C1D0U0) = 0;
+			__MOVAVG(next, C1D0U1) = 0;
+			__MOVAVG(next, C0D1U0) = 0;
+			__MOVAVG(next, C0D1U1) = 0;
+			__MOVAVG(next, C1D1U0) = 0;
+			__MOVAVG(next, C1D1U1) = 0;
+
+			for (i = 0; i < samples; i++) {
+				tegra210_emc_start_periodic_compensation(emc);
+				udelay(delay);
+
+				/*
+				 * Generate next sample of data.
+				 */
+				adel = update_clock_tree_delay(emc, DVFS_PT1);
+			}
+		}
+
+		/*
+		 * This seems like it should be part of the
+		 * 'if (last->periodic_training)' conditional above,
+		 * since it is already done for the else clause.
+		 */
+		adel = update_clock_tree_delay(emc, DVFS_UPDATE);
+	}
+
+	if (type == PERIODIC_TRAINING_SEQUENCE) {
+		tegra210_emc_start_periodic_compensation(emc);
+		udelay(delay);
+
+		adel = update_clock_tree_delay(emc, PERIODIC_TRAINING_UPDATE);
+	}
+
+	return adel;
+}
+
+static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
+{
+	u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value;
+	u32 list[] = {
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
+		EMC_DATA_BRLSHFT_0,
+		EMC_DATA_BRLSHFT_1
+	};
+	struct tegra210_emc_timing *last = emc->last;
+	unsigned int items = ARRAY_SIZE(list), i;
+	unsigned long delay;
+
+	if (last->periodic_training) {
+		emc_dbg(emc, PER_TRAIN, "Periodic training starting\n");
+
+		value = emc_readl(emc, EMC_DBG);
+		emc_cfg_o = emc_readl(emc, EMC_CFG);
+		emc_cfg = emc_cfg_o & ~(EMC_CFG_DYN_SELF_REF |
+					EMC_CFG_DRAM_ACPD |
+					EMC_CFG_DRAM_CLKSTOP_PD);
+
+		/*
+		 * 1. Power optimizations should be off.
+		 */
+		emc_writel(emc, emc_cfg, EMC_CFG);
+
+		/* Does emc_timing_update() for above changes. */
+		tegra210_emc_dll_disable(emc);
+
+		for (i = 0; i < emc->num_channels; i++)
+			tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+						     EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
+						     0);
+
+		for (i = 0; i < emc->num_channels; i++)
+			tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+						     EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
+						     0);
+
+		emc_cfg_update = value = emc_readl(emc, EMC_CFG_UPDATE);
+		value &= ~EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK;
+		value |= (2 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT);
+		emc_writel(emc, value, EMC_CFG_UPDATE);
+
+		/*
+		 * 2. osc kick off - this assumes training and dvfs have set
+		 *    correct MR23.
+		 */
+		tegra210_emc_start_periodic_compensation(emc);
+
+		/*
+		 * 3. Let dram capture its clock tree delays.
+		 */
+		delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
+		delay *= 1000;
+		delay /= last->rate + 1;
+		udelay(delay);
+
+		/*
+		 * 4. Check delta wrt previous values (save value if margin
+		 *    exceeds what is set in table).
+		 */
+		del = periodic_compensation_handler(emc,
+						    PERIODIC_TRAINING_SEQUENCE,
+						    last, last);
+
+		/*
+		 * 5. Apply compensation w.r.t. trained values (if clock tree
+		 *    has drifted more than the set margin).
+		 */
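+		/*
+		 * Scale the reported drift to 128ths of a clock period at
+		 * the current rate (the rate is in kHz, hence the division
+		 * by 1000 to get MHz) before comparing it with the margin
+		 * from the timing table.
+		 */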
+		if (last->tree_margin < ((del * 128 * (last->rate / 1000)) / 1000000)) {
+			for (i = 0; i < items; i++) {
+				value = tegra210_emc_compensate(last, list[i]);
+				emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
+					list[i], value);
+				emc_writel(emc, value, list[i]);
+			}
+		}
+
+		emc_writel(emc, emc_cfg_o, EMC_CFG);
+
+		/*
+		 * 6. Timing update actually applies the new trimmers.
+		 */
+		tegra210_emc_timing_update(emc);
+
+		/* 6.1. Restore the UPDATE_DLL_IN_UPDATE field. */
+		emc_writel(emc, emc_cfg_update, EMC_CFG_UPDATE);
+
+		/* 6.2. Restore the DLL. */
+		tegra210_emc_dll_enable(emc);
+	}
+
+	return 0;
+}
+
+/*
+ * Do the clock change sequence.
+ */
+static void tegra210_emc_r21021_set_clock(struct tegra210_emc *emc, u32 clksrc)
+{
+	/* state variables */
+	static bool fsp_for_next_freq;
+	/* constant configuration parameters */
+	const bool save_restore_clkstop_pd = true;
+	const u32 zqcal_before_cc_cutoff = 2400;
+	const bool cya_allow_ref_cc = false;
+	const bool cya_issue_pc_ref = false;
+	const bool opt_cc_short_zcal = true;
+	const bool ref_b4_sref_en = false;
+	const u32 tZQCAL_lpddr4 = 1000000;
+	const bool opt_short_zcal = true;
+	const bool opt_do_sw_qrst = true;
+	const u32 opt_dvfs_mode = MAN_SR;
+	/*
+	 * This is the timing table for the source frequency. It does _not_
+	 * necessarily correspond to the actual timing values in the EMC at the
+	 * moment. If the boot BCT differs from the table then this can happen.
+	 * However, we need it for accessing the dram_timings (which are not
+	 * really registers) array for the current frequency.
+	 */
+	struct tegra210_emc_timing *fake, *last = emc->last, *next = emc->next;
+	u32 tRTM, RP_war, R2P_war, TRPab_war, deltaTWATM, W2P_war, tRPST;
+	u32 mr13_flip_fspwr, mr13_flip_fspop, ramp_up_wait, ramp_down_wait;
+	u32 zq_wait_long, zq_latch_dvfs_wait_time, tZQCAL_lpddr4_fc_adj;
+	u32 emc_auto_cal_config, auto_cal_en, emc_cfg, emc_sel_dpd_ctrl;
+	u32 tFC_lpddr4 = 1000 * next->dram_timings[T_FC_LPDDR4];
+	u32 bg_reg_mode_change, enable_bglp_reg, enable_bg_reg;
+	bool opt_zcal_en_cc = false, is_lpddr3 = false;
+	bool compensate_trimmer_applicable = false;
+	u32 emc_dbg, emc_cfg_pipe_clk, emc_pin;
+	u32 src_clk_period, dst_clk_period; /* in picoseconds */
+	bool shared_zq_resistor = false;
+	u32 value, dram_type;
+	u32 opt_dll_mode = 0;
+	unsigned long delay;
+	unsigned int i;
+
+	emc_dbg(emc, INFO, "Running clock change.\n");
+
+	/* XXX fake == last */
+	fake = tegra210_emc_find_timing(emc, last->rate * 1000UL);
+	fsp_for_next_freq = !fsp_for_next_freq;
+
+	value = emc_readl(emc, EMC_FBIO_CFG5) & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+	dram_type = value >> EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
+
+	if (last->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX] & BIT(31))
+		shared_zq_resistor = true;
+
+	if ((next->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0 &&
+	     last->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0) ||
+	    dram_type == DRAM_TYPE_LPDDR4)
+		opt_zcal_en_cc = true;
+
+	if (dram_type == DRAM_TYPE_DDR3)
+		opt_dll_mode = tegra210_emc_get_dll_state(next);
+
+	if ((next->burst_regs[EMC_FBIO_CFG5_INDEX] & BIT(25)) &&
+	    (dram_type == DRAM_TYPE_LPDDR2))
+		is_lpddr3 = true;
+
+	emc_readl(emc, EMC_CFG);
+	emc_readl(emc, EMC_AUTO_CAL_CONFIG);
+
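+	/* Timing rates are in kHz, so 10^9 / rate is the period in ps. */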
+	src_clk_period = 1000000000 / last->rate;
+	dst_clk_period = 1000000000 / next->rate;
+
+	if (dst_clk_period <= zqcal_before_cc_cutoff)
+		tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4 - tFC_lpddr4;
+	else
+		tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4;
+
+	tZQCAL_lpddr4_fc_adj /= dst_clk_period;
+
+	emc_dbg = emc_readl(emc, EMC_DBG);
+	emc_pin = emc_readl(emc, EMC_PIN);
+	emc_cfg_pipe_clk = emc_readl(emc, EMC_CFG_PIPE_CLK);
+
+	emc_cfg = next->burst_regs[EMC_CFG_INDEX];
+	emc_cfg &= ~(EMC_CFG_DYN_SELF_REF | EMC_CFG_DRAM_ACPD |
+		     EMC_CFG_DRAM_CLKSTOP_SR | EMC_CFG_DRAM_CLKSTOP_PD);
+	emc_sel_dpd_ctrl = next->emc_sel_dpd_ctrl;
+	emc_sel_dpd_ctrl &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN |
+			      EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN |
+			      EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN |
+			      EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN |
+			      EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN);
+
+	emc_dbg(emc, INFO, "Clock change version: %d\n",
+		DVFS_CLOCK_CHANGE_VERSION);
+	emc_dbg(emc, INFO, "DRAM type = %d\n", dram_type);
+	emc_dbg(emc, INFO, "DRAM dev #: %u\n", emc->num_devices);
+	emc_dbg(emc, INFO, "Next EMC clksrc: 0x%08x\n", clksrc);
+	emc_dbg(emc, INFO, "DLL clksrc:      0x%08x\n", next->dll_clk_src);
+	emc_dbg(emc, INFO, "last rate: %u, next rate %u\n", last->rate,
+		next->rate);
+	emc_dbg(emc, INFO, "last period: %u, next period: %u\n",
+		src_clk_period, dst_clk_period);
+	emc_dbg(emc, INFO, "  shared_zq_resistor: %d\n", !!shared_zq_resistor);
+	emc_dbg(emc, INFO, "  num_channels: %u\n", emc->num_channels);
+	emc_dbg(emc, INFO, "  opt_dll_mode: %d\n", opt_dll_mode);
+
+	/*
+	 * Step 1:
+	 *   Pre DVFS SW sequence.
+	 */
+	emc_dbg(emc, STEPS, "Step 1\n");
+	emc_dbg(emc, STEPS, "Step 1.1: Disable DLL temporarily.\n");
+
+	value = emc_readl(emc, EMC_CFG_DIG_DLL);
+	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+	emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+	tegra210_emc_timing_update(emc);
+
+	for (i = 0; i < emc->num_channels; i++)
+		tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
+					     EMC_CFG_DIG_DLL_CFG_DLL_EN, 0);
+
+	emc_dbg(emc, STEPS, "Step 1.2: Disable AUTOCAL temporarily.\n");
+
+	emc_auto_cal_config = next->emc_auto_cal_config;
+	auto_cal_en = emc_auto_cal_config & EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
+	emc_auto_cal_config &= ~EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
+	emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL;
+	emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL;
+	emc_auto_cal_config |= auto_cal_en;
+	emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+	emc_readl(emc, EMC_AUTO_CAL_CONFIG); /* Flush write. */
+
+	emc_dbg(emc, STEPS, "Step 1.3: Disable other power features.\n");
+
+	tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+	emc_writel(emc, emc_cfg, EMC_CFG);
+	emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+	tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+	if (next->periodic_training) {
+		tegra210_emc_reset_dram_clktree_values(next);
+
+		for (i = 0; i < emc->num_channels; i++)
+			tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+						     EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
+						     0);
+
+		for (i = 0; i < emc->num_channels; i++)
+			tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+						     EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
+						     0);
+
+		tegra210_emc_start_periodic_compensation(emc);
+
+		delay = 1000 * tegra210_emc_actual_osc_clocks(last->run_clocks);
+		udelay((delay / last->rate) + 2);
+
+		value = periodic_compensation_handler(emc, DVFS_SEQUENCE, fake,
+						      next);
+		value = (value * 128 * next->rate / 1000) / 1000000;
+
+		if (next->periodic_training && value > next->tree_margin)
+			compensate_trimmer_applicable = true;
+	}
+
+	emc_writel(emc, EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
+	tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+	emc_writel(emc, emc_cfg, EMC_CFG);
+	emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+	emc_writel(emc, emc_cfg_pipe_clk | EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON,
+		   EMC_CFG_PIPE_CLK);
+	emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp &
+			~EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE,
+		   EMC_FDPD_CTRL_CMD_NO_RAMP);
+
+	bg_reg_mode_change =
+		((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+		  EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) ^
+		 (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+		  EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD)) ||
+		((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+		  EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) ^
+		 (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+		  EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD));
+	enable_bglp_reg =
+		(next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+		 EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) == 0;
+	enable_bg_reg =
+		(next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+		 EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) == 0;
+
+	if (bg_reg_mode_change) {
+		if (enable_bg_reg)
+			emc_writel(emc, last->burst_regs
+				   [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+				   ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
+				   EMC_PMACRO_BG_BIAS_CTRL_0);
+
+		if (enable_bglp_reg)
+			emc_writel(emc, last->burst_regs
+				   [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+				   ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
+				   EMC_PMACRO_BG_BIAS_CTRL_0);
+	}
+
+	/* Check if we need to turn on VREF generator. */
+	if ((((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+	       EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 0) &&
+	     ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+	       EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 1)) ||
+	    (((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+	       EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) == 0) &&
+	     ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+	       EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) != 0))) {
+		u32 pad_tx_ctrl =
+		    next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+		u32 last_pad_tx_ctrl =
+		    last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+		u32 next_dq_e_ivref, next_dqs_e_ivref;
+
+		next_dqs_e_ivref = pad_tx_ctrl &
+				   EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF;
+		next_dq_e_ivref = pad_tx_ctrl &
+				  EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF;
+		value = (last_pad_tx_ctrl &
+				~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF &
+				~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) |
+			next_dq_e_ivref | next_dqs_e_ivref;
+		emc_writel(emc, value, EMC_PMACRO_DATA_PAD_TX_CTRL);
+		udelay(1);
+	} else if (bg_reg_mode_change) {
+		udelay(1);
+	}
+
+	tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+	/*
+	 * Step 2:
+	 *   Prelock the DLL.
+	 */
+	emc_dbg(emc, STEPS, "Step 2\n");
+
+	if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] &
+	    EMC_CFG_DIG_DLL_CFG_DLL_EN) {
+		emc_dbg(emc, INFO, "Prelock enabled for target frequency.\n");
+		value = tegra210_emc_dll_prelock(emc, clksrc);
+		emc_dbg(emc, INFO, "DLL out: 0x%03x\n", value);
+	} else {
+		emc_dbg(emc, INFO, "Disabling DLL for target frequency.\n");
+		tegra210_emc_dll_disable(emc);
+	}
+
+	/*
+	 * Step 3:
+	 *   Prepare autocal for the clock change.
+	 */
+	emc_dbg(emc, STEPS, "Step 3\n");
+
+	tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+	emc_writel(emc, next->emc_auto_cal_config2, EMC_AUTO_CAL_CONFIG2);
+	emc_writel(emc, next->emc_auto_cal_config3, EMC_AUTO_CAL_CONFIG3);
+	emc_writel(emc, next->emc_auto_cal_config4, EMC_AUTO_CAL_CONFIG4);
+	emc_writel(emc, next->emc_auto_cal_config5, EMC_AUTO_CAL_CONFIG5);
+	emc_writel(emc, next->emc_auto_cal_config6, EMC_AUTO_CAL_CONFIG6);
+	emc_writel(emc, next->emc_auto_cal_config7, EMC_AUTO_CAL_CONFIG7);
+	emc_writel(emc, next->emc_auto_cal_config8, EMC_AUTO_CAL_CONFIG8);
+	tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+	emc_auto_cal_config |= (EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START |
+				auto_cal_en);
+	emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+
+	/*
+	 * Step 4:
+	 *   Update EMC_CFG. (??)
+	 */
+	emc_dbg(emc, STEPS, "Step 4\n");
+
+	if (src_clk_period > 50000 && dram_type == DRAM_TYPE_LPDDR4)
+		ccfifo_writel(emc, 1, EMC_SELF_REF, 0);
+	else
+		emc_writel(emc, next->emc_cfg_2, EMC_CFG_2);
+
+	/*
+	 * Step 5:
+	 *   Prepare reference variables for ZQCAL regs.
+	 */
+	emc_dbg(emc, STEPS, "Step 5\n");
+
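+	/*
+	 * zq_wait_long is expressed in destination clock cycles: roughly
+	 * 1 us for LPDDR4, 360 ns for LPDDR2/LPDDR3 and 320 ns for DDR3,
+	 * with the DRAM-type specific minimums applied below.
+	 */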
+	if (dram_type == DRAM_TYPE_LPDDR4)
+		zq_wait_long = max((u32)1, div_o3(1000000, dst_clk_period));
+	else if (dram_type == DRAM_TYPE_LPDDR2 || is_lpddr3)
+		zq_wait_long = max(next->min_mrs_wait,
+				   div_o3(360000, dst_clk_period)) + 4;
+	else if (dram_type == DRAM_TYPE_DDR3)
+		zq_wait_long = max((u32)256,
+				   div_o3(320000, dst_clk_period) + 2);
+	else
+		zq_wait_long = 0;
+
+	/*
+	 * Step 6:
+	 *   Training code - removed.
+	 */
+	emc_dbg(emc, STEPS, "Step 6\n");
+
+	/*
+	 * Step 7:
+	 *   Program FSP reference registers and send MRWs to new FSPWR.
+	 */
+	emc_dbg(emc, STEPS, "Step 7\n");
+	emc_dbg(emc, SUB_STEPS, "Step 7.1: Bug 200024907 - Patch RP R2P");
+
+	/* WAR 200024907 */
+	if (dram_type == DRAM_TYPE_LPDDR4) {
+		u32 nRTP = 16;
+
+		if (src_clk_period >= 1000000 / 1866) /* 535.91 ps */
+			nRTP = 14;
+
+		if (src_clk_period >= 1000000 / 1600) /* 625.00 ps */
+			nRTP = 12;
+
+		if (src_clk_period >= 1000000 / 1333) /* 750.19 ps */
+			nRTP = 10;
+
+		if (src_clk_period >= 1000000 / 1066) /* 938.09 ps */
+			nRTP = 8;
+
+		deltaTWATM = max_t(u32, div_o3(7500, src_clk_period), 8);
+
+		/*
+		 * Originally there was a + .5 in the tRPST calculation.
+		 * However since we can't do FP in the kernel and the tRTM
+		 * computation was in a floating point ceiling function, adding
+		 * one to tRTP should be ok. There is no other source of
+		 * non-integer values, so the result was always going to be
+		 * something of the form: f_ceil(N + .5) = N + 1;
+		 */
+		tRPST = (last->emc_mrw & 0x80) >> 7;
+		tRTM = fake->dram_timings[RL] + div_o3(3600, src_clk_period) +
+			max_t(u32, div_o3(7500, src_clk_period), 8) + tRPST +
+			1 + nRTP;
+
+		emc_dbg(emc, INFO, "tRTM = %u, EMC_RP = %u\n", tRTM,
+			next->burst_regs[EMC_RP_INDEX]);
+
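+		/*
+		 * If the current tRP does not cover tRTM, stretch R2P to
+		 * make up the difference. Whenever a field saturates at its
+		 * 6-bit maximum (63), push the remainder into RP (and TRPab),
+		 * and make sure W2P still covers deltaTWATM.
+		 */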
+		if (last->burst_regs[EMC_RP_INDEX] < tRTM) {
+			if (tRTM > (last->burst_regs[EMC_R2P_INDEX] +
+				    last->burst_regs[EMC_RP_INDEX])) {
+				R2P_war = tRTM - last->burst_regs[EMC_RP_INDEX];
+				RP_war = last->burst_regs[EMC_RP_INDEX];
+				TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
+
+				if (R2P_war > 63) {
+					RP_war = R2P_war +
+						 last->burst_regs[EMC_RP_INDEX] - 63;
+
+					if (TRPab_war < RP_war)
+						TRPab_war = RP_war;
+
+					R2P_war = 63;
+				}
+			} else {
+				R2P_war = last->burst_regs[EMC_R2P_INDEX];
+				RP_war = last->burst_regs[EMC_RP_INDEX];
+				TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
+			}
+
+			if (RP_war < deltaTWATM) {
+				W2P_war = last->burst_regs[EMC_W2P_INDEX]
+					  + deltaTWATM - RP_war;
+				if (W2P_war > 63) {
+					RP_war = RP_war + W2P_war - 63;
+					if (TRPab_war < RP_war)
+						TRPab_war = RP_war;
+					W2P_war = 63;
+				}
+			} else {
+				W2P_war = last->burst_regs[EMC_W2P_INDEX];
+			}
+
+			if ((last->burst_regs[EMC_W2P_INDEX] ^ W2P_war) ||
+			    (last->burst_regs[EMC_R2P_INDEX] ^ R2P_war) ||
+			    (last->burst_regs[EMC_RP_INDEX] ^ RP_war) ||
+			    (last->burst_regs[EMC_TRPAB_INDEX] ^ TRPab_war)) {
+				emc_writel(emc, RP_war, EMC_RP);
+				emc_writel(emc, R2P_war, EMC_R2P);
+				emc_writel(emc, W2P_war, EMC_W2P);
+				emc_writel(emc, TRPab_war, EMC_TRPAB);
+			}
+
+			tegra210_emc_timing_update(emc);
+		} else {
+			emc_dbg(emc, INFO, "Skipped WAR\n");
+		}
+	}
+
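+	/*
+	 * MR13 carries the LPDDR4 frequency set point selects (FSP-WR in
+	 * OP[6], FSP-OP in OP[7]); alternate between the two set points on
+	 * every clock change.
+	 */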
+	if (!fsp_for_next_freq) {
+		mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x80;
+		mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0x00;
+	} else {
+		mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x40;
+		mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0xc0;
+	}
+
+	if (dram_type == DRAM_TYPE_LPDDR4) {
+		emc_writel(emc, mr13_flip_fspwr, EMC_MRW3);
+		emc_writel(emc, next->emc_mrw, EMC_MRW);
+		emc_writel(emc, next->emc_mrw2, EMC_MRW2);
+	}
+
+	/*
+	 * Step 8:
+	 *   Program the shadow registers.
+	 */
+	emc_dbg(emc, STEPS, "Step 8\n");
+	emc_dbg(emc, SUB_STEPS, "Writing burst_regs\n");
+
+	for (i = 0; i < next->num_burst; i++) {
+		const u16 *offsets = emc->offsets->burst;
+		u16 offset;
+
+		if (!offsets[i])
+			continue;
+
+		value = next->burst_regs[i];
+		offset = offsets[i];
+
+		if (dram_type != DRAM_TYPE_LPDDR4 &&
+		    (offset == EMC_MRW6 || offset == EMC_MRW7 ||
+		     offset == EMC_MRW8 || offset == EMC_MRW9 ||
+		     offset == EMC_MRW10 || offset == EMC_MRW11 ||
+		     offset == EMC_MRW12 || offset == EMC_MRW13 ||
+		     offset == EMC_MRW14 || offset == EMC_MRW15 ||
+		     offset == EMC_TRAINING_CTRL))
+			continue;
+
+		/* Pain... And suffering. */
+		if (offset == EMC_CFG) {
+			value &= ~EMC_CFG_DRAM_ACPD;
+			value &= ~EMC_CFG_DYN_SELF_REF;
+
+			if (dram_type == DRAM_TYPE_LPDDR4) {
+				value &= ~EMC_CFG_DRAM_CLKSTOP_SR;
+				value &= ~EMC_CFG_DRAM_CLKSTOP_PD;
+			}
+		} else if (offset == EMC_MRS_WAIT_CNT &&
+			   dram_type == DRAM_TYPE_LPDDR2 &&
+			   opt_zcal_en_cc && !opt_cc_short_zcal &&
+			   opt_short_zcal) {
+			value = (value & ~(EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK <<
+					   EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)) |
+				((zq_wait_long & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) <<
+						 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
+		} else if (offset == EMC_ZCAL_WAIT_CNT &&
+			   dram_type == DRAM_TYPE_DDR3 && opt_zcal_en_cc &&
+			   !opt_cc_short_zcal && opt_short_zcal) {
+			value = (value & ~(EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK <<
+					   EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT)) |
+				((zq_wait_long & EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<
+						 EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT);
+		} else if (offset == EMC_ZCAL_INTERVAL && opt_zcal_en_cc) {
+			value = 0; /* EMC_ZCAL_INTERVAL reset value. */
+		} else if (offset == EMC_PMACRO_AUTOCAL_CFG_COMMON) {
+			value |= EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS;
+		} else if (offset == EMC_PMACRO_DATA_PAD_TX_CTRL) {
+			value &= ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+				   EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
+				   EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+				   EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+		} else if (offset == EMC_PMACRO_CMD_PAD_TX_CTRL) {
+			value |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+			value &= ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+				   EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
+				   EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+				   EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+		} else if (offset == EMC_PMACRO_BRICK_CTRL_RFU1) {
+			value &= 0xf800f800;
+		} else if (offset == EMC_PMACRO_COMMON_PAD_TX_CTRL) {
+			value &= 0xfffffff0;
+		}
+
+		emc_writel(emc, value, offset);
+	}
+
+	/* SW addition: do EMC refresh adjustment here. */
+	tegra210_emc_adjust_timing(emc, next);
+
+	if (dram_type == DRAM_TYPE_LPDDR4) {
+		value = (23 << EMC_MRW_MRW_MA_SHIFT) |
+			(next->run_clocks & EMC_MRW_MRW_OP_MASK);
+		emc_writel(emc, value, EMC_MRW);
+	}
+
+	/* Per channel burst registers. */
+	emc_dbg(emc, SUB_STEPS, "Writing burst_regs_per_ch\n");
+
+	for (i = 0; i < next->num_burst_per_ch; i++) {
+		const struct tegra210_emc_per_channel_regs *burst =
+				emc->offsets->burst_per_channel;
+
+		if (!burst[i].offset)
+			continue;
+
+		if (dram_type != DRAM_TYPE_LPDDR4 &&
+		    (burst[i].offset == EMC_MRW6 ||
+		     burst[i].offset == EMC_MRW7 ||
+		     burst[i].offset == EMC_MRW8 ||
+		     burst[i].offset == EMC_MRW9 ||
+		     burst[i].offset == EMC_MRW10 ||
+		     burst[i].offset == EMC_MRW11 ||
+		     burst[i].offset == EMC_MRW12 ||
+		     burst[i].offset == EMC_MRW13 ||
+		     burst[i].offset == EMC_MRW14 ||
+		     burst[i].offset == EMC_MRW15))
+			continue;
+
+		/* Filter out second channel if not in DUAL_CHANNEL mode. */
+		if (emc->num_channels < 2 && burst[i].bank >= 1)
+			continue;
+
+		emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+			next->burst_reg_per_ch[i], burst[i].offset);
+		emc_channel_writel(emc, burst[i].bank,
+				   next->burst_reg_per_ch[i],
+				   burst[i].offset);
+	}
+
+	/* Vref regs. */
+	emc_dbg(emc, SUB_STEPS, "Writing vref_regs\n");
+
+	for (i = 0; i < next->vref_num; i++) {
+		const struct tegra210_emc_per_channel_regs *vref =
+					emc->offsets->vref_per_channel;
+
+		if (!vref[i].offset)
+			continue;
+
+		if (emc->num_channels < 2 && vref[i].bank >= 1)
+			continue;
+
+		emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+			next->vref_perch_regs[i], vref[i].offset);
+		emc_channel_writel(emc, vref[i].bank, next->vref_perch_regs[i],
+				   vref[i].offset);
+	}
+
+	/* Trimmers. */
+	emc_dbg(emc, SUB_STEPS, "Writing trim_regs\n");
+
+	for (i = 0; i < next->num_trim; i++) {
+		const u16 *offsets = emc->offsets->trim;
+
+		if (!offsets[i])
+			continue;
+
+		if (compensate_trimmer_applicable &&
+		    (offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
+		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
+		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
+		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
+		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
+		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
+		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
+		     offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
+		     offsets[i] == EMC_DATA_BRLSHFT_0 ||
+		     offsets[i] == EMC_DATA_BRLSHFT_1)) {
+			value = tegra210_emc_compensate(next, offsets[i]);
+			emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+				value, offsets[i]);
+			emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
+				(u32)(u64)offsets[i], value);
+			emc_writel(emc, value, offsets[i]);
+		} else {
+			emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+				next->trim_regs[i], offsets[i]);
+			emc_writel(emc, next->trim_regs[i], offsets[i]);
+		}
+	}
+
+	/* Per channel trimmers. */
+	emc_dbg(emc, SUB_STEPS, "Writing trim_regs_per_ch\n");
+
+	for (i = 0; i < next->num_trim_per_ch; i++) {
+		const struct tegra210_emc_per_channel_regs *trim =
+				&emc->offsets->trim_per_channel[0];
+		unsigned int offset;
+
+		if (!trim[i].offset)
+			continue;
+
+		if (emc->num_channels < 2 && trim[i].bank >= 1)
+			continue;
+
+		offset = trim[i].offset;
+
+		if (compensate_trimmer_applicable &&
+		    (offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
+		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
+		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
+		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
+		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
+		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
+		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
+		     offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
+		     offset == EMC_DATA_BRLSHFT_0 ||
+		     offset == EMC_DATA_BRLSHFT_1)) {
+			value = tegra210_emc_compensate(next, offset);
+			emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+				value, offset);
+			emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n", offset,
+				value);
+			emc_channel_writel(emc, trim[i].bank, value, offset);
+		} else {
+			emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+				next->trim_perch_regs[i], offset);
+			emc_channel_writel(emc, trim[i].bank,
+					   next->trim_perch_regs[i], offset);
+		}
+	}
+
+	emc_dbg(emc, SUB_STEPS, "Writing burst_mc_regs\n");
+
+	for (i = 0; i < next->num_mc_regs; i++) {
+		const u16 *offsets = emc->offsets->burst_mc;
+		u32 *values = next->burst_mc_regs;
+
+		emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+			values[i], offsets[i]);
+		mc_writel(emc->mc, values[i], offsets[i]);
+	}
+
+	/* Registers to be programmed on the faster clock. */
+	if (next->rate < last->rate) {
+		const u16 *la = emc->offsets->la_scale;
+
+		emc_dbg(emc, SUB_STEPS, "Writing la_scale_regs\n");
+
+		for (i = 0; i < next->num_up_down; i++) {
+			emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+				next->la_scale_regs[i], la[i]);
+			mc_writel(emc->mc, next->la_scale_regs[i], la[i]);
+		}
+	}
+
+	/* Flush all the burst register writes. */
+	mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+
+	/*
+	 * Step 9:
+	 *   LPDDR4 section A.
+	 */
+	emc_dbg(emc, STEPS, "Step 9\n");
+
+	value = next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX];
+	value &= ~EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK;
+
+	if (dram_type == DRAM_TYPE_LPDDR4) {
+		emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
+		emc_writel(emc, value, EMC_ZCAL_WAIT_CNT);
+
+		value = emc_dbg | (EMC_DBG_WRITE_MUX_ACTIVE |
+				   EMC_DBG_WRITE_ACTIVE_ONLY);
+
+		emc_writel(emc, value, EMC_DBG);
+		emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
+		emc_writel(emc, emc_dbg, EMC_DBG);
+	}
+
+	/*
+	 * Step 10:
+	 *   LPDDR4 and DDR3 common section.
+	 */
+	emc_dbg(emc, STEPS, "Step 10\n");
+
+	if (opt_dvfs_mode == MAN_SR || dram_type == DRAM_TYPE_LPDDR4) {
+		if (dram_type == DRAM_TYPE_LPDDR4)
+			ccfifo_writel(emc, 0x101, EMC_SELF_REF, 0);
+		else
+			ccfifo_writel(emc, 0x1, EMC_SELF_REF, 0);
+
+		if (dram_type == DRAM_TYPE_LPDDR4 &&
+		    dst_clk_period <= zqcal_before_cc_cutoff) {
+			ccfifo_writel(emc, mr13_flip_fspwr ^ 0x40, EMC_MRW3, 0);
+			ccfifo_writel(emc, (next->burst_regs[EMC_MRW6_INDEX] &
+						0xFFFF3F3F) |
+					   (last->burst_regs[EMC_MRW6_INDEX] &
+						0x0000C0C0), EMC_MRW6, 0);
+			ccfifo_writel(emc, (next->burst_regs[EMC_MRW14_INDEX] &
+						0xFFFF0707) |
+					   (last->burst_regs[EMC_MRW14_INDEX] &
+						0x00003838), EMC_MRW14, 0);
+
+			if (emc->num_devices > 1) {
+				ccfifo_writel(emc,
+				      (next->burst_regs[EMC_MRW7_INDEX] &
+				       0xFFFF3F3F) |
+				      (last->burst_regs[EMC_MRW7_INDEX] &
+				       0x0000C0C0), EMC_MRW7, 0);
+				ccfifo_writel(emc,
+				     (next->burst_regs[EMC_MRW15_INDEX] &
+				      0xFFFF0707) |
+				     (last->burst_regs[EMC_MRW15_INDEX] &
+				      0x00003838), EMC_MRW15, 0);
+			}
+
+			if (opt_zcal_en_cc) {
+				if (emc->num_devices < 2)
+					ccfifo_writel(emc,
+						2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
+						| EMC_ZQ_CAL_ZQ_CAL_CMD,
+						EMC_ZQ_CAL, 0);
+				else if (shared_zq_resistor)
+					ccfifo_writel(emc,
+						2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
+						| EMC_ZQ_CAL_ZQ_CAL_CMD,
+						EMC_ZQ_CAL, 0);
+				else
+					ccfifo_writel(emc,
+						      EMC_ZQ_CAL_ZQ_CAL_CMD,
+						      EMC_ZQ_CAL, 0);
+			}
+		}
+	}
+
+	if (dram_type == DRAM_TYPE_LPDDR4) {
+		value = (1000 * fake->dram_timings[T_RP]) / src_clk_period;
+		ccfifo_writel(emc, mr13_flip_fspop | 0x8, EMC_MRW3, value);
+		ccfifo_writel(emc, 0, 0, tFC_lpddr4 / src_clk_period);
+	}
+
+	if (dram_type == DRAM_TYPE_LPDDR4 || opt_dvfs_mode != MAN_SR) {
+		delay = 30;
+
+		if (cya_allow_ref_cc) {
+			delay += (1000 * fake->dram_timings[T_RP]) /
+					src_clk_period;
+			delay += 4000 * fake->dram_timings[T_RFC];
+		}
+
+		ccfifo_writel(emc, emc_pin & ~(EMC_PIN_PIN_CKE_PER_DEV |
+					       EMC_PIN_PIN_CKEB |
+					       EMC_PIN_PIN_CKE),
+			      EMC_PIN, delay);
+	}
+
+	/* calculate reference delay multiplier */
+	value = 1;
+
+	if (ref_b4_sref_en)
+		value++;
+
+	if (cya_allow_ref_cc)
+		value++;
+
+	if (cya_issue_pc_ref)
+		value++;
+
+	if (dram_type != DRAM_TYPE_LPDDR4) {
+		delay = ((1000 * fake->dram_timings[T_RP] / src_clk_period) +
+			 (1000 * fake->dram_timings[T_RFC] / src_clk_period));
+		delay = value * delay + 20;
+	} else {
+		delay = 0;
+	}
+
+	/*
+	 * Step 11:
+	 *   Ramp down.
+	 */
+	emc_dbg(emc, STEPS, "Step 11\n");
+
+	ccfifo_writel(emc, 0x0, EMC_CFG_SYNC, delay);
+
+	value = emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE | EMC_DBG_WRITE_ACTIVE_ONLY;
+	ccfifo_writel(emc, value, EMC_DBG, 0);
+
+	ramp_down_wait = tegra210_emc_dvfs_power_ramp_down(emc, src_clk_period,
+							   0);
+
+	/*
+	 * Step 12:
+	 *   And finally - trigger the clock change.
+	 */
+	emc_dbg(emc, STEPS, "Step 12\n");
+
+	ccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE, 0);
+	value &= ~EMC_DBG_WRITE_ACTIVE_ONLY;
+	ccfifo_writel(emc, value, EMC_DBG, 0);
+
+	/*
+	 * Step 13:
+	 *   Ramp up.
+	 */
+	emc_dbg(emc, STEPS, "Step 13\n");
+
+	ramp_up_wait = tegra210_emc_dvfs_power_ramp_up(emc, dst_clk_period, 0);
+	ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
+
+	/*
+	 * Step 14:
+	 *   Bringup CKE pins.
+	 */
+	emc_dbg(emc, STEPS, "Step 14\n");
+
+	if (dram_type == DRAM_TYPE_LPDDR4) {
+		value = emc_pin | EMC_PIN_PIN_CKE;
+
+		if (emc->num_devices <= 1)
+			value &= ~(EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV);
+		else
+			value |= EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV;
+
+		ccfifo_writel(emc, value, EMC_PIN, 0);
+	}
+
+	/*
+	 * Step 15: (two step 15s ??)
+	 *   Calculate zqlatch wait time; has dependency on ramping times.
+	 */
+	emc_dbg(emc, STEPS, "Step 15\n");
+
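+	/*
+	 * The wait is expressed in destination clock cycles: if ZQ
+	 * calibration was issued before the clock change, subtract the time
+	 * already spent ramping, otherwise subtract the power-down exit
+	 * delay (tPDEX) from the adjusted tZQCAL.
+	 */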
+	if (dst_clk_period <= zqcal_before_cc_cutoff) {
+		s32 t = (s32)(ramp_up_wait + ramp_down_wait) /
+			(s32)dst_clk_period;
+		zq_latch_dvfs_wait_time = (s32)tZQCAL_lpddr4_fc_adj - t;
+	} else {
+		zq_latch_dvfs_wait_time = tZQCAL_lpddr4_fc_adj -
+			div_o3(1000 * next->dram_timings[T_PDEX],
+			       dst_clk_period);
+	}
+
+	emc_dbg(emc, INFO, "tZQCAL_lpddr4_fc_adj = %u\n", tZQCAL_lpddr4_fc_adj);
+	emc_dbg(emc, INFO, "dst_clk_period = %u\n",
+		dst_clk_period);
+	emc_dbg(emc, INFO, "next->dram_timings[T_PDEX] = %u\n",
+		next->dram_timings[T_PDEX]);
+	emc_dbg(emc, INFO, "zq_latch_dvfs_wait_time = %d\n",
+		max_t(s32, 0, zq_latch_dvfs_wait_time));
+
+	if (dram_type == DRAM_TYPE_LPDDR4 && opt_zcal_en_cc) {
+		delay = div_o3(1000 * next->dram_timings[T_PDEX],
+			       dst_clk_period);
+
+		if (emc->num_devices < 2) {
+			if (dst_clk_period > zqcal_before_cc_cutoff)
+				ccfifo_writel(emc,
+					      2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+					      EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+					      delay);
+
+			value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+			ccfifo_writel(emc, value, EMC_MRW3, delay);
+			ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+			ccfifo_writel(emc, 0, EMC_REF, 0);
+			ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+				      EMC_ZQ_CAL_ZQ_LATCH_CMD,
+				      EMC_ZQ_CAL,
+				      max_t(s32, 0, zq_latch_dvfs_wait_time));
+		} else if (shared_zq_resistor) {
+			if (dst_clk_period > zqcal_before_cc_cutoff)
+				ccfifo_writel(emc,
+					      2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+					      EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+					      delay);
+
+			ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+				      EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+				      max_t(s32, 0, zq_latch_dvfs_wait_time) +
+					delay);
+			ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+				      EMC_ZQ_CAL_ZQ_LATCH_CMD,
+				      EMC_ZQ_CAL, 0);
+
+			value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+			ccfifo_writel(emc, value, EMC_MRW3, 0);
+			ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+			ccfifo_writel(emc, 0, EMC_REF, 0);
+
+			ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+				      EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+				      tZQCAL_lpddr4 / dst_clk_period);
+		} else {
+			if (dst_clk_period > zqcal_before_cc_cutoff)
+				ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_CAL_CMD,
+					      EMC_ZQ_CAL, delay);
+
+			value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+			ccfifo_writel(emc, value, EMC_MRW3, delay);
+			ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+			ccfifo_writel(emc, 0, EMC_REF, 0);
+
+			ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+				      max_t(s32, 0, zq_latch_dvfs_wait_time));
+		}
+	}
+
+	/* WAR: delay for zqlatch */
+	ccfifo_writel(emc, 0, 0, 10);
+
+	/*
+	 * Step 16:
+	 *   LPDDR4 Conditional Training Kickoff. Removed.
+	 */
+
+	/*
+	 * Step 17:
+	 *   MANSR exit self refresh.
+	 */
+	emc_dbg(emc, STEPS, "Step 17\n");
+
+	if (opt_dvfs_mode == MAN_SR && dram_type != DRAM_TYPE_LPDDR4)
+		ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+
+	/*
+	 * Step 18:
+	 *   Send MRWs to LPDDR3/DDR3.
+	 */
+	emc_dbg(emc, STEPS, "Step 18\n");
+
+	if (dram_type == DRAM_TYPE_LPDDR2) {
+		ccfifo_writel(emc, next->emc_mrw2, EMC_MRW2, 0);
+		ccfifo_writel(emc, next->emc_mrw,  EMC_MRW,  0);
+		if (is_lpddr3)
+			ccfifo_writel(emc, next->emc_mrw4, EMC_MRW4, 0);
+	} else if (dram_type == DRAM_TYPE_DDR3) {
+		if (opt_dll_mode)
+			ccfifo_writel(emc, next->emc_emrs &
+				      ~EMC_EMRS_USE_EMRS_LONG_CNT, EMC_EMRS, 0);
+		ccfifo_writel(emc, next->emc_emrs2 &
+			      ~EMC_EMRS2_USE_EMRS2_LONG_CNT, EMC_EMRS2, 0);
+		ccfifo_writel(emc, next->emc_mrs |
+			      EMC_EMRS_USE_EMRS_LONG_CNT, EMC_MRS, 0);
+	}
+
+	/*
+	 * Step 19:
+	 *   ZQCAL for LPDDR3/DDR3
+	 */
+	emc_dbg(emc, STEPS, "Step 19\n");
+
+	if (opt_zcal_en_cc) {
+		if (dram_type == DRAM_TYPE_LPDDR2) {
+			value = opt_cc_short_zcal ? 90000 : 360000;
+			value = div_o3(value, dst_clk_period);
+			value = value <<
+				EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT |
+				value <<
+				EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT;
+			ccfifo_writel(emc, value, EMC_MRS_WAIT_CNT2, 0);
+
+			value = opt_cc_short_zcal ? 0x56 : 0xab;
+			ccfifo_writel(emc, 2 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
+					   EMC_MRW_USE_MRW_EXT_CNT |
+					   10 << EMC_MRW_MRW_MA_SHIFT |
+					   value << EMC_MRW_MRW_OP_SHIFT,
+				      EMC_MRW, 0);
+
+			if (emc->num_devices > 1) {
+				value = 1 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
+					EMC_MRW_USE_MRW_EXT_CNT |
+					10 << EMC_MRW_MRW_MA_SHIFT |
+					value << EMC_MRW_MRW_OP_SHIFT;
+				ccfifo_writel(emc, value, EMC_MRW, 0);
+			}
+		} else if (dram_type == DRAM_TYPE_DDR3) {
+			value = opt_cc_short_zcal ? 0 : EMC_ZQ_CAL_LONG;
+
+			ccfifo_writel(emc, value |
+					   2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+					   EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+					   0);
+
+			if (emc->num_devices > 1) {
+				value = value | 1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+						EMC_ZQ_CAL_ZQ_CAL_CMD;
+				ccfifo_writel(emc, value, EMC_ZQ_CAL, 0);
+			}
+		}
+	}
+
+	if (bg_reg_mode_change) {
+		tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+
+		if (ramp_up_wait <= 1250000)
+			delay = (1250000 - ramp_up_wait) / dst_clk_period;
+		else
+			delay = 0;
+
+		ccfifo_writel(emc,
+			      next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX],
+			      EMC_PMACRO_BG_BIAS_CTRL_0, delay);
+		tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+	}
+
+	/*
+	 * Step 20:
+	 *   Issue ref and optional QRST.
+	 */
+	emc_dbg(emc, STEPS, "Step 20\n");
+
+	if (dram_type != DRAM_TYPE_LPDDR4)
+		ccfifo_writel(emc, 0, EMC_REF, 0);
+
+	if (opt_do_sw_qrst) {
+		ccfifo_writel(emc, 1, EMC_ISSUE_QRST, 0);
+		ccfifo_writel(emc, 0, EMC_ISSUE_QRST, 2);
+	}
+
+	/*
+	 * Step 21:
+	 *   Restore ZCAL and ZCAL interval.
+	 */
+	emc_dbg(emc, STEPS, "Step 21\n");
+
+	if (save_restore_clkstop_pd || opt_zcal_en_cc) {
+		ccfifo_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
+			      EMC_DBG, 0);
+		if (opt_zcal_en_cc && dram_type != DRAM_TYPE_LPDDR4)
+			ccfifo_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
+				      EMC_ZCAL_INTERVAL, 0);
+
+		if (save_restore_clkstop_pd)
+			ccfifo_writel(emc, next->burst_regs[EMC_CFG_INDEX] &
+						~EMC_CFG_DYN_SELF_REF,
+				      EMC_CFG, 0);
+		ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
+	}
+
+	/*
+	 * Step 22:
+	 *   Restore EMC_CFG_PIPE_CLK.
+	 */
+	emc_dbg(emc, STEPS, "Step 22\n");
+
+	ccfifo_writel(emc, emc_cfg_pipe_clk, EMC_CFG_PIPE_CLK, 0);
+
+	if (bg_reg_mode_change) {
+		if (enable_bg_reg)
+			emc_writel(emc,
+				   next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+					~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
+				   EMC_PMACRO_BG_BIAS_CTRL_0);
+		else
+			emc_writel(emc,
+				   next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+					~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
+				   EMC_PMACRO_BG_BIAS_CTRL_0);
+	}
+
+	/*
+	 * Step 23:
+	 */
+	emc_dbg(emc, STEPS, "Step 23\n");
+
+	value = emc_readl(emc, EMC_CFG_DIG_DLL);
+	value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+	value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
+		(2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+	emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+	tegra210_emc_do_clock_change(emc, clksrc);
+
+	/*
+	 * Step 24:
+	 *   Save training results. Removed.
+	 */
+
+	/*
+	 * Step 25:
+	 *   Program MC updown registers.
+	 */
+	emc_dbg(emc, STEPS, "Step 25\n");
+
+	if (next->rate > last->rate) {
+		for (i = 0; i < next->num_up_down; i++)
+			mc_writel(emc->mc, next->la_scale_regs[i],
+				  emc->offsets->la_scale[i]);
+
+		tegra210_emc_timing_update(emc);
+	}
+
+	/*
+	 * Step 26:
+	 *   Restore ZCAL registers.
+	 */
+	emc_dbg(emc, STEPS, "Step 26\n");
+
+	if (dram_type == DRAM_TYPE_LPDDR4) {
+		tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+		emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
+			   EMC_ZCAL_WAIT_CNT);
+		emc_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
+			   EMC_ZCAL_INTERVAL);
+		tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+	}
+
+	if (dram_type != DRAM_TYPE_LPDDR4 && opt_zcal_en_cc &&
+	    !opt_short_zcal && opt_cc_short_zcal) {
+		udelay(2);
+
+		tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+		if (dram_type == DRAM_TYPE_LPDDR2)
+			emc_writel(emc, next->burst_regs[EMC_MRS_WAIT_CNT_INDEX],
+				   EMC_MRS_WAIT_CNT);
+		else if (dram_type == DRAM_TYPE_DDR3)
+			emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
+				   EMC_ZCAL_WAIT_CNT);
+		tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+	}
+
+	/*
+	 * Step 27:
+	 *   Restore EMC_CFG, FDPD registers.
+	 */
+	emc_dbg(emc, STEPS, "Step 27\n");
+
+	tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+	emc_writel(emc, next->burst_regs[EMC_CFG_INDEX], EMC_CFG);
+	tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+	emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp,
+		   EMC_FDPD_CTRL_CMD_NO_RAMP);
+	emc_writel(emc, next->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+
+	/*
+	 * Step 28:
+	 *   Training recover. Removed.
+	 */
+	emc_dbg(emc, STEPS, "Step 28\n");
+
+	tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+	emc_writel(emc,
+		   next->burst_regs[EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX],
+		   EMC_PMACRO_AUTOCAL_CFG_COMMON);
+	tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+	/*
+	 * Step 29:
+	 *   Power fix WAR.
+	 */
+	emc_dbg(emc, STEPS, "Step 29\n");
+
+	emc_writel(emc, EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 |
+		   EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 |
+		   EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 |
+		   EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 |
+		   EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 |
+		   EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 |
+		   EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 |
+		   EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7,
+		   EMC_PMACRO_CFG_PM_GLOBAL_0);
+	emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR,
+		   EMC_PMACRO_TRAINING_CTRL_0);
+	emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR,
+		   EMC_PMACRO_TRAINING_CTRL_1);
+	emc_writel(emc, 0, EMC_PMACRO_CFG_PM_GLOBAL_0);
+
+	/*
+	 * Step 30:
+	 *   Re-enable autocal.
+	 */
+	emc_dbg(emc, STEPS, "Step 30: Re-enable DLL and AUTOCAL\n");
+
+	if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] & EMC_CFG_DIG_DLL_CFG_DLL_EN) {
+		value = emc_readl(emc, EMC_CFG_DIG_DLL);
+		value |=  EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+		value |=  EMC_CFG_DIG_DLL_CFG_DLL_EN;
+		value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+		value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+		value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
+			(2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+		emc_writel(emc, value, EMC_CFG_DIG_DLL);
+		tegra210_emc_timing_update(emc);
+	}
+
+	emc_writel(emc, next->emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+
+	/* Done! Yay. */
+}
+
+const struct tegra210_emc_sequence tegra210_emc_r21021 = {
+	.revision = 0x7,
+	.set_clock = tegra210_emc_r21021_set_clock,
+	.periodic_compensation = tegra210_emc_r21021_periodic_compensation,
+};
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
new file mode 100644
index 000000000000..cdd663ba4733
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -0,0 +1,2100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION.  All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/mc.h>
+
+#include "tegra210-emc.h"
+#include "tegra210-mc.h"
+
+/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC */
+#define EMC_CLK_EMC_2X_CLK_SRC_SHIFT			29
+#define EMC_CLK_EMC_2X_CLK_SRC_MASK			\
+	(0x7 << EMC_CLK_EMC_2X_CLK_SRC_SHIFT)
+#define EMC_CLK_SOURCE_PLLM_LJ				0x4
+#define EMC_CLK_SOURCE_PLLMB_LJ				0x5
+#define EMC_CLK_FORCE_CC_TRIGGER			BIT(27)
+#define EMC_CLK_MC_EMC_SAME_FREQ			BIT(16)
+#define EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT		0
+#define EMC_CLK_EMC_2X_CLK_DIVISOR_MASK			\
+	(0xff << EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT)
+
+/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC_DLL */
+#define DLL_CLK_EMC_DLL_CLK_SRC_SHIFT			29
+#define DLL_CLK_EMC_DLL_CLK_SRC_MASK			\
+	(0x7 << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT)
+#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT		10
+#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK		\
+	(0x3 << DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT)
+#define PLLM_VCOA					0
+#define PLLM_VCOB					1
+#define EMC_DLL_SWITCH_OUT				2
+#define DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT		0
+#define DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK		\
+	(0xff << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT)
+
+/* MC_EMEM_ARB_MISC0 */
+#define MC_EMEM_ARB_MISC0_EMC_SAME_FREQ			BIT(27)
+
+/* EMC_DATA_BRLSHFT_X */
+#define EMC0_EMC_DATA_BRLSHFT_0_INDEX	2
+#define EMC1_EMC_DATA_BRLSHFT_0_INDEX	3
+#define EMC0_EMC_DATA_BRLSHFT_1_INDEX	4
+#define EMC1_EMC_DATA_BRLSHFT_1_INDEX	5
+
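+/*
+ * TRIM_REG() extracts the long DQ trim for a given channel/rank/byte and adds
+ * 64 delay units for every step of the corresponding DATA_BRLSHFT field;
+ * CALC_TEMP() packs two recomputed byte values back into their register
+ * fields.
+ */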
+#define TRIM_REG(chan, rank, reg, byte)					\
+	(((EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ##	\
+	   _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte ## _MASK &	\
+	   next->trim_regs[EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ##		\
+				 rank ## _ ## reg ## _INDEX]) >>	\
+	  EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ##	\
+	  _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte ## _SHIFT)	\
+	 +								\
+	 (((EMC_DATA_BRLSHFT_ ## rank ## _RANK ## rank ## _BYTE ##	\
+	    byte ## _DATA_BRLSHFT_MASK &				\
+	    next->trim_perch_regs[EMC ## chan ##			\
+			      _EMC_DATA_BRLSHFT_ ## rank ## _INDEX]) >>	\
+	   EMC_DATA_BRLSHFT_ ## rank ## _RANK ## rank ## _BYTE ##	\
+	   byte ## _DATA_BRLSHFT_SHIFT) * 64))
+
+#define CALC_TEMP(rank, reg, byte1, byte2, n)				\
+	(((new[n] << EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ##	\
+	   reg ## _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte1 ## _SHIFT) & \
+	  EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ##	\
+	  _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte1 ## _MASK)	\
+	 |								\
+	 ((new[n + 1] << EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ##\
+	   reg ## _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte2 ## _SHIFT) & \
+	  EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ##	\
+	  _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte2 ## _MASK))
+
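+/*
+ * Scale the refresh interval (low 16 bits) by the given speedup factor while
+ * leaving the upper half of the register value untouched.
+ */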
+#define REFRESH_SPEEDUP(value, speedup) \
+		(((value) & 0xffff0000) | ((value) & 0xffff) * (speedup))
+
+#define LPDDR2_MR4_SRR GENMASK(2, 0)
+
+static const struct tegra210_emc_sequence *tegra210_emc_sequences[] = {
+	&tegra210_emc_r21021,
+};
+
+static const struct tegra210_emc_table_register_offsets
+tegra210_emc_table_register_offsets = {
+	.burst = {
+		EMC_RC,
+		EMC_RFC,
+		EMC_RFCPB,
+		EMC_REFCTRL2,
+		EMC_RFC_SLR,
+		EMC_RAS,
+		EMC_RP,
+		EMC_R2W,
+		EMC_W2R,
+		EMC_R2P,
+		EMC_W2P,
+		EMC_R2R,
+		EMC_TPPD,
+		EMC_CCDMW,
+		EMC_RD_RCD,
+		EMC_WR_RCD,
+		EMC_RRD,
+		EMC_REXT,
+		EMC_WEXT,
+		EMC_WDV_CHK,
+		EMC_WDV,
+		EMC_WSV,
+		EMC_WEV,
+		EMC_WDV_MASK,
+		EMC_WS_DURATION,
+		EMC_WE_DURATION,
+		EMC_QUSE,
+		EMC_QUSE_WIDTH,
+		EMC_IBDLY,
+		EMC_OBDLY,
+		EMC_EINPUT,
+		EMC_MRW6,
+		EMC_EINPUT_DURATION,
+		EMC_PUTERM_EXTRA,
+		EMC_PUTERM_WIDTH,
+		EMC_QRST,
+		EMC_QSAFE,
+		EMC_RDV,
+		EMC_RDV_MASK,
+		EMC_RDV_EARLY,
+		EMC_RDV_EARLY_MASK,
+		EMC_REFRESH,
+		EMC_BURST_REFRESH_NUM,
+		EMC_PRE_REFRESH_REQ_CNT,
+		EMC_PDEX2WR,
+		EMC_PDEX2RD,
+		EMC_PCHG2PDEN,
+		EMC_ACT2PDEN,
+		EMC_AR2PDEN,
+		EMC_RW2PDEN,
+		EMC_CKE2PDEN,
+		EMC_PDEX2CKE,
+		EMC_PDEX2MRR,
+		EMC_TXSR,
+		EMC_TXSRDLL,
+		EMC_TCKE,
+		EMC_TCKESR,
+		EMC_TPD,
+		EMC_TFAW,
+		EMC_TRPAB,
+		EMC_TCLKSTABLE,
+		EMC_TCLKSTOP,
+		EMC_MRW7,
+		EMC_TREFBW,
+		EMC_ODT_WRITE,
+		EMC_FBIO_CFG5,
+		EMC_FBIO_CFG7,
+		EMC_CFG_DIG_DLL,
+		EMC_CFG_DIG_DLL_PERIOD,
+		EMC_PMACRO_IB_RXRT,
+		EMC_CFG_PIPE_1,
+		EMC_CFG_PIPE_2,
+		EMC_PMACRO_QUSE_DDLL_RANK0_4,
+		EMC_PMACRO_QUSE_DDLL_RANK0_5,
+		EMC_PMACRO_QUSE_DDLL_RANK1_4,
+		EMC_PMACRO_QUSE_DDLL_RANK1_5,
+		EMC_MRW8,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4,
+		EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5,
+		EMC_PMACRO_DDLL_LONG_CMD_0,
+		EMC_PMACRO_DDLL_LONG_CMD_1,
+		EMC_PMACRO_DDLL_LONG_CMD_2,
+		EMC_PMACRO_DDLL_LONG_CMD_3,
+		EMC_PMACRO_DDLL_LONG_CMD_4,
+		EMC_PMACRO_DDLL_SHORT_CMD_0,
+		EMC_PMACRO_DDLL_SHORT_CMD_1,
+		EMC_PMACRO_DDLL_SHORT_CMD_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3,
+		EMC_TXDSRVTTGEN,
+		EMC_FDPD_CTRL_DQ,
+		EMC_FDPD_CTRL_CMD,
+		EMC_FBIO_SPARE,
+		EMC_ZCAL_INTERVAL,
+		EMC_ZCAL_WAIT_CNT,
+		EMC_MRS_WAIT_CNT,
+		EMC_MRS_WAIT_CNT2,
+		EMC_AUTO_CAL_CHANNEL,
+		EMC_DLL_CFG_0,
+		EMC_DLL_CFG_1,
+		EMC_PMACRO_AUTOCAL_CFG_COMMON,
+		EMC_PMACRO_ZCTRL,
+		EMC_CFG,
+		EMC_CFG_PIPE,
+		EMC_DYN_SELF_REF_CONTROL,
+		EMC_QPOP,
+		EMC_DQS_BRLSHFT_0,
+		EMC_DQS_BRLSHFT_1,
+		EMC_CMD_BRLSHFT_2,
+		EMC_CMD_BRLSHFT_3,
+		EMC_PMACRO_PAD_CFG_CTRL,
+		EMC_PMACRO_DATA_PAD_RX_CTRL,
+		EMC_PMACRO_CMD_PAD_RX_CTRL,
+		EMC_PMACRO_DATA_RX_TERM_MODE,
+		EMC_PMACRO_CMD_RX_TERM_MODE,
+		EMC_PMACRO_CMD_PAD_TX_CTRL,
+		EMC_PMACRO_DATA_PAD_TX_CTRL,
+		EMC_PMACRO_COMMON_PAD_TX_CTRL,
+		EMC_PMACRO_VTTGEN_CTRL_0,
+		EMC_PMACRO_VTTGEN_CTRL_1,
+		EMC_PMACRO_VTTGEN_CTRL_2,
+		EMC_PMACRO_BRICK_CTRL_RFU1,
+		EMC_PMACRO_CMD_BRICK_CTRL_FDPD,
+		EMC_PMACRO_BRICK_CTRL_RFU2,
+		EMC_PMACRO_DATA_BRICK_CTRL_FDPD,
+		EMC_PMACRO_BG_BIAS_CTRL_0,
+		EMC_CFG_3,
+		EMC_PMACRO_TX_PWRD_0,
+		EMC_PMACRO_TX_PWRD_1,
+		EMC_PMACRO_TX_PWRD_2,
+		EMC_PMACRO_TX_PWRD_3,
+		EMC_PMACRO_TX_PWRD_4,
+		EMC_PMACRO_TX_PWRD_5,
+		EMC_CONFIG_SAMPLE_DELAY,
+		EMC_PMACRO_TX_SEL_CLK_SRC_0,
+		EMC_PMACRO_TX_SEL_CLK_SRC_1,
+		EMC_PMACRO_TX_SEL_CLK_SRC_2,
+		EMC_PMACRO_TX_SEL_CLK_SRC_3,
+		EMC_PMACRO_TX_SEL_CLK_SRC_4,
+		EMC_PMACRO_TX_SEL_CLK_SRC_5,
+		EMC_PMACRO_DDLL_BYPASS,
+		EMC_PMACRO_DDLL_PWRD_0,
+		EMC_PMACRO_DDLL_PWRD_1,
+		EMC_PMACRO_DDLL_PWRD_2,
+		EMC_PMACRO_CMD_CTRL_0,
+		EMC_PMACRO_CMD_CTRL_1,
+		EMC_PMACRO_CMD_CTRL_2,
+		EMC_TR_TIMING_0,
+		EMC_TR_DVFS,
+		EMC_TR_CTRL_1,
+		EMC_TR_RDV,
+		EMC_TR_QPOP,
+		EMC_TR_RDV_MASK,
+		EMC_MRW14,
+		EMC_TR_QSAFE,
+		EMC_TR_QRST,
+		EMC_TRAINING_CTRL,
+		EMC_TRAINING_SETTLE,
+		EMC_TRAINING_VREF_SETTLE,
+		EMC_TRAINING_CA_FINE_CTRL,
+		EMC_TRAINING_CA_CTRL_MISC,
+		EMC_TRAINING_CA_CTRL_MISC1,
+		EMC_TRAINING_CA_VREF_CTRL,
+		EMC_TRAINING_QUSE_CORS_CTRL,
+		EMC_TRAINING_QUSE_FINE_CTRL,
+		EMC_TRAINING_QUSE_CTRL_MISC,
+		EMC_TRAINING_QUSE_VREF_CTRL,
+		EMC_TRAINING_READ_FINE_CTRL,
+		EMC_TRAINING_READ_CTRL_MISC,
+		EMC_TRAINING_READ_VREF_CTRL,
+		EMC_TRAINING_WRITE_FINE_CTRL,
+		EMC_TRAINING_WRITE_CTRL_MISC,
+		EMC_TRAINING_WRITE_VREF_CTRL,
+		EMC_TRAINING_MPC,
+		EMC_MRW15,
+	},
+	.trim = {
+		EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0,
+		EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1,
+		EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2,
+		EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3,
+		EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0,
+		EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1,
+		EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2,
+		EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1,
+		EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2,
+		EMC_PMACRO_IB_VREF_DQS_0,
+		EMC_PMACRO_IB_VREF_DQS_1,
+		EMC_PMACRO_IB_VREF_DQ_0,
+		EMC_PMACRO_IB_VREF_DQ_1,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
+		EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1,
+		EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2,
+		EMC_PMACRO_QUSE_DDLL_RANK0_0,
+		EMC_PMACRO_QUSE_DDLL_RANK0_1,
+		EMC_PMACRO_QUSE_DDLL_RANK0_2,
+		EMC_PMACRO_QUSE_DDLL_RANK0_3,
+		EMC_PMACRO_QUSE_DDLL_RANK1_0,
+		EMC_PMACRO_QUSE_DDLL_RANK1_1,
+		EMC_PMACRO_QUSE_DDLL_RANK1_2,
+		EMC_PMACRO_QUSE_DDLL_RANK1_3
+	},
+	.burst_mc = {
+		MC_EMEM_ARB_CFG,
+		MC_EMEM_ARB_OUTSTANDING_REQ,
+		MC_EMEM_ARB_REFPB_HP_CTRL,
+		MC_EMEM_ARB_REFPB_BANK_CTRL,
+		MC_EMEM_ARB_TIMING_RCD,
+		MC_EMEM_ARB_TIMING_RP,
+		MC_EMEM_ARB_TIMING_RC,
+		MC_EMEM_ARB_TIMING_RAS,
+		MC_EMEM_ARB_TIMING_FAW,
+		MC_EMEM_ARB_TIMING_RRD,
+		MC_EMEM_ARB_TIMING_RAP2PRE,
+		MC_EMEM_ARB_TIMING_WAP2PRE,
+		MC_EMEM_ARB_TIMING_R2R,
+		MC_EMEM_ARB_TIMING_W2W,
+		MC_EMEM_ARB_TIMING_R2W,
+		MC_EMEM_ARB_TIMING_CCDMW,
+		MC_EMEM_ARB_TIMING_W2R,
+		MC_EMEM_ARB_TIMING_RFCPB,
+		MC_EMEM_ARB_DA_TURNS,
+		MC_EMEM_ARB_DA_COVERS,
+		MC_EMEM_ARB_MISC0,
+		MC_EMEM_ARB_MISC1,
+		MC_EMEM_ARB_MISC2,
+		MC_EMEM_ARB_RING1_THROTTLE,
+		MC_EMEM_ARB_DHYST_CTRL,
+		MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0,
+		MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1,
+		MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2,
+		MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3,
+		MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4,
+		MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5,
+		MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6,
+		MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7,
+	},
+	.la_scale = {
+		MC_MLL_MPCORER_PTSA_RATE,
+		MC_FTOP_PTSA_RATE,
+		MC_PTSA_GRANT_DECREMENT,
+		MC_LATENCY_ALLOWANCE_XUSB_0,
+		MC_LATENCY_ALLOWANCE_XUSB_1,
+		MC_LATENCY_ALLOWANCE_TSEC_0,
+		MC_LATENCY_ALLOWANCE_SDMMCA_0,
+		MC_LATENCY_ALLOWANCE_SDMMCAA_0,
+		MC_LATENCY_ALLOWANCE_SDMMC_0,
+		MC_LATENCY_ALLOWANCE_SDMMCAB_0,
+		MC_LATENCY_ALLOWANCE_PPCS_0,
+		MC_LATENCY_ALLOWANCE_PPCS_1,
+		MC_LATENCY_ALLOWANCE_MPCORE_0,
+		MC_LATENCY_ALLOWANCE_HC_0,
+		MC_LATENCY_ALLOWANCE_HC_1,
+		MC_LATENCY_ALLOWANCE_AVPC_0,
+		MC_LATENCY_ALLOWANCE_GPU_0,
+		MC_LATENCY_ALLOWANCE_GPU2_0,
+		MC_LATENCY_ALLOWANCE_NVENC_0,
+		MC_LATENCY_ALLOWANCE_NVDEC_0,
+		MC_LATENCY_ALLOWANCE_VIC_0,
+		MC_LATENCY_ALLOWANCE_VI2_0,
+		MC_LATENCY_ALLOWANCE_ISP2_0,
+		MC_LATENCY_ALLOWANCE_ISP2_1,
+	},
+	.burst_per_channel = {
+		{ .bank = 0, .offset = EMC_MRW10, },
+		{ .bank = 1, .offset = EMC_MRW10, },
+		{ .bank = 0, .offset = EMC_MRW11, },
+		{ .bank = 1, .offset = EMC_MRW11, },
+		{ .bank = 0, .offset = EMC_MRW12, },
+		{ .bank = 1, .offset = EMC_MRW12, },
+		{ .bank = 0, .offset = EMC_MRW13, },
+		{ .bank = 1, .offset = EMC_MRW13, },
+	},
+	.trim_per_channel = {
+		{ .bank = 0, .offset = EMC_CMD_BRLSHFT_0, },
+		{ .bank = 1, .offset = EMC_CMD_BRLSHFT_1, },
+		{ .bank = 0, .offset = EMC_DATA_BRLSHFT_0, },
+		{ .bank = 1, .offset = EMC_DATA_BRLSHFT_0, },
+		{ .bank = 0, .offset = EMC_DATA_BRLSHFT_1, },
+		{ .bank = 1, .offset = EMC_DATA_BRLSHFT_1, },
+		{ .bank = 0, .offset = EMC_QUSE_BRLSHFT_0, },
+		{ .bank = 1, .offset = EMC_QUSE_BRLSHFT_1, },
+		{ .bank = 0, .offset = EMC_QUSE_BRLSHFT_2, },
+		{ .bank = 1, .offset = EMC_QUSE_BRLSHFT_3, },
+	},
+	.vref_per_channel = {
+		{
+			.bank = 0,
+			.offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK0,
+		}, {
+			.bank = 1,
+			.offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK0,
+		}, {
+			.bank = 0,
+			.offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK1,
+		}, {
+			.bank = 1,
+			.offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK1,
+		},
+	},
+};
+
+static void tegra210_emc_train(struct timer_list *timer)
+{
+	struct tegra210_emc *emc = from_timer(emc, timer, training);
+	unsigned long flags;
+
+	if (!emc->last)
+		return;
+
+	spin_lock_irqsave(&emc->lock, flags);
+
+	if (emc->sequence->periodic_compensation)
+		emc->sequence->periodic_compensation(emc);
+
+	spin_unlock_irqrestore(&emc->lock, flags);
+
+	mod_timer(&emc->training,
+		  jiffies + msecs_to_jiffies(emc->training_interval));
+}
+
+static void tegra210_emc_training_start(struct tegra210_emc *emc)
+{
+	mod_timer(&emc->training,
+		  jiffies + msecs_to_jiffies(emc->training_interval));
+}
+
+static void tegra210_emc_training_stop(struct tegra210_emc *emc)
+{
+	del_timer(&emc->training);
+}
+
+static unsigned int tegra210_emc_get_temperature(struct tegra210_emc *emc)
+{
+	unsigned long flags;
+	u32 value, max = 0;
+	unsigned int i;
+
+	spin_lock_irqsave(&emc->lock, flags);
+
+	for (i = 0; i < emc->num_devices; i++) {
+		value = tegra210_emc_mrr_read(emc, i, 4);
+
+		if (value & BIT(7))
+			dev_dbg(emc->dev,
+				"sensor reading changed for device %u: %08x\n",
+				i, value);
+
+		value = FIELD_GET(LPDDR2_MR4_SRR, value);
+		if (value > max)
+			max = value;
+	}
+
+	spin_unlock_irqrestore(&emc->lock, flags);
+
+	return max;
+}
+
+static void tegra210_emc_poll_refresh(struct timer_list *timer)
+{
+	struct tegra210_emc *emc = from_timer(emc, timer, refresh_timer);
+	unsigned int temperature;
+
+	if (!emc->debugfs.temperature)
+		temperature = tegra210_emc_get_temperature(emc);
+	else
+		temperature = emc->debugfs.temperature;
+
+	if (temperature == emc->temperature)
+		goto reset;
+
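+	/*
+	 * The temperature value is the refresh-rate field read from MR4 of
+	 * the attached LPDDR devices: values 0-3 allow nominal refresh, 4
+	 * requires 2x refresh, 5 requires 4x refresh and 6-7 additionally
+	 * require derated (throttled) timings.
+	 */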
+	switch (temperature) {
+	case 0 ... 3:
+		/* temperature is fine, using regular refresh */
+		dev_dbg(emc->dev, "switching to nominal refresh...\n");
+		tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_NOMINAL);
+		break;
+
+	case 4:
+		dev_dbg(emc->dev, "switching to 2x refresh...\n");
+		tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_2X);
+		break;
+
+	case 5:
+		dev_dbg(emc->dev, "switching to 4x refresh...\n");
+		tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_4X);
+		break;
+
+	case 6 ... 7:
+		dev_dbg(emc->dev, "switching to throttle refresh...\n");
+		tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_THROTTLE);
+		break;
+
+	default:
+		WARN(1, "invalid DRAM temperature state %u\n", temperature);
+		return;
+	}
+
+	emc->temperature = temperature;
+
+reset:
+	if (atomic_read(&emc->refresh_poll) > 0) {
+		unsigned int interval = emc->refresh_poll_interval;
+		unsigned int timeout = msecs_to_jiffies(interval);
+
+		mod_timer(&emc->refresh_timer, jiffies + timeout);
+	}
+}
+
+static void tegra210_emc_poll_refresh_stop(struct tegra210_emc *emc)
+{
+	atomic_set(&emc->refresh_poll, 0);
+	del_timer_sync(&emc->refresh_timer);
+}
+
+static void tegra210_emc_poll_refresh_start(struct tegra210_emc *emc)
+{
+	atomic_set(&emc->refresh_poll, 1);
+
+	mod_timer(&emc->refresh_timer,
+		  jiffies + msecs_to_jiffies(emc->refresh_poll_interval));
+}
+
+static int tegra210_emc_cd_max_state(struct thermal_cooling_device *cd,
+				     unsigned long *state)
+{
+	*state = 1;
+
+	return 0;
+}
+
+static int tegra210_emc_cd_get_state(struct thermal_cooling_device *cd,
+				     unsigned long *state)
+{
+	struct tegra210_emc *emc = cd->devdata;
+
+	*state = atomic_read(&emc->refresh_poll);
+
+	return 0;
+}
+
+static int tegra210_emc_cd_set_state(struct thermal_cooling_device *cd,
+				     unsigned long state)
+{
+	struct tegra210_emc *emc = cd->devdata;
+
+	if (state == atomic_read(&emc->refresh_poll))
+		return 0;
+
+	if (state)
+		tegra210_emc_poll_refresh_start(emc);
+	else
+		tegra210_emc_poll_refresh_stop(emc);
+
+	return 0;
+}
+
+static struct thermal_cooling_device_ops tegra210_emc_cd_ops = {
+	.get_max_state = tegra210_emc_cd_max_state,
+	.get_cur_state = tegra210_emc_cd_get_state,
+	.set_cur_state = tegra210_emc_cd_set_state,
+};
+
+static void tegra210_emc_set_clock(struct tegra210_emc *emc, u32 clksrc)
+{
+	emc->sequence->set_clock(emc, clksrc);
+
+	if (emc->next->periodic_training)
+		tegra210_emc_training_start(emc);
+	else
+		tegra210_emc_training_stop(emc);
+}
+
+static void tegra210_change_dll_src(struct tegra210_emc *emc,
+				    u32 clksrc)
+{
+	u32 dll_setting = emc->next->dll_clk_src;
+	u32 emc_clk_src;
+	u32 emc_clk_div;
+
+	emc_clk_src = (clksrc & EMC_CLK_EMC_2X_CLK_SRC_MASK) >>
+		       EMC_CLK_EMC_2X_CLK_SRC_SHIFT;
+	emc_clk_div = (clksrc & EMC_CLK_EMC_2X_CLK_DIVISOR_MASK) >>
+		       EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT;
+
+	dll_setting &= ~(DLL_CLK_EMC_DLL_CLK_SRC_MASK |
+			 DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK);
+	dll_setting |= emc_clk_src << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT;
+	dll_setting |= emc_clk_div << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT;
+
+	dll_setting &= ~DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK;
+	if (emc_clk_src == EMC_CLK_SOURCE_PLLMB_LJ)
+		dll_setting |= (PLLM_VCOB <<
+				DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+	else if (emc_clk_src == EMC_CLK_SOURCE_PLLM_LJ)
+		dll_setting |= (PLLM_VCOA <<
+				DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+	else
+		dll_setting |= (EMC_DLL_SWITCH_OUT <<
+				DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+
+	tegra210_clk_emc_dll_update_setting(dll_setting);
+
+	if (emc->next->clk_out_enb_x_0_clk_enb_emc_dll)
+		tegra210_clk_emc_dll_enable(true);
+	else
+		tegra210_clk_emc_dll_enable(false);
+}
+
+int tegra210_emc_set_refresh(struct tegra210_emc *emc,
+			     enum tegra210_emc_refresh refresh)
+{
+	struct tegra210_emc_timing *timings;
+	unsigned long flags;
+
+	if ((emc->dram_type != DRAM_TYPE_LPDDR2 &&
+	     emc->dram_type != DRAM_TYPE_LPDDR4) ||
+	    !emc->last)
+		return -ENODEV;
+
+	if (refresh > TEGRA210_EMC_REFRESH_THROTTLE)
+		return -EINVAL;
+
+	if (refresh == emc->refresh)
+		return 0;
+
+	spin_lock_irqsave(&emc->lock, flags);
+
+	if (refresh == TEGRA210_EMC_REFRESH_THROTTLE && emc->derated)
+		timings = emc->derated;
+	else
+		timings = emc->nominal;
+
+	if (timings != emc->timings) {
+		unsigned int index = emc->last - emc->timings;
+		u32 clksrc;
+
+		clksrc = emc->provider.configs[index].value |
+			 EMC_CLK_FORCE_CC_TRIGGER;
+
+		emc->next = &timings[index];
+		emc->timings = timings;
+
+		tegra210_emc_set_clock(emc, clksrc);
+	} else {
+		tegra210_emc_adjust_timing(emc, emc->last);
+		tegra210_emc_timing_update(emc);
+
+		if (refresh != TEGRA210_EMC_REFRESH_NOMINAL)
+			emc_writel(emc, EMC_REF_REF_CMD, EMC_REF);
+	}
+
+	spin_unlock_irqrestore(&emc->lock, flags);
+
+	return 0;
+}
+
+u32 tegra210_emc_mrr_read(struct tegra210_emc *emc, unsigned int chip,
+			  unsigned int address)
+{
+	u32 value, ret = 0;
+	unsigned int i;
+
+	value = (chip & EMC_MRR_DEV_SEL_MASK) << EMC_MRR_DEV_SEL_SHIFT |
+		(address & EMC_MRR_MA_MASK) << EMC_MRR_MA_SHIFT;
+	emc_writel(emc, value, EMC_MRR);
+
+	for (i = 0; i < emc->num_channels; i++)
+		WARN(tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+						  EMC_EMC_STATUS_MRR_DIVLD, 1),
+		     "Timed out waiting for MRR %u (ch=%u)\n", address, i);
+
+	for (i = 0; i < emc->num_channels; i++) {
+		value = emc_channel_readl(emc, i, EMC_MRR);
+		value &= EMC_MRR_DATA_MASK;
+
+		ret = (ret << 16) | value;
+	}
+
+	return ret;
+}
+
+void tegra210_emc_do_clock_change(struct tegra210_emc *emc, u32 clksrc)
+{
+	int err;
+
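+	/*
+	 * Dummy reads to make sure that all previously issued writes to the
+	 * MC and EMC have reached the hardware before the clock source is
+	 * switched.
+	 */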
+	mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+	emc_readl(emc, EMC_INTSTATUS);
+
+	tegra210_clk_emc_update_setting(clksrc);
+
+	err = tegra210_emc_wait_for_update(emc, 0, EMC_INTSTATUS,
+					   EMC_INTSTATUS_CLKCHANGE_COMPLETE,
+					   true);
+	if (err)
+		dev_warn(emc->dev, "clock change completion error: %d\n", err);
+}
+
+struct tegra210_emc_timing *tegra210_emc_find_timing(struct tegra210_emc *emc,
+						     unsigned long rate)
+{
+	unsigned int i;
+
+	for (i = 0; i < emc->num_timings; i++)
+		if (emc->timings[i].rate * 1000UL == rate)
+			return &emc->timings[i];
+
+	return NULL;
+}
+
+int tegra210_emc_wait_for_update(struct tegra210_emc *emc, unsigned int channel,
+				 unsigned int offset, u32 bit_mask, bool state)
+{
+	unsigned int i;
+	u32 value;
+
+	for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
+		value = emc_channel_readl(emc, channel, offset);
+		if (!!(value & bit_mask) == state)
+			return 0;
+
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
+void tegra210_emc_set_shadow_bypass(struct tegra210_emc *emc, int set)
+{
+	u32 emc_dbg = emc_readl(emc, EMC_DBG);
+
+	if (set)
+		emc_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+	else
+		emc_writel(emc, emc_dbg & ~EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+}
+
+u32 tegra210_emc_get_dll_state(struct tegra210_emc_timing *next)
+{
+	if (next->emc_emrs & 0x1)
+		return 0;
+
+	return 1;
+}
+
+void tegra210_emc_timing_update(struct tegra210_emc *emc)
+{
+	unsigned int i;
+	int err = 0;
+
+	emc_writel(emc, 0x1, EMC_TIMING_CONTROL);
+
+	for (i = 0; i < emc->num_channels; i++) {
+		err |= tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+						    EMC_EMC_STATUS_TIMING_UPDATE_STALLED,
+						    false);
+	}
+
+	if (err)
+		dev_warn(emc->dev, "timing update error: %d\n", err);
+}
+
+unsigned long tegra210_emc_actual_osc_clocks(u32 in)
+{
+	if (in < 0x40)
+		return in * 16;
+	else if (in < 0x80)
+		return 2048;
+	else if (in < 0xc0)
+		return 4096;
+	else
+		return 8192;
+}
+
+void tegra210_emc_start_periodic_compensation(struct tegra210_emc *emc)
+{
+	u32 mpc_req = 0x4b;
+
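+	/* issue the MPC command and read it back to flush the write */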
+	emc_writel(emc, mpc_req, EMC_MPC);
+	mpc_req = emc_readl(emc, EMC_MPC);
+}
+
+u32 tegra210_emc_compensate(struct tegra210_emc_timing *next, u32 offset)
+{
+	u32 temp = 0, rate = next->rate / 1000;
+	s32 delta[4], delta_taps[4];
+	s32 new[] = {
+		TRIM_REG(0, 0, 0, 0),
+		TRIM_REG(0, 0, 0, 1),
+		TRIM_REG(0, 0, 1, 2),
+		TRIM_REG(0, 0, 1, 3),
+
+		TRIM_REG(1, 0, 2, 4),
+		TRIM_REG(1, 0, 2, 5),
+		TRIM_REG(1, 0, 3, 6),
+		TRIM_REG(1, 0, 3, 7),
+
+		TRIM_REG(0, 1, 0, 0),
+		TRIM_REG(0, 1, 0, 1),
+		TRIM_REG(0, 1, 1, 2),
+		TRIM_REG(0, 1, 1, 3),
+
+		TRIM_REG(1, 1, 2, 4),
+		TRIM_REG(1, 1, 2, 5),
+		TRIM_REG(1, 1, 3, 6),
+		TRIM_REG(1, 1, 3, 7)
+	};
+	unsigned int i;
+
+	switch (offset) {
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0:
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1:
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2:
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3:
+	case EMC_DATA_BRLSHFT_0:
+		delta[0] = 128 * (next->current_dram_clktree[C0D0U0] -
+				  next->trained_dram_clktree[C0D0U0]);
+		delta[1] = 128 * (next->current_dram_clktree[C0D0U1] -
+				  next->trained_dram_clktree[C0D0U1]);
+		delta[2] = 128 * (next->current_dram_clktree[C1D0U0] -
+				  next->trained_dram_clktree[C1D0U0]);
+		delta[3] = 128 * (next->current_dram_clktree[C1D0U1] -
+				  next->trained_dram_clktree[C1D0U1]);
+
+		delta_taps[0] = (delta[0] * (s32)rate) / 1000000;
+		delta_taps[1] = (delta[1] * (s32)rate) / 1000000;
+		delta_taps[2] = (delta[2] * (s32)rate) / 1000000;
+		delta_taps[3] = (delta[3] * (s32)rate) / 1000000;
+
+		for (i = 0; i < 4; i++) {
+			if ((delta_taps[i] > next->tree_margin) ||
+			    (delta_taps[i] < (-1 * next->tree_margin))) {
+				new[i * 2] = new[i * 2] + delta_taps[i];
+				new[i * 2 + 1] = new[i * 2 + 1] +
+							delta_taps[i];
+			}
+		}
+
+		if (offset == EMC_DATA_BRLSHFT_0) {
+			for (i = 0; i < 8; i++)
+				new[i] = new[i] / 64;
+		} else {
+			for (i = 0; i < 8; i++)
+				new[i] = new[i] % 64;
+		}
+
+		break;
+
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0:
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1:
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2:
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3:
+	case EMC_DATA_BRLSHFT_1:
+		delta[0] = 128 * (next->current_dram_clktree[C0D1U0] -
+				  next->trained_dram_clktree[C0D1U0]);
+		delta[1] = 128 * (next->current_dram_clktree[C0D1U1] -
+				  next->trained_dram_clktree[C0D1U1]);
+		delta[2] = 128 * (next->current_dram_clktree[C1D1U0] -
+				  next->trained_dram_clktree[C1D1U0]);
+		delta[3] = 128 * (next->current_dram_clktree[C1D1U1] -
+				  next->trained_dram_clktree[C1D1U1]);
+
+		delta_taps[0] = (delta[0] * (s32)rate) / 1000000;
+		delta_taps[1] = (delta[1] * (s32)rate) / 1000000;
+		delta_taps[2] = (delta[2] * (s32)rate) / 1000000;
+		delta_taps[3] = (delta[3] * (s32)rate) / 1000000;
+
+		for (i = 0; i < 4; i++) {
+			if ((delta_taps[i] > next->tree_margin) ||
+			    (delta_taps[i] < (-1 * next->tree_margin))) {
+				new[8 + i * 2] = new[8 + i * 2] +
+							delta_taps[i];
+				new[8 + i * 2 + 1] = new[8 + i * 2 + 1] +
+							delta_taps[i];
+			}
+		}
+
+		if (offset == EMC_DATA_BRLSHFT_1) {
+			for (i = 0; i < 8; i++)
+				new[i + 8] = new[i + 8] / 64;
+		} else {
+			for (i = 0; i < 8; i++)
+				new[i + 8] = new[i + 8] % 64;
+		}
+
+		break;
+	}
+
+	switch (offset) {
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0:
+		temp = CALC_TEMP(0, 0, 0, 1, 0);
+		break;
+
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1:
+		temp = CALC_TEMP(0, 1, 2, 3, 2);
+		break;
+
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2:
+		temp = CALC_TEMP(0, 2, 4, 5, 4);
+		break;
+
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3:
+		temp = CALC_TEMP(0, 3, 6, 7, 6);
+		break;
+
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0:
+		temp = CALC_TEMP(1, 0, 0, 1, 8);
+		break;
+
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1:
+		temp = CALC_TEMP(1, 1, 2, 3, 10);
+		break;
+
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2:
+		temp = CALC_TEMP(1, 2, 4, 5, 12);
+		break;
+
+	case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3:
+		temp = CALC_TEMP(1, 3, 6, 7, 14);
+		break;
+
+	case EMC_DATA_BRLSHFT_0:
+		temp = ((new[0] <<
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_MASK) |
+		       ((new[1] <<
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_MASK) |
+		       ((new[2] <<
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_MASK) |
+		       ((new[3] <<
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_MASK) |
+		       ((new[4] <<
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_MASK) |
+		       ((new[5] <<
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_MASK) |
+		       ((new[6] <<
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_MASK) |
+		       ((new[7] <<
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_MASK);
+		break;
+
+	case EMC_DATA_BRLSHFT_1:
+		temp = ((new[8] <<
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_MASK) |
+		       ((new[9] <<
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_MASK) |
+		       ((new[10] <<
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_MASK) |
+		       ((new[11] <<
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_MASK) |
+		       ((new[12] <<
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_MASK) |
+		       ((new[13] <<
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_MASK) |
+		       ((new[14] <<
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_MASK) |
+		       ((new[15] <<
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT) &
+			 EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_MASK);
+		break;
+
+	default:
+		break;
+	}
+
+	return temp;
+}
+
+u32 tegra210_emc_dll_prelock(struct tegra210_emc *emc, u32 clksrc)
+{
+	unsigned int i;
+	u32 value;
+
+	value = emc_readl(emc, EMC_CFG_DIG_DLL);
+	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK;
+	value |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT);
+	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK;
+	value |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+	value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+	emc_writel(emc, value, EMC_CFG_DIG_DLL);
+	emc_writel(emc, 1, EMC_TIMING_CONTROL);
+
+	for (i = 0; i < emc->num_channels; i++)
+		tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+					     EMC_EMC_STATUS_TIMING_UPDATE_STALLED,
+					     0);
+
+	for (i = 0; i < emc->num_channels; i++) {
+		while (true) {
+			value = emc_channel_readl(emc, i, EMC_CFG_DIG_DLL);
+			if ((value & EMC_CFG_DIG_DLL_CFG_DLL_EN) == 0)
+				break;
+		}
+	}
+
+	value = emc->next->burst_regs[EMC_DLL_CFG_0_INDEX];
+	emc_writel(emc, value, EMC_DLL_CFG_0);
+
+	value = emc_readl(emc, EMC_DLL_CFG_1);
+	value &= EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK;
+
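+	/* pick a DDLL calibration start trim based on the target rate */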
+	if (emc->next->rate >= 400000 && emc->next->rate < 600000)
+		value |= 150;
+	else if (emc->next->rate >= 600000 && emc->next->rate < 800000)
+		value |= 100;
+	else if (emc->next->rate >= 800000 && emc->next->rate < 1000000)
+		value |= 70;
+	else if (emc->next->rate >= 1000000 && emc->next->rate < 1200000)
+		value |= 30;
+	else
+		value |= 20;
+
+	emc_writel(emc, value, EMC_DLL_CFG_1);
+
+	tegra210_change_dll_src(emc, clksrc);
+
+	value = emc_readl(emc, EMC_CFG_DIG_DLL);
+	value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+	emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+	tegra210_emc_timing_update(emc);
+
+	for (i = 0; i < emc->num_channels; i++) {
+		while (true) {
+			value = emc_channel_readl(emc, i, EMC_CFG_DIG_DLL);
+			if (value & EMC_CFG_DIG_DLL_CFG_DLL_EN)
+				break;
+		}
+	}
+
+	while (true) {
+		value = emc_readl(emc, EMC_DIG_DLL_STATUS);
+
+		if ((value & EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED) == 0)
+			continue;
+
+		if ((value & EMC_DIG_DLL_STATUS_DLL_LOCK) == 0)
+			continue;
+
+		break;
+	}
+
+	value = emc_readl(emc, EMC_DIG_DLL_STATUS);
+
+	return value & EMC_DIG_DLL_STATUS_DLL_OUT_MASK;
+}
+
+u32 tegra210_emc_dvfs_power_ramp_up(struct tegra210_emc *emc, u32 clk,
+				    bool flip_backward)
+{
+	u32 cmd_pad, dq_pad, rfu1, cfg5, common_tx, ramp_up_wait = 0;
+	const struct tegra210_emc_timing *timing;
+
+	if (flip_backward)
+		timing = emc->last;
+	else
+		timing = emc->next;
+
+	cmd_pad = timing->burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
+	dq_pad = timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+	rfu1 = timing->burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
+	cfg5 = timing->burst_regs[EMC_FBIO_CFG5_INDEX];
+	common_tx = timing->burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
+
+	cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+
+	if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
+		ccfifo_writel(emc, common_tx & 0xa,
+			      EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
+		ccfifo_writel(emc, common_tx & 0xf,
+			      EMC_PMACRO_COMMON_PAD_TX_CTRL,
+			      (100000 / clk) + 1);
+		ramp_up_wait += 100000;
+	} else {
+		ccfifo_writel(emc, common_tx | 0x8,
+			      EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
+	}
+
+	if (clk < 1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD) {
+		if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
+			cmd_pad |=
+				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
+			cmd_pad &=
+				~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+				  EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+			ccfifo_writel(emc, cmd_pad,
+				      EMC_PMACRO_CMD_PAD_TX_CTRL,
+				      (100000 / clk) + 1);
+			ramp_up_wait += 100000;
+
+			dq_pad |=
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
+			dq_pad &=
+			       ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+				 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+			ccfifo_writel(emc, dq_pad,
+				      EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+			ccfifo_writel(emc, rfu1 & 0xfe40fe40,
+				      EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+		} else {
+			ccfifo_writel(emc, rfu1 & 0xfe40fe40,
+				      EMC_PMACRO_BRICK_CTRL_RFU1,
+				      (100000 / clk) + 1);
+			ramp_up_wait += 100000;
+		}
+
+		ccfifo_writel(emc, rfu1 & 0xfeedfeed,
+			      EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
+		ramp_up_wait += 100000;
+
+		if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
+			cmd_pad |=
+				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
+				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC;
+			ccfifo_writel(emc, cmd_pad,
+				      EMC_PMACRO_CMD_PAD_TX_CTRL,
+				      (100000 / clk) + 1);
+			ramp_up_wait += 100000;
+
+			dq_pad |=
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC;
+			ccfifo_writel(emc, dq_pad,
+				      EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+			ccfifo_writel(emc, rfu1,
+				      EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+		} else {
+			ccfifo_writel(emc, rfu1,
+				      EMC_PMACRO_BRICK_CTRL_RFU1,
+				      (100000 / clk) + 1);
+			ramp_up_wait += 100000;
+		}
+
+		ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+			      EMC_FBIO_CFG5, (100000 / clk) + 10);
+		ramp_up_wait += 100000 + (10 * clk);
+	} else if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
+		ccfifo_writel(emc, rfu1 | 0x06000600,
+			      EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
+		ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+			      EMC_FBIO_CFG5, (100000 / clk) + 10);
+		ramp_up_wait += 100000 + 10 * clk;
+	} else {
+		ccfifo_writel(emc, rfu1 | 0x00000600,
+			      EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+		ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+			      EMC_FBIO_CFG5, 12);
+		ramp_up_wait += 12 * clk;
+	}
+
+	cmd_pad &= ~EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+	ccfifo_writel(emc, cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 5);
+
+	return ramp_up_wait;
+}
+
+u32 tegra210_emc_dvfs_power_ramp_down(struct tegra210_emc *emc, u32 clk,
+				      bool flip_backward)
+{
+	u32 ramp_down_wait = 0, cmd_pad, dq_pad, rfu1, cfg5, common_tx;
+	const struct tegra210_emc_timing *entry;
+	u32 seq_wait;
+
+	if (flip_backward)
+		entry = emc->next;
+	else
+		entry = emc->last;
+
+	cmd_pad = entry->burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
+	dq_pad = entry->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+	rfu1 = entry->burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
+	cfg5 = entry->burst_regs[EMC_FBIO_CFG5_INDEX];
+	common_tx = entry->burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
+
+	cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+
+	ccfifo_writel(emc, cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 0);
+	ccfifo_writel(emc, cfg5 | EMC_FBIO_CFG5_CMD_TX_DIS,
+		      EMC_FBIO_CFG5, 12);
+	ramp_down_wait = 12 * clk;
+
+	seq_wait = (100000 / clk) + 1;
+
+	if (clk < (1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD)) {
+		if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
+			cmd_pad &=
+				~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+				  EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+			cmd_pad |=
+				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
+			ccfifo_writel(emc, cmd_pad,
+				      EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
+			ramp_down_wait += 100000;
+
+			dq_pad &=
+			      ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+			dq_pad |=
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
+			ccfifo_writel(emc, dq_pad,
+				      EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+			ccfifo_writel(emc, rfu1 & ~0x01120112,
+				      EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+		} else {
+			ccfifo_writel(emc, rfu1 & ~0x01120112,
+				      EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+			ramp_down_wait += 100000;
+		}
+
+		ccfifo_writel(emc, rfu1 & ~0x01bf01bf,
+			      EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+		ramp_down_wait += 100000;
+
+		if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
+			cmd_pad &=
+				~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+				  EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC |
+				  EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+				  EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC);
+			ccfifo_writel(emc, cmd_pad,
+				      EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
+			ramp_down_wait += 100000;
+
+			dq_pad &=
+			      ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC |
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC);
+			ccfifo_writel(emc, dq_pad,
+				      EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+			ccfifo_writel(emc, rfu1 & ~0x07ff07ff,
+				      EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+		} else {
+			ccfifo_writel(emc, rfu1 & ~0x07ff07ff,
+				      EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+			ramp_down_wait += 100000;
+		}
+	} else {
+		ccfifo_writel(emc, rfu1 & ~0xffff07ff,
+			      EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait + 19);
+		ramp_down_wait += 100000 + (20 * clk);
+	}
+
+	if (clk < (1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD)) {
+		ramp_down_wait += 100000;
+		ccfifo_writel(emc, common_tx & ~0x5,
+			      EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+		ramp_down_wait += 100000;
+		ccfifo_writel(emc, common_tx & ~0xf,
+			      EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+		ramp_down_wait += 100000;
+		ccfifo_writel(emc, 0, 0, seq_wait);
+		ramp_down_wait += 100000;
+	} else {
+		ccfifo_writel(emc, common_tx & ~0xf,
+			      EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+	}
+
+	return ramp_down_wait;
+}
+
+void tegra210_emc_reset_dram_clktree_values(struct tegra210_emc_timing *timing)
+{
+	timing->current_dram_clktree[C0D0U0] =
+		timing->trained_dram_clktree[C0D0U0];
+	timing->current_dram_clktree[C0D0U1] =
+		timing->trained_dram_clktree[C0D0U1];
+	timing->current_dram_clktree[C1D0U0] =
+		timing->trained_dram_clktree[C1D0U0];
+	timing->current_dram_clktree[C1D0U1] =
+		timing->trained_dram_clktree[C1D0U1];
+	timing->current_dram_clktree[C1D1U0] =
+		timing->trained_dram_clktree[C1D1U0];
+	timing->current_dram_clktree[C1D1U1] =
+		timing->trained_dram_clktree[C1D1U1];
+}
+
+static void update_dll_control(struct tegra210_emc *emc, u32 value, bool state)
+{
+	unsigned int i;
+
+	emc_writel(emc, value, EMC_CFG_DIG_DLL);
+	tegra210_emc_timing_update(emc);
+
+	for (i = 0; i < emc->num_channels; i++)
+		tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
+					     EMC_CFG_DIG_DLL_CFG_DLL_EN,
+					     state);
+}
+
+void tegra210_emc_dll_disable(struct tegra210_emc *emc)
+{
+	u32 value;
+
+	value = emc_readl(emc, EMC_CFG_DIG_DLL);
+	value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+
+	update_dll_control(emc, value, false);
+}
+
+void tegra210_emc_dll_enable(struct tegra210_emc *emc)
+{
+	u32 value;
+
+	value = emc_readl(emc, EMC_CFG_DIG_DLL);
+	value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+
+	update_dll_control(emc, value, true);
+}
+
+void tegra210_emc_adjust_timing(struct tegra210_emc *emc,
+				struct tegra210_emc_timing *timing)
+{
+	u32 dsr_cntrl = timing->burst_regs[EMC_DYN_SELF_REF_CONTROL_INDEX];
+	u32 pre_ref = timing->burst_regs[EMC_PRE_REFRESH_REQ_CNT_INDEX];
+	u32 ref = timing->burst_regs[EMC_REFRESH_INDEX];
+
+	switch (emc->refresh) {
+	case TEGRA210_EMC_REFRESH_NOMINAL:
+	case TEGRA210_EMC_REFRESH_THROTTLE:
+		break;
+
+	case TEGRA210_EMC_REFRESH_2X:
+		ref = REFRESH_SPEEDUP(ref, 2);
+		pre_ref = REFRESH_SPEEDUP(pre_ref, 2);
+		dsr_cntrl = REFRESH_SPEEDUP(dsr_cntrl, 2);
+		break;
+
+	case TEGRA210_EMC_REFRESH_4X:
+		ref = REFRESH_SPEEDUP(ref, 4);
+		pre_ref = REFRESH_SPEEDUP(pre_ref, 4);
+		dsr_cntrl = REFRESH_SPEEDUP(dsr_cntrl, 4);
+		break;
+
+	default:
+		dev_warn(emc->dev, "failed to set refresh: %d\n", emc->refresh);
+		return;
+	}
+
+	emc_writel(emc, ref, emc->offsets->burst[EMC_REFRESH_INDEX]);
+	emc_writel(emc, pre_ref,
+		   emc->offsets->burst[EMC_PRE_REFRESH_REQ_CNT_INDEX]);
+	emc_writel(emc, dsr_cntrl,
+		   emc->offsets->burst[EMC_DYN_SELF_REF_CONTROL_INDEX]);
+}
+
+static int tegra210_emc_set_rate(struct device *dev,
+				 const struct tegra210_clk_emc_config *config)
+{
+	struct tegra210_emc *emc = dev_get_drvdata(dev);
+	struct tegra210_emc_timing *timing = NULL;
+	unsigned long rate = config->rate;
+	s64 last_change_delay;
+	unsigned long flags;
+	unsigned int i;
+
+	if (rate == emc->last->rate * 1000UL)
+		return 0;
+
+	for (i = 0; i < emc->num_timings; i++) {
+		if (emc->timings[i].rate * 1000UL == rate) {
+			timing = &emc->timings[i];
+			break;
+		}
+	}
+
+	if (!timing)
+		return -EINVAL;
+
+	if (rate > 204000000 && !timing->trained)
+		return -EINVAL;
+
+	emc->next = timing;
+	last_change_delay = ktime_us_delta(ktime_get(), emc->clkchange_time);
+
+	/* XXX use non-busy-looping sleep? */
+	if ((last_change_delay >= 0) &&
+	    (last_change_delay < emc->clkchange_delay))
+		udelay(emc->clkchange_delay - (int)last_change_delay);
+
+	spin_lock_irqsave(&emc->lock, flags);
+	tegra210_emc_set_clock(emc, config->value);
+	emc->clkchange_time = ktime_get();
+	emc->last = timing;
+	spin_unlock_irqrestore(&emc->lock, flags);
+
+	return 0;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ *   /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ *   - available_rates: This file contains a list of valid, space-separated
+ *     EMC frequencies.
+ *
+ *   - min_rate: Writing a value to this file sets the given frequency as the
+ *       floor of the permitted range. If this is higher than the currently
+ *       configured EMC frequency, this will cause the frequency to be
+ *       increased so that it stays within the valid range.
+ *
+ *   - max_rate: Similarly to the min_rate file, writing a value to this file
+ *       sets the given frequency as the ceiling of the permitted range. If
+ *       the value is lower than the currently configured EMC frequency, this
+ *       will cause the frequency to be decreased so that it stays within the
+ *       valid range.
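+ *
+ * For example (assuming debugfs is mounted at /sys/kernel/debug), the EMC
+ * frequency can be pinned to one of the supported rates like this:
+ *
+ *   # cat /sys/kernel/debug/emc/available_rates
+ *   # echo <rate> > /sys/kernel/debug/emc/min_rate
+ *   # echo <rate> > /sys/kernel/debug/emc/max_rate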
+ */
+
+static bool tegra210_emc_validate_rate(struct tegra210_emc *emc,
+				       unsigned long rate)
+{
+	unsigned int i;
+
+	for (i = 0; i < emc->num_timings; i++)
+		if (rate == emc->timings[i].rate * 1000UL)
+			return true;
+
+	return false;
+}
+
+static int tegra210_emc_debug_available_rates_show(struct seq_file *s,
+						   void *data)
+{
+	struct tegra210_emc *emc = s->private;
+	const char *prefix = "";
+	unsigned int i;
+
+	for (i = 0; i < emc->num_timings; i++) {
+		seq_printf(s, "%s%u", prefix, emc->timings[i].rate * 1000);
+		prefix = " ";
+	}
+
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static int tegra210_emc_debug_available_rates_open(struct inode *inode,
+						   struct file *file)
+{
+	return single_open(file, tegra210_emc_debug_available_rates_show,
+			   inode->i_private);
+}
+
+static const struct file_operations tegra210_emc_debug_available_rates_fops = {
+	.open = tegra210_emc_debug_available_rates_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int tegra210_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+	struct tegra210_emc *emc = data;
+
+	*rate = emc->debugfs.min_rate;
+
+	return 0;
+}
+
+static int tegra210_emc_debug_min_rate_set(void *data, u64 rate)
+{
+	struct tegra210_emc *emc = data;
+	int err;
+
+	if (!tegra210_emc_validate_rate(emc, rate))
+		return -EINVAL;
+
+	err = clk_set_min_rate(emc->clk, rate);
+	if (err < 0)
+		return err;
+
+	emc->debugfs.min_rate = rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
+			tegra210_emc_debug_min_rate_get,
+			tegra210_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra210_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+	struct tegra210_emc *emc = data;
+
+	*rate = emc->debugfs.max_rate;
+
+	return 0;
+}
+
+static int tegra210_emc_debug_max_rate_set(void *data, u64 rate)
+{
+	struct tegra210_emc *emc = data;
+	int err;
+
+	if (!tegra210_emc_validate_rate(emc, rate))
+		return -EINVAL;
+
+	err = clk_set_max_rate(emc->clk, rate);
+	if (err < 0)
+		return err;
+
+	emc->debugfs.max_rate = rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
+			tegra210_emc_debug_max_rate_get,
+			tegra210_emc_debug_max_rate_set, "%llu\n");
+
+static int tegra210_emc_debug_temperature_get(void *data, u64 *temperature)
+{
+	struct tegra210_emc *emc = data;
+	unsigned int value;
+
+	if (!emc->debugfs.temperature)
+		value = tegra210_emc_get_temperature(emc);
+	else
+		value = emc->debugfs.temperature;
+
+	*temperature = value;
+
+	return 0;
+}
+
+static int tegra210_emc_debug_temperature_set(void *data, u64 temperature)
+{
+	struct tegra210_emc *emc = data;
+
+	if (temperature > 7)
+		return -EINVAL;
+
+	emc->debugfs.temperature = temperature;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
+			tegra210_emc_debug_temperature_get,
+			tegra210_emc_debug_temperature_set, "%llu\n");
+
+static void tegra210_emc_debugfs_init(struct tegra210_emc *emc)
+{
+	struct device *dev = emc->dev;
+	unsigned int i;
+	int err;
+
+	emc->debugfs.min_rate = ULONG_MAX;
+	emc->debugfs.max_rate = 0;
+
+	for (i = 0; i < emc->num_timings; i++) {
+		if (emc->timings[i].rate * 1000UL < emc->debugfs.min_rate)
+			emc->debugfs.min_rate = emc->timings[i].rate * 1000UL;
+
+		if (emc->timings[i].rate * 1000UL > emc->debugfs.max_rate)
+			emc->debugfs.max_rate = emc->timings[i].rate * 1000UL;
+	}
+
+	if (!emc->num_timings) {
+		emc->debugfs.min_rate = clk_get_rate(emc->clk);
+		emc->debugfs.max_rate = emc->debugfs.min_rate;
+	}
+
+	err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+				 emc->debugfs.max_rate);
+	if (err < 0) {
+		dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+			emc->debugfs.min_rate, emc->debugfs.max_rate,
+			emc->clk);
+		return;
+	}
+
+	emc->debugfs.root = debugfs_create_dir("emc", NULL);
+	if (!emc->debugfs.root) {
+		dev_err(dev, "failed to create debugfs directory\n");
+		return;
+	}
+
+	debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
+			    &tegra210_emc_debug_available_rates_fops);
+	debugfs_create_file("min_rate", 0644, emc->debugfs.root, emc,
+			    &tegra210_emc_debug_min_rate_fops);
+	debugfs_create_file("max_rate", 0644, emc->debugfs.root, emc,
+			    &tegra210_emc_debug_max_rate_fops);
+	debugfs_create_file("temperature", 0644, emc->debugfs.root, emc,
+			    &tegra210_emc_debug_temperature_fops);
+}
+
+static void tegra210_emc_detect(struct tegra210_emc *emc)
+{
+	u32 value;
+
+	/* probe the number of connected DRAM devices */
+	value = mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+
+	if (value & MC_EMEM_ADR_CFG_EMEM_NUMDEV)
+		emc->num_devices = 2;
+	else
+		emc->num_devices = 1;
+
+	/* probe the type of DRAM */
+	value = emc_readl(emc, EMC_FBIO_CFG5);
+	emc->dram_type = value & 0x3;
+
+	/* probe the number of channels */
+	value = emc_readl(emc, EMC_FBIO_CFG7);
+
+	if ((value & EMC_FBIO_CFG7_CH1_ENABLE) &&
+	    (value & EMC_FBIO_CFG7_CH0_ENABLE))
+		emc->num_channels = 2;
+	else
+		emc->num_channels = 1;
+}
+
+static int tegra210_emc_validate_timings(struct tegra210_emc *emc,
+					 struct tegra210_emc_timing *timings,
+					 unsigned int num_timings)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_timings; i++) {
+		u32 min_volt = timings[i].min_volt;
+		u32 rate = timings[i].rate;
+
+		if (!rate)
+			return -EINVAL;
+
+		if ((i > 0) && ((rate <= timings[i - 1].rate) ||
+		    (min_volt < timings[i - 1].min_volt)))
+			return -EINVAL;
+
+		if (timings[i].revision != timings[0].revision)
+			continue;
+	}
+
+	return 0;
+}
+
+static int tegra210_emc_probe(struct platform_device *pdev)
+{
+	struct thermal_cooling_device *cd;
+	unsigned long current_rate;
+	struct platform_device *mc;
+	struct tegra210_emc *emc;
+	struct device_node *np;
+	unsigned int i;
+	int err;
+
+	emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+	if (!emc)
+		return -ENOMEM;
+
+	emc->clk = devm_clk_get(&pdev->dev, "emc");
+	if (IS_ERR(emc->clk))
+		return PTR_ERR(emc->clk);
+
+	platform_set_drvdata(pdev, emc);
+	spin_lock_init(&emc->lock);
+	emc->dev = &pdev->dev;
+
+	np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
+	if (!np) {
+		dev_err(&pdev->dev, "could not get memory controller\n");
+		return -ENOENT;
+	}
+
+	mc = of_find_device_by_node(np);
+	of_node_put(np);
+	if (!mc)
+		return -ENOENT;
+
+	emc->mc = platform_get_drvdata(mc);
+	if (!emc->mc) {
+		put_device(&mc->dev);
+		return -EPROBE_DEFER;
+	}
+
+	emc->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(emc->regs)) {
+		err = PTR_ERR(emc->regs);
+		goto put_mc;
+	}
+
+	for (i = 0; i < 2; i++) {
+		emc->channel[i] = devm_platform_ioremap_resource(pdev, 1 + i);
+		if (IS_ERR(emc->channel[i])) {
+			err = PTR_ERR(emc->channel[i]);
+			goto put_mc;
+		}
+	}
+
+	tegra210_emc_detect(emc);
+	np = pdev->dev.of_node;
+
+	/* attach to the nominal and (optional) derated tables */
+	err = of_reserved_mem_device_init_by_name(emc->dev, np, "nominal");
+	if (err < 0) {
+		dev_err(emc->dev, "failed to get nominal EMC table: %d\n", err);
+		goto put_mc;
+	}
+
+	err = of_reserved_mem_device_init_by_name(emc->dev, np, "derated");
+	if (err < 0 && err != -ENODEV) {
+		dev_err(emc->dev, "failed to get derated EMC table: %d\n", err);
+		goto release;
+	}
+
+	/* validate the tables */
+	if (emc->nominal) {
+		err = tegra210_emc_validate_timings(emc, emc->nominal,
+						    emc->num_timings);
+		if (err < 0)
+			goto release;
+	}
+
+	if (emc->derated) {
+		err = tegra210_emc_validate_timings(emc, emc->derated,
+						    emc->num_timings);
+		if (err < 0)
+			goto release;
+	}
+
+	/* default to the nominal table */
+	emc->timings = emc->nominal;
+
+	/* pick the current timing based on the current EMC clock rate */
+	current_rate = clk_get_rate(emc->clk) / 1000;
+
+	for (i = 0; i < emc->num_timings; i++) {
+		if (emc->timings[i].rate == current_rate) {
+			emc->last = &emc->timings[i];
+			break;
+		}
+	}
+
+	if (i == emc->num_timings) {
+		dev_err(emc->dev, "no EMC table entry found for %lu kHz\n",
+			current_rate);
+		err = -ENOENT;
+		goto release;
+	}
+
+	/* pick a compatible clock change sequence for the EMC table */
+	for (i = 0; i < ARRAY_SIZE(tegra210_emc_sequences); i++) {
+		const struct tegra210_emc_sequence *sequence =
+				tegra210_emc_sequences[i];
+
+		if (emc->timings[0].revision == sequence->revision) {
+			emc->sequence = sequence;
+			break;
+		}
+	}
+
+	if (!emc->sequence) {
+		dev_err(&pdev->dev, "sequence %u not supported\n",
+			emc->timings[0].revision);
+		err = -ENOTSUPP;
+		goto release;
+	}
+
+	emc->offsets = &tegra210_emc_table_register_offsets;
+	emc->refresh = TEGRA210_EMC_REFRESH_NOMINAL;
+
+	emc->provider.owner = THIS_MODULE;
+	emc->provider.dev = &pdev->dev;
+	emc->provider.set_rate = tegra210_emc_set_rate;
+
+	emc->provider.configs = devm_kcalloc(&pdev->dev, emc->num_timings,
+					     sizeof(*emc->provider.configs),
+					     GFP_KERNEL);
+	if (!emc->provider.configs) {
+		err = -ENOMEM;
+		goto release;
+	}
+
+	emc->provider.num_configs = emc->num_timings;
+
+	for (i = 0; i < emc->provider.num_configs; i++) {
+		struct tegra210_emc_timing *timing = &emc->timings[i];
+		struct tegra210_clk_emc_config *config =
+				&emc->provider.configs[i];
+		u32 value;
+
+		config->rate = timing->rate * 1000UL;
+		config->value = timing->clk_src_emc;
+
+		value = timing->burst_mc_regs[MC_EMEM_ARB_MISC0_INDEX];
+
+		if ((value & MC_EMEM_ARB_MISC0_EMC_SAME_FREQ) == 0)
+			config->same_freq = false;
+		else
+			config->same_freq = true;
+	}
+
+	err = tegra210_clk_emc_attach(emc->clk, &emc->provider);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to attach to EMC clock: %d\n", err);
+		goto release;
+	}
+
+	emc->clkchange_delay = 100;
+	emc->training_interval = 100;
+	dev_set_drvdata(emc->dev, emc);
+
+	timer_setup(&emc->refresh_timer, tegra210_emc_poll_refresh,
+		    TIMER_DEFERRABLE);
+	atomic_set(&emc->refresh_poll, 0);
+	emc->refresh_poll_interval = 1000;
+
+	timer_setup(&emc->training, tegra210_emc_train, 0);
+
+	tegra210_emc_debugfs_init(emc);
+
+	cd = devm_thermal_of_cooling_device_register(emc->dev, np, "emc", emc,
+						     &tegra210_emc_cd_ops);
+	if (IS_ERR(cd)) {
+		err = PTR_ERR(cd);
+		dev_err(emc->dev, "failed to register cooling device: %d\n",
+			err);
+		goto detach;
+	}
+
+	return 0;
+
+detach:
+	debugfs_remove_recursive(emc->debugfs.root);
+	tegra210_clk_emc_detach(emc->clk);
+release:
+	of_reserved_mem_device_release(emc->dev);
+put_mc:
+	put_device(emc->mc->dev);
+	return err;
+}
+
+static int tegra210_emc_remove(struct platform_device *pdev)
+{
+	struct tegra210_emc *emc = platform_get_drvdata(pdev);
+
+	debugfs_remove_recursive(emc->debugfs.root);
+	tegra210_clk_emc_detach(emc->clk);
+	of_reserved_mem_device_release(emc->dev);
+	put_device(emc->mc->dev);
+
+	return 0;
+}
+
+static int __maybe_unused tegra210_emc_suspend(struct device *dev)
+{
+	struct tegra210_emc *emc = dev_get_drvdata(dev);
+	int err;
+
+	err = clk_rate_exclusive_get(emc->clk);
+	if (err < 0) {
+		dev_err(emc->dev, "failed to acquire clock: %d\n", err);
+		return err;
+	}
+
+	emc->resume_rate = clk_get_rate(emc->clk);
+
+	clk_set_rate(emc->clk, 204000000);
+	tegra210_clk_emc_detach(emc->clk);
+
+	dev_dbg(dev, "suspending at %lu Hz\n", clk_get_rate(emc->clk));
+
+	return 0;
+}
+
+static int __maybe_unused tegra210_emc_resume(struct device *dev)
+{
+	struct tegra210_emc *emc = dev_get_drvdata(dev);
+	int err;
+
+	err = tegra210_clk_emc_attach(emc->clk, &emc->provider);
+	if (err < 0) {
+		dev_err(dev, "failed to attach to EMC clock: %d\n", err);
+		return err;
+	}
+
+	clk_set_rate(emc->clk, emc->resume_rate);
+	clk_rate_exclusive_put(emc->clk);
+
+	dev_dbg(dev, "resuming at %lu Hz\n", clk_get_rate(emc->clk));
+
+	return 0;
+}
+
+static const struct dev_pm_ops tegra210_emc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(tegra210_emc_suspend, tegra210_emc_resume)
+};
+
+static const struct of_device_id tegra210_emc_of_match[] = {
+	{ .compatible = "nvidia,tegra210-emc", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra210_emc_of_match);
+
+static struct platform_driver tegra210_emc_driver = {
+	.driver = {
+		.name = "tegra210-emc",
+		.of_match_table = tegra210_emc_of_match,
+		.pm = &tegra210_emc_pm_ops,
+	},
+	.probe = tegra210_emc_probe,
+	.remove = tegra210_emc_remove,
+};
+
+module_platform_driver(tegra210_emc_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_AUTHOR("Joseph Lo <josephl@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra210 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra210-emc-table.c b/drivers/memory/tegra/tegra210-emc-table.c
new file mode 100644
index 000000000000..3e0598363b87
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-table.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
+ */
+
+#include <linux/of_reserved_mem.h>
+
+#include "tegra210-emc.h"
+
+#define TEGRA_EMC_MAX_FREQS		16
+
+static int tegra210_emc_table_device_init(struct reserved_mem *rmem,
+					  struct device *dev)
+{
+	struct tegra210_emc *emc = dev_get_drvdata(dev);
+	struct tegra210_emc_timing *timings;
+	unsigned int i, count = 0;
+
+	timings = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+	if (!timings) {
+		dev_err(dev, "failed to map EMC table\n");
+		return -ENOMEM;
+	}
+
+	count = 0;
+
+	for (i = 0; i < TEGRA_EMC_MAX_FREQS; i++) {
+		if (timings[i].revision == 0)
+			break;
+
+		count++;
+	}
+
+	/* only the nominal and derated tables are expected */
+	if (emc->derated) {
+		dev_warn(dev, "excess EMC table '%s'\n", rmem->name);
+		goto out;
+	}
+
+	if (emc->nominal) {
+		if (count != emc->num_timings) {
+			dev_warn(dev, "%u derated vs. %u nominal entries\n",
+				 count, emc->num_timings);
+			memunmap(timings);
+			return -EINVAL;
+		}
+
+		emc->derated = timings;
+	} else {
+		emc->num_timings = count;
+		emc->nominal = timings;
+	}
+
+out:
+	/* keep track of which table this is */
+	rmem->priv = timings;
+
+	return 0;
+}
+
+static void tegra210_emc_table_device_release(struct reserved_mem *rmem,
+					      struct device *dev)
+{
+	struct tegra210_emc_timing *timings = rmem->priv;
+	struct tegra210_emc *emc = dev_get_drvdata(dev);
+
+	if ((emc->nominal && timings != emc->nominal) &&
+	    (emc->derated && timings != emc->derated))
+		dev_warn(dev, "trying to release unassigned EMC table '%s'\n",
+			 rmem->name);
+
+	memunmap(timings);
+}
+
+static const struct reserved_mem_ops tegra210_emc_table_ops = {
+	.device_init = tegra210_emc_table_device_init,
+	.device_release = tegra210_emc_table_device_release,
+};
+
+static int tegra210_emc_table_init(struct reserved_mem *rmem)
+{
+	pr_debug("Tegra210 EMC table at %pa, size %lu bytes\n", &rmem->base,
+		 (unsigned long)rmem->size);
+
+	rmem->ops = &tegra210_emc_table_ops;
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(tegra210_emc_table, "nvidia,tegra210-emc-table",
+		       tegra210_emc_table_init);
diff --git a/drivers/memory/tegra/tegra210-emc.h b/drivers/memory/tegra/tegra210-emc.h
new file mode 100644
index 000000000000..8988bcf15290
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc.h
@@ -0,0 +1,1016 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION.  All rights reserved.
+ */
+
+#ifndef TEGRA210_EMC_H
+#define TEGRA210_EMC_H
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#define DVFS_FGCG_HIGH_SPEED_THRESHOLD				1000
+#define IOBRICK_DCC_THRESHOLD					2400
+#define DVFS_FGCG_MID_SPEED_THRESHOLD				600
+
+#define EMC_STATUS_UPDATE_TIMEOUT				1000
+
+/* register definitions */
+#define EMC_INTSTATUS						0x0
+#define EMC_INTSTATUS_CLKCHANGE_COMPLETE			BIT(4)
+#define EMC_DBG							0x8
+#define EMC_DBG_WRITE_MUX_ACTIVE				BIT(1)
+#define EMC_DBG_WRITE_ACTIVE_ONLY				BIT(30)
+#define EMC_CFG							0xc
+#define EMC_CFG_DRAM_CLKSTOP_PD					BIT(31)
+#define EMC_CFG_DRAM_CLKSTOP_SR					BIT(30)
+#define EMC_CFG_DRAM_ACPD					BIT(29)
+#define EMC_CFG_DYN_SELF_REF					BIT(28)
+#define EMC_PIN							0x24
+#define EMC_PIN_PIN_CKE						BIT(0)
+#define EMC_PIN_PIN_CKEB					BIT(1)
+#define EMC_PIN_PIN_CKE_PER_DEV					BIT(2)
+#define EMC_TIMING_CONTROL					0x28
+#define EMC_RC							0x2c
+#define EMC_RFC							0x30
+#define EMC_RAS							0x34
+#define EMC_RP							0x38
+#define EMC_R2W							0x3c
+#define EMC_W2R							0x40
+#define EMC_R2P							0x44
+#define EMC_W2P							0x48
+#define EMC_RD_RCD						0x4c
+#define EMC_WR_RCD						0x50
+#define EMC_RRD							0x54
+#define EMC_REXT						0x58
+#define EMC_WDV							0x5c
+#define EMC_QUSE						0x60
+#define EMC_QRST						0x64
+#define EMC_QSAFE						0x68
+#define EMC_RDV							0x6c
+#define EMC_REFRESH						0x70
+#define EMC_BURST_REFRESH_NUM					0x74
+#define EMC_PDEX2WR						0x78
+#define EMC_PDEX2RD						0x7c
+#define EMC_PCHG2PDEN						0x80
+#define EMC_ACT2PDEN						0x84
+#define EMC_AR2PDEN						0x88
+#define EMC_RW2PDEN						0x8c
+#define EMC_TXSR						0x90
+#define EMC_TCKE						0x94
+#define EMC_TFAW						0x98
+#define EMC_TRPAB						0x9c
+#define EMC_TCLKSTABLE						0xa0
+#define EMC_TCLKSTOP						0xa4
+#define EMC_TREFBW						0xa8
+#define EMC_TPPD						0xac
+#define EMC_ODT_WRITE						0xb0
+#define EMC_PDEX2MRR						0xb4
+#define EMC_WEXT						0xb8
+#define EMC_RFC_SLR						0xc0
+#define EMC_MRS_WAIT_CNT2					0xc4
+#define EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT		16
+#define EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT		0
+#define EMC_MRS_WAIT_CNT					0xc8
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT			0
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK			\
+	(0x3FF << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)
+
+#define EMC_MRS							0xcc
+#define EMC_EMRS						0xd0
+#define EMC_EMRS_USE_EMRS_LONG_CNT				BIT(26)
+#define EMC_REF							0xd4
+#define  EMC_REF_REF_CMD					BIT(0)
+#define EMC_SELF_REF						0xe0
+#define EMC_MRW							0xe8
+#define EMC_MRW_MRW_OP_SHIFT					0
+#define EMC_MRW_MRW_OP_MASK					\
+	(0xff << EMC_MRW_MRW_OP_SHIFT)
+#define EMC_MRW_MRW_MA_SHIFT					16
+#define EMC_MRW_USE_MRW_EXT_CNT					27
+#define EMC_MRW_MRW_DEV_SELECTN_SHIFT				30
+
+#define EMC_MRR							0xec
+#define EMC_MRR_DEV_SEL_SHIFT					30
+#define EMC_MRR_DEV_SEL_MASK					0x3
+#define EMC_MRR_MA_SHIFT					16
+#define EMC_MRR_MA_MASK						0xff
+#define EMC_MRR_DATA_SHIFT					0
+#define EMC_MRR_DATA_MASK					0xffff
+
+#define EMC_FBIO_SPARE						0x100
+#define EMC_FBIO_CFG5						0x104
+#define EMC_FBIO_CFG5_DRAM_TYPE_SHIFT				0
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK				\
+	(0x3 << EMC_FBIO_CFG5_DRAM_TYPE_SHIFT)
+#define EMC_FBIO_CFG5_CMD_TX_DIS				BIT(8)
+
+#define EMC_PDEX2CKE						0x118
+#define EMC_CKE2PDEN						0x11c
+#define EMC_MPC							0x128
+#define EMC_EMRS2						0x12c
+#define EMC_EMRS2_USE_EMRS2_LONG_CNT				BIT(26)
+#define EMC_MRW2						0x134
+#define EMC_MRW3						0x138
+#define EMC_MRW4						0x13c
+#define EMC_R2R							0x144
+#define EMC_EINPUT						0x14c
+#define EMC_EINPUT_DURATION					0x150
+#define EMC_PUTERM_EXTRA					0x154
+#define EMC_TCKESR						0x158
+#define EMC_TPD							0x15c
+#define EMC_AUTO_CAL_CONFIG					0x2a4
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START		BIT(0)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL		BIT(9)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL		BIT(10)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE			BIT(29)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_START			BIT(31)
+#define EMC_EMC_STATUS						0x2b4
+#define EMC_EMC_STATUS_MRR_DIVLD				BIT(20)
+#define EMC_EMC_STATUS_TIMING_UPDATE_STALLED			BIT(23)
+#define EMC_EMC_STATUS_DRAM_IN_POWERDOWN_SHIFT			4
+#define EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK			\
+	(0x3 << EMC_EMC_STATUS_DRAM_IN_POWERDOWN_SHIFT)
+#define EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_SHIFT		8
+#define EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK		\
+	(0x3 << EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_SHIFT)
+
+#define EMC_CFG_2						0x2b8
+#define EMC_CFG_DIG_DLL						0x2bc
+#define EMC_CFG_DIG_DLL_CFG_DLL_EN				BIT(0)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK		BIT(1)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC		BIT(3)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK		BIT(4)
+#define EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT			6
+#define EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK			\
+	(0x3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT)
+#define EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT		8
+#define EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK			\
+	(0x7 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT)
+
+#define EMC_CFG_DIG_DLL_PERIOD					0x2c0
+#define EMC_DIG_DLL_STATUS					0x2c4
+#define EMC_DIG_DLL_STATUS_DLL_LOCK				BIT(15)
+#define EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED			BIT(17)
+#define EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT			0
+#define EMC_DIG_DLL_STATUS_DLL_OUT_MASK				\
+	(0x7ff << EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT)
+
+#define EMC_CFG_DIG_DLL_1					0x2c8
+#define EMC_RDV_MASK						0x2cc
+#define EMC_WDV_MASK						0x2d0
+#define EMC_RDV_EARLY_MASK					0x2d4
+#define EMC_RDV_EARLY						0x2d8
+#define EMC_AUTO_CAL_CONFIG8					0x2dc
+#define EMC_ZCAL_INTERVAL					0x2e0
+#define EMC_ZCAL_WAIT_CNT					0x2e4
+#define EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK			0x7ff
+#define EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT			0
+
+#define EMC_ZQ_CAL						0x2ec
+#define EMC_ZQ_CAL_DEV_SEL_SHIFT				30
+#define EMC_ZQ_CAL_LONG						BIT(4)
+#define EMC_ZQ_CAL_ZQ_LATCH_CMD					BIT(1)
+#define EMC_ZQ_CAL_ZQ_CAL_CMD					BIT(0)
+#define EMC_FDPD_CTRL_DQ					0x310
+#define EMC_FDPD_CTRL_CMD					0x314
+#define EMC_PMACRO_CMD_BRICK_CTRL_FDPD				0x318
+#define EMC_PMACRO_DATA_BRICK_CTRL_FDPD				0x31c
+#define EMC_PMACRO_BRICK_CTRL_RFU1				0x330
+#define EMC_PMACRO_BRICK_CTRL_RFU2				0x334
+#define EMC_TR_TIMING_0						0x3b4
+#define EMC_TR_CTRL_1						0x3bc
+#define EMC_TR_RDV						0x3c4
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE			0x3cc
+#define EMC_SEL_DPD_CTRL					0x3d8
+#define EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN			BIT(8)
+#define EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN				BIT(5)
+#define EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN			BIT(4)
+#define EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN				BIT(3)
+#define EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN				BIT(2)
+#define EMC_PRE_REFRESH_REQ_CNT					0x3dc
+#define EMC_DYN_SELF_REF_CONTROL				0x3e0
+#define EMC_TXSRDLL						0x3e4
+#define EMC_CCFIFO_ADDR						0x3e8
+#define  EMC_CCFIFO_ADDR_STALL_BY_1 BIT(31)
+#define  EMC_CCFIFO_ADDR_STALL(x) (((x) & 0x7fff) << 16)
+#define  EMC_CCFIFO_ADDR_OFFSET(x) ((x) & 0xffff)
+#define EMC_CCFIFO_DATA						0x3ec
+#define EMC_TR_QPOP						0x3f4
+#define EMC_TR_RDV_MASK						0x3f8
+#define EMC_TR_QSAFE						0x3fc
+#define EMC_TR_QRST						0x400
+#define EMC_ISSUE_QRST						0x428
+#define EMC_AUTO_CAL_CONFIG2					0x458
+#define EMC_AUTO_CAL_CONFIG3					0x45c
+#define EMC_TR_DVFS						0x460
+#define EMC_AUTO_CAL_CHANNEL					0x464
+#define EMC_IBDLY						0x468
+#define EMC_OBDLY						0x46c
+#define EMC_TXDSRVTTGEN						0x480
+#define EMC_WE_DURATION						0x48c
+#define EMC_WS_DURATION						0x490
+#define EMC_WEV							0x494
+#define EMC_WSV							0x498
+#define EMC_CFG_3						0x49c
+#define EMC_MRW6						0x4a4
+#define EMC_MRW7						0x4a8
+#define EMC_MRW8						0x4ac
+#define EMC_MRW9						0x4b0
+#define EMC_MRW10						0x4b4
+#define EMC_MRW11						0x4b8
+#define EMC_MRW12						0x4bc
+#define EMC_MRW13						0x4c0
+#define EMC_MRW14						0x4c4
+#define EMC_MRW15						0x4d0
+#define EMC_CFG_SYNC						0x4d4
+#define EMC_FDPD_CTRL_CMD_NO_RAMP				0x4d8
+#define EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE	BIT(0)
+#define EMC_WDV_CHK						0x4e0
+#define EMC_CFG_PIPE_2						0x554
+#define EMC_CFG_PIPE_CLK					0x558
+#define EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON				BIT(0)
+#define EMC_CFG_PIPE_1						0x55c
+#define EMC_CFG_PIPE						0x560
+#define EMC_QPOP						0x564
+#define EMC_QUSE_WIDTH						0x568
+#define EMC_PUTERM_WIDTH					0x56c
+#define EMC_AUTO_CAL_CONFIG7					0x574
+#define EMC_REFCTRL2						0x580
+#define EMC_FBIO_CFG7						0x584
+#define EMC_FBIO_CFG7_CH0_ENABLE				BIT(1)
+#define EMC_FBIO_CFG7_CH1_ENABLE				BIT(2)
+#define EMC_DATA_BRLSHFT_0					0x588
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT	21
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT	18
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT	15
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT	12
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT	9
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT	6
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT	3
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT	0
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT)
+
+#define EMC_DATA_BRLSHFT_1					0x58c
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT	21
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT	18
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT	15
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT	12
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT	9
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT	6
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT	3
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT	0
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_MASK	\
+	(0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT)
+
+#define EMC_RFCPB						0x590
+#define EMC_DQS_BRLSHFT_0					0x594
+#define EMC_DQS_BRLSHFT_1					0x598
+#define EMC_CMD_BRLSHFT_0					0x59c
+#define EMC_CMD_BRLSHFT_1					0x5a0
+#define EMC_CMD_BRLSHFT_2					0x5a4
+#define EMC_CMD_BRLSHFT_3					0x5a8
+#define EMC_QUSE_BRLSHFT_0					0x5ac
+#define EMC_AUTO_CAL_CONFIG4					0x5b0
+#define EMC_AUTO_CAL_CONFIG5					0x5b4
+#define EMC_QUSE_BRLSHFT_1					0x5b8
+#define EMC_QUSE_BRLSHFT_2					0x5bc
+#define EMC_CCDMW						0x5c0
+#define EMC_QUSE_BRLSHFT_3					0x5c4
+#define EMC_AUTO_CAL_CONFIG6					0x5cc
+#define EMC_DLL_CFG_0						0x5e4
+#define EMC_DLL_CFG_1						0x5e8
+#define EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_SHIFT		10
+#define EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK		\
+	(0x7ff << EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_SHIFT)
+
+#define EMC_CONFIG_SAMPLE_DELAY					0x5f0
+#define EMC_CFG_UPDATE						0x5f4
+#define EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT		9
+#define EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK		\
+	(0x3 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT)
+
+#define EMC_PMACRO_QUSE_DDLL_RANK0_0				0x600
+#define EMC_PMACRO_QUSE_DDLL_RANK0_1				0x604
+#define EMC_PMACRO_QUSE_DDLL_RANK0_2				0x608
+#define EMC_PMACRO_QUSE_DDLL_RANK0_3				0x60c
+#define EMC_PMACRO_QUSE_DDLL_RANK0_4				0x610
+#define EMC_PMACRO_QUSE_DDLL_RANK0_5				0x614
+#define EMC_PMACRO_QUSE_DDLL_RANK1_0				0x620
+#define EMC_PMACRO_QUSE_DDLL_RANK1_1				0x624
+#define EMC_PMACRO_QUSE_DDLL_RANK1_2				0x628
+#define EMC_PMACRO_QUSE_DDLL_RANK1_3				0x62c
+#define EMC_PMACRO_QUSE_DDLL_RANK1_4				0x630
+#define EMC_PMACRO_QUSE_DDLL_RANK1_5				0x634
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0			0x640
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_SHIFT \
+	16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_SHIFT \
+	0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_MASK \
+	(0x3ff <<							    \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1			0x644
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_SHIFT \
+	16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_SHIFT \
+	0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2			0x648
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_SHIFT  \
+	16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_SHIFT \
+	0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3			0x64c
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_SHIFT \
+	16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_SHIFT \
+	0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4			0x650
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5			0x654
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0			0x660
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_SHIFT \
+	16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_SHIFT \
+	0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1			0x664
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_SHIFT \
+	16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_SHIFT \
+	0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2			0x668
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_SHIFT \
+	16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_SHIFT \
+	0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3			0x66c
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_SHIFT \
+	16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_SHIFT \
+	0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_MASK  \
+	(0x3ff <<							     \
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4			0x670
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5			0x674
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0			0x680
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1			0x684
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2			0x688
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3			0x68c
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4			0x690
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5			0x694
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0			0x6a0
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1			0x6a4
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2			0x6a8
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3			0x6ac
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4			0x6b0
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5			0x6b4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0			0x6c0
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1			0x6c4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2			0x6c8
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3			0x6cc
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0			0x6e0
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1			0x6e4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2			0x6e8
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3			0x6ec
+#define EMC_PMACRO_TX_PWRD_0					0x720
+#define EMC_PMACRO_TX_PWRD_1					0x724
+#define EMC_PMACRO_TX_PWRD_2					0x728
+#define EMC_PMACRO_TX_PWRD_3					0x72c
+#define EMC_PMACRO_TX_PWRD_4					0x730
+#define EMC_PMACRO_TX_PWRD_5					0x734
+#define EMC_PMACRO_TX_SEL_CLK_SRC_0				0x740
+#define EMC_PMACRO_TX_SEL_CLK_SRC_1				0x744
+#define EMC_PMACRO_TX_SEL_CLK_SRC_2				0x748
+#define EMC_PMACRO_TX_SEL_CLK_SRC_3				0x74c
+#define EMC_PMACRO_TX_SEL_CLK_SRC_4				0x750
+#define EMC_PMACRO_TX_SEL_CLK_SRC_5				0x754
+#define EMC_PMACRO_DDLL_BYPASS					0x760
+#define EMC_PMACRO_DDLL_PWRD_0					0x770
+#define EMC_PMACRO_DDLL_PWRD_1					0x774
+#define EMC_PMACRO_DDLL_PWRD_2					0x778
+#define EMC_PMACRO_CMD_CTRL_0					0x780
+#define EMC_PMACRO_CMD_CTRL_1					0x784
+#define EMC_PMACRO_CMD_CTRL_2					0x788
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0		0x800
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1		0x804
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2		0x808
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3		0x80c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0		0x810
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1		0x814
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2		0x818
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3		0x81c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0		0x820
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1		0x824
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2		0x828
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3		0x82c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0		0x830
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1		0x834
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2		0x838
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3		0x83c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0		0x840
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1		0x844
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2		0x848
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3		0x84c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0		0x850
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1		0x854
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2		0x858
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3		0x85c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0		0x860
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1		0x864
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2		0x868
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3		0x86c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0		0x870
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1		0x874
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2		0x878
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3		0x87c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0		0x880
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1		0x884
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2		0x888
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3		0x88c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0		0x890
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1		0x894
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2		0x898
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3		0x89c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0		0x8a0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1		0x8a4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2		0x8a8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3		0x8ac
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0		0x8b0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1		0x8b4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2		0x8b8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3		0x8bc
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0		0x900
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1		0x904
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2		0x908
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3		0x90c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0		0x910
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1		0x914
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2		0x918
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3		0x91c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0		0x920
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1		0x924
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2		0x928
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3		0x92c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0		0x930
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1		0x934
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2		0x938
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3		0x93c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0		0x940
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1		0x944
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2		0x948
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3		0x94c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0		0x950
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1		0x954
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2		0x958
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3		0x95c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0		0x960
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1		0x964
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2		0x968
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3		0x96c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0		0x970
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1		0x974
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2		0x978
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3		0x97c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0		0x980
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1		0x984
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2		0x988
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3		0x98c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0		0x990
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1		0x994
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2		0x998
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3		0x99c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0		0x9a0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1		0x9a4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2		0x9a8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3		0x9ac
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0		0x9b0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1		0x9b4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2		0x9b8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3		0x9bc
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0		0xa00
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1		0xa04
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2		0xa08
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0		0xa10
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1		0xa14
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2		0xa18
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0		0xa20
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1		0xa24
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2		0xa28
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0		0xa30
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1		0xa34
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2		0xa38
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0		0xa40
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1		0xa44
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2		0xa48
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0		0xa50
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1		0xa54
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2		0xa58
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0		0xa60
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1		0xa64
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2		0xa68
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0		0xa70
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1		0xa74
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2		0xa78
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0		0xb00
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1		0xb04
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2		0xb08
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0		0xb10
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1		0xb14
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2		0xb18
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0		0xb20
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1		0xb24
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2		0xb28
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0		0xb30
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1		0xb34
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2		0xb38
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0		0xb40
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1		0xb44
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2		0xb48
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0		0xb50
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1		0xb54
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2		0xb58
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0		0xb60
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1		0xb64
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2		0xb68
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0		0xb70
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1		0xb74
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2		0xb78
+#define EMC_PMACRO_IB_VREF_DQ_0					0xbe0
+#define EMC_PMACRO_IB_VREF_DQ_1					0xbe4
+#define EMC_PMACRO_IB_VREF_DQS_0				0xbf0
+#define EMC_PMACRO_IB_VREF_DQS_1				0xbf4
+#define EMC_PMACRO_DDLL_LONG_CMD_0				0xc00
+#define EMC_PMACRO_DDLL_LONG_CMD_1				0xc04
+#define EMC_PMACRO_DDLL_LONG_CMD_2				0xc08
+#define EMC_PMACRO_DDLL_LONG_CMD_3				0xc0c
+#define EMC_PMACRO_DDLL_LONG_CMD_4				0xc10
+#define EMC_PMACRO_DDLL_LONG_CMD_5				0xc14
+#define EMC_PMACRO_DDLL_SHORT_CMD_0				0xc20
+#define EMC_PMACRO_DDLL_SHORT_CMD_1				0xc24
+#define EMC_PMACRO_DDLL_SHORT_CMD_2				0xc28
+#define EMC_PMACRO_CFG_PM_GLOBAL_0				0xc30
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0		BIT(16)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1		BIT(17)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2		BIT(18)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3		BIT(19)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4		BIT(20)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5		BIT(21)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6		BIT(22)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7		BIT(23)
+#define EMC_PMACRO_VTTGEN_CTRL_0				0xc34
+#define EMC_PMACRO_VTTGEN_CTRL_1				0xc38
+#define EMC_PMACRO_BG_BIAS_CTRL_0				0xc3c
+#define EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD			BIT(0)
+#define EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD			BIT(2)
+#define EMC_PMACRO_PAD_CFG_CTRL					0xc40
+#define EMC_PMACRO_ZCTRL					0xc44
+#define EMC_PMACRO_CMD_PAD_RX_CTRL				0xc50
+#define EMC_PMACRO_DATA_PAD_RX_CTRL				0xc54
+#define EMC_PMACRO_CMD_RX_TERM_MODE				0xc58
+#define EMC_PMACRO_DATA_RX_TERM_MODE				0xc5c
+#define EMC_PMACRO_CMD_PAD_TX_CTRL				0xc60
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC		BIT(1)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC		BIT(9)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC		BIT(16)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC		BIT(24)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON		BIT(26)
+
+#define EMC_PMACRO_DATA_PAD_TX_CTRL				0xc64
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF		BIT(0)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC		BIT(1)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF		BIT(8)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC		BIT(9)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC		BIT(16)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC		BIT(24)
+
+#define EMC_PMACRO_COMMON_PAD_TX_CTRL				0xc68
+#define EMC_PMACRO_AUTOCAL_CFG_COMMON				0xc78
+#define EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS		BIT(16)
+#define EMC_PMACRO_VTTGEN_CTRL_2				0xcf0
+#define EMC_PMACRO_IB_RXRT					0xcf4
+#define EMC_PMACRO_TRAINING_CTRL_0				0xcf8
+#define EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR		BIT(3)
+#define EMC_PMACRO_TRAINING_CTRL_1				0xcfc
+#define EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR		BIT(3)
+#define EMC_TRAINING_CTRL					0xe04
+#define EMC_TRAINING_QUSE_CORS_CTRL				0xe0c
+#define EMC_TRAINING_QUSE_FINE_CTRL				0xe10
+#define EMC_TRAINING_QUSE_CTRL_MISC				0xe14
+#define EMC_TRAINING_WRITE_FINE_CTRL				0xe18
+#define EMC_TRAINING_WRITE_CTRL_MISC				0xe1c
+#define EMC_TRAINING_WRITE_VREF_CTRL				0xe20
+#define EMC_TRAINING_READ_FINE_CTRL				0xe24
+#define EMC_TRAINING_READ_CTRL_MISC				0xe28
+#define EMC_TRAINING_READ_VREF_CTRL				0xe2c
+#define EMC_TRAINING_CA_FINE_CTRL				0xe30
+#define EMC_TRAINING_CA_CTRL_MISC				0xe34
+#define EMC_TRAINING_CA_CTRL_MISC1				0xe38
+#define EMC_TRAINING_CA_VREF_CTRL				0xe3c
+#define EMC_TRAINING_SETTLE					0xe44
+#define EMC_TRAINING_MPC					0xe5c
+#define EMC_TRAINING_VREF_SETTLE				0xe6c
+#define EMC_TRAINING_QUSE_VREF_CTRL				0xed0
+#define EMC_TRAINING_OPT_DQS_IB_VREF_RANK0			0xed4
+#define EMC_TRAINING_OPT_DQS_IB_VREF_RANK1			0xed8
+
+#define EMC_COPY_TABLE_PARAM_PERIODIC_FIELDS			BIT(0)
+#define EMC_COPY_TABLE_PARAM_TRIM_REGS				BIT(1)
+
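+/* Indices of selected registers within the burst_regs table. */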
+enum burst_regs_list {
+	EMC_RP_INDEX = 6,
+	EMC_R2P_INDEX = 9,
+	EMC_W2P_INDEX,
+	EMC_MRW6_INDEX = 31,
+	EMC_REFRESH_INDEX = 41,
+	EMC_PRE_REFRESH_REQ_CNT_INDEX = 43,
+	EMC_TRPAB_INDEX = 59,
+	EMC_MRW7_INDEX = 62,
+	EMC_FBIO_CFG5_INDEX = 65,
+	EMC_FBIO_CFG7_INDEX,
+	EMC_CFG_DIG_DLL_INDEX,
+	EMC_ZCAL_INTERVAL_INDEX = 139,
+	EMC_ZCAL_WAIT_CNT_INDEX,
+	EMC_MRS_WAIT_CNT_INDEX = 141,
+	EMC_DLL_CFG_0_INDEX = 144,
+	EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX = 146,
+	EMC_CFG_INDEX = 148,
+	EMC_DYN_SELF_REF_CONTROL_INDEX = 150,
+	EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX = 161,
+	EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX,
+	EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX,
+	EMC_PMACRO_BRICK_CTRL_RFU1_INDEX = 167,
+	EMC_PMACRO_BG_BIAS_CTRL_0_INDEX = 171,
+	EMC_MRW14_INDEX = 199,
+	EMC_MRW15_INDEX = 220,
+};
+
+enum trim_regs_list {
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_INDEX = 60,
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_INDEX,
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_INDEX,
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_INDEX,
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4_INDEX,
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5_INDEX,
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_INDEX,
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_INDEX,
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_INDEX,
+	EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_INDEX,
+};
+
+enum burst_mc_regs_list {
+	MC_EMEM_ARB_MISC0_INDEX = 20,
+};
+
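+/* Indices into the dram_timings array (DRAM_TIMINGS_NUM entries). */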
+enum {
+	T_RP,
+	T_FC_LPDDR4,
+	T_RFC,
+	T_PDEX,
+	RL,
+};
+
+enum {
+	AUTO_PD = 0,
+	MAN_SR  = 2,
+};
+
+enum {
+	ASSEMBLY = 0,
+	ACTIVE,
+};
+
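+/* Indices into the trained/current DRAM clock tree value arrays. */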
+enum {
+	C0D0U0,
+	C0D0U1,
+	C0D1U0,
+	C0D1U1,
+	C1D0U0,
+	C1D0U1,
+	C1D1U0,
+	C1D1U1,
+	DRAM_CLKTREE_NUM,
+};
+
+#define VREF_REGS_PER_CHANNEL_SIZE 4
+#define DRAM_TIMINGS_NUM 5
+#define BURST_REGS_PER_CHANNEL_SIZE 8
+#define TRIM_REGS_PER_CHANNEL_SIZE 10
+#define PTFV_ARRAY_SIZE 12
+#define SAVE_RESTORE_MOD_REGS_SIZE 12
+#define TRAINING_MOD_REGS_SIZE 20
+#define BURST_UP_DOWN_REGS_SIZE 24
+#define BURST_MC_REGS_SIZE 33
+#define TRIM_REGS_SIZE 138
+#define BURST_REGS_SIZE 221
+
+struct tegra210_emc_per_channel_regs {
+	u16 bank;
+	u16 offset;
+};
+
+struct tegra210_emc_table_register_offsets {
+	u16 burst[BURST_REGS_SIZE];
+	u16 trim[TRIM_REGS_SIZE];
+	u16 burst_mc[BURST_MC_REGS_SIZE];
+	u16 la_scale[BURST_UP_DOWN_REGS_SIZE];
+	struct tegra210_emc_per_channel_regs burst_per_channel[BURST_REGS_PER_CHANNEL_SIZE];
+	struct tegra210_emc_per_channel_regs trim_per_channel[TRIM_REGS_PER_CHANNEL_SIZE];
+	struct tegra210_emc_per_channel_regs vref_per_channel[VREF_REGS_PER_CHANNEL_SIZE];
+};
+
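+/* Register values and training state for a single EMC clock rate. */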
+struct tegra210_emc_timing {
+	u32 revision;
+	const char dvfs_ver[60];
+	u32 rate;
+	u32 min_volt;
+	u32 gpu_min_volt;
+	const char clock_src[32];
+	u32 clk_src_emc;
+	u32 needs_training;
+	u32 training_pattern;
+	u32 trained;
+
+	u32 periodic_training;
+	u32 trained_dram_clktree[DRAM_CLKTREE_NUM];
+	u32 current_dram_clktree[DRAM_CLKTREE_NUM];
+	u32 run_clocks;
+	u32 tree_margin;
+
+	u32 num_burst;
+	u32 num_burst_per_ch;
+	u32 num_trim;
+	u32 num_trim_per_ch;
+	u32 num_mc_regs;
+	u32 num_up_down;
+	u32 vref_num;
+	u32 training_mod_num;
+	u32 dram_timing_num;
+
+	u32 ptfv_list[PTFV_ARRAY_SIZE];
+
+	u32 burst_regs[BURST_REGS_SIZE];
+	u32 burst_reg_per_ch[BURST_REGS_PER_CHANNEL_SIZE];
+	u32 shadow_regs_ca_train[BURST_REGS_SIZE];
+	u32 shadow_regs_quse_train[BURST_REGS_SIZE];
+	u32 shadow_regs_rdwr_train[BURST_REGS_SIZE];
+
+	u32 trim_regs[TRIM_REGS_SIZE];
+	u32 trim_perch_regs[TRIM_REGS_PER_CHANNEL_SIZE];
+
+	u32 vref_perch_regs[VREF_REGS_PER_CHANNEL_SIZE];
+
+	u32 dram_timings[DRAM_TIMINGS_NUM];
+	u32 training_mod_regs[TRAINING_MOD_REGS_SIZE];
+	u32 save_restore_mod_regs[SAVE_RESTORE_MOD_REGS_SIZE];
+	u32 burst_mc_regs[BURST_MC_REGS_SIZE];
+	u32 la_scale_regs[BURST_UP_DOWN_REGS_SIZE];
+
+	u32 min_mrs_wait;
+	u32 emc_mrw;
+	u32 emc_mrw2;
+	u32 emc_mrw3;
+	u32 emc_mrw4;
+	u32 emc_mrw9;
+	u32 emc_mrs;
+	u32 emc_emrs;
+	u32 emc_emrs2;
+	u32 emc_auto_cal_config;
+	u32 emc_auto_cal_config2;
+	u32 emc_auto_cal_config3;
+	u32 emc_auto_cal_config4;
+	u32 emc_auto_cal_config5;
+	u32 emc_auto_cal_config6;
+	u32 emc_auto_cal_config7;
+	u32 emc_auto_cal_config8;
+	u32 emc_cfg_2;
+	u32 emc_sel_dpd_ctrl;
+	u32 emc_fdpd_ctrl_cmd_no_ramp;
+	u32 dll_clk_src;
+	u32 clk_out_enb_x_0_clk_enb_emc_dll;
+	u32 latency;
+};
+
+enum tegra210_emc_refresh {
+	TEGRA210_EMC_REFRESH_NOMINAL = 0,
+	TEGRA210_EMC_REFRESH_2X,
+	TEGRA210_EMC_REFRESH_4X,
+	TEGRA210_EMC_REFRESH_THROTTLE, /* 4x Refresh + derating. */
+};
+
+#define DRAM_TYPE_DDR3		0
+#define DRAM_TYPE_LPDDR4	1
+#define DRAM_TYPE_LPDDR2	2
+#define DRAM_TYPE_DDR2		3
+
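+/* Driver state for the Tegra210 external memory controller. */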
+struct tegra210_emc {
+	struct tegra_mc *mc;
+	struct device *dev;
+	struct clk *clk;
+
+	/* nominal EMC frequency table */
+	struct tegra210_emc_timing *nominal;
+	/* derated EMC frequency table */
+	struct tegra210_emc_timing *derated;
+
+	/* currently selected table (nominal or derated) */
+	struct tegra210_emc_timing *timings;
+	unsigned int num_timings;
+
+	const struct tegra210_emc_table_register_offsets *offsets;
+
+	const struct tegra210_emc_sequence *sequence;
+	spinlock_t lock;
+
+	void __iomem *regs, *channel[2];
+	unsigned int num_channels;
+	unsigned int num_devices;
+	unsigned int dram_type;
+
+	struct tegra210_emc_timing *last;
+	struct tegra210_emc_timing *next;
+
+	unsigned int training_interval;
+	struct timer_list training;
+
+	enum tegra210_emc_refresh refresh;
+	unsigned int refresh_poll_interval;
+	struct timer_list refresh_timer;
+	unsigned int temperature;
+	atomic_t refresh_poll;
+
+	ktime_t clkchange_time;
+	int clkchange_delay;
+
+	unsigned long resume_rate;
+
+	struct {
+		struct dentry *root;
+		unsigned long min_rate;
+		unsigned long max_rate;
+		unsigned int temperature;
+	} debugfs;
+
+	struct tegra210_clk_emc_provider provider;
+};
+
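+/* Clock-change sequence implementation for a particular sequence revision. */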
+struct tegra210_emc_sequence {
+	u8 revision;
+	void (*set_clock)(struct tegra210_emc *emc, u32 clksrc);
+	u32 (*periodic_compensation)(struct tegra210_emc *emc);
+};
+
+static inline void emc_writel(struct tegra210_emc *emc, u32 value,
+			      unsigned int offset)
+{
+	writel_relaxed(value, emc->regs + offset);
+}
+
+static inline u32 emc_readl(struct tegra210_emc *emc, unsigned int offset)
+{
+	return readl_relaxed(emc->regs + offset);
+}
+
+static inline void emc_channel_writel(struct tegra210_emc *emc,
+				      unsigned int channel,
+				      u32 value, unsigned int offset)
+{
+	writel_relaxed(value, emc->channel[channel] + offset);
+}
+
+static inline u32 emc_channel_readl(struct tegra210_emc *emc,
+				    unsigned int channel, unsigned int offset)
+{
+	return readl_relaxed(emc->channel[channel] + offset);
+}
+
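+/*
+ * Queue a write of @value to the register at @offset through the CCFIFO;
+ * it is applied during the clock-change sequence after a stall of @delay.
+ */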
+static inline void ccfifo_writel(struct tegra210_emc *emc, u32 value,
+				 unsigned int offset, u32 delay)
+{
+	writel_relaxed(value, emc->regs + EMC_CCFIFO_DATA);
+
+	value = EMC_CCFIFO_ADDR_STALL_BY_1 | EMC_CCFIFO_ADDR_STALL(delay) |
+		EMC_CCFIFO_ADDR_OFFSET(offset);
+	writel_relaxed(value, emc->regs + EMC_CCFIFO_ADDR);
+}
+
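+/* Integer division of a by b, rounding the result up. */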
+static inline u32 div_o3(u32 a, u32 b)
+{
+	u32 result = a / b;
+
+	if ((b * result) < a)
+		return result + 1;
+
+	return result;
+}
+
+/* from tegra210-emc-cc-r21021.c */
+extern const struct tegra210_emc_sequence tegra210_emc_r21021;
+
+int tegra210_emc_set_refresh(struct tegra210_emc *emc,
+			     enum tegra210_emc_refresh refresh);
+u32 tegra210_emc_mrr_read(struct tegra210_emc *emc, unsigned int chip,
+			  unsigned int address);
+void tegra210_emc_do_clock_change(struct tegra210_emc *emc, u32 clksrc);
+void tegra210_emc_set_shadow_bypass(struct tegra210_emc *emc, int set);
+void tegra210_emc_timing_update(struct tegra210_emc *emc);
+u32 tegra210_emc_get_dll_state(struct tegra210_emc_timing *next);
+struct tegra210_emc_timing *tegra210_emc_find_timing(struct tegra210_emc *emc,
+						     unsigned long rate);
+void tegra210_emc_adjust_timing(struct tegra210_emc *emc,
+				struct tegra210_emc_timing *timing);
+int tegra210_emc_wait_for_update(struct tegra210_emc *emc, unsigned int channel,
+				 unsigned int offset, u32 bit_mask, bool state);
+unsigned long tegra210_emc_actual_osc_clocks(u32 in);
+u32 tegra210_emc_compensate(struct tegra210_emc_timing *next, u32 offset);
+void tegra210_emc_dll_disable(struct tegra210_emc *emc);
+void tegra210_emc_dll_enable(struct tegra210_emc *emc);
+u32 tegra210_emc_dll_prelock(struct tegra210_emc *emc, u32 clksrc);
+u32 tegra210_emc_dvfs_power_ramp_down(struct tegra210_emc *emc, u32 clk,
+				      bool flip_backward);
+u32 tegra210_emc_dvfs_power_ramp_up(struct tegra210_emc *emc, u32 clk,
+				    bool flip_backward);
+void tegra210_emc_reset_dram_clktree_values(struct tegra210_emc_timing *timing);
+void tegra210_emc_start_periodic_compensation(struct tegra210_emc *emc);
+
+#endif
diff --git a/drivers/memory/tegra/tegra210-mc.h b/drivers/memory/tegra/tegra210-mc.h
new file mode 100644
index 000000000000..b9b91ceb4730
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-mc.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION.  All rights reserved.
+ */
+
+#ifndef TEGRA210_MC_H
+#define TEGRA210_MC_H
+
+#include "mc.h"
+
+/* register definitions */
+#define MC_LATENCY_ALLOWANCE_AVPC_0				0x2e4
+#define MC_LATENCY_ALLOWANCE_HC_0				0x310
+#define MC_LATENCY_ALLOWANCE_HC_1				0x314
+#define MC_LATENCY_ALLOWANCE_MPCORE_0				0x320
+#define MC_LATENCY_ALLOWANCE_NVENC_0				0x328
+#define MC_LATENCY_ALLOWANCE_PPCS_0				0x344
+#define MC_LATENCY_ALLOWANCE_PPCS_1				0x348
+#define MC_LATENCY_ALLOWANCE_ISP2_0				0x370
+#define MC_LATENCY_ALLOWANCE_ISP2_1				0x374
+#define MC_LATENCY_ALLOWANCE_XUSB_0				0x37c
+#define MC_LATENCY_ALLOWANCE_XUSB_1				0x380
+#define MC_LATENCY_ALLOWANCE_TSEC_0				0x390
+#define MC_LATENCY_ALLOWANCE_VIC_0				0x394
+#define MC_LATENCY_ALLOWANCE_VI2_0				0x398
+#define MC_LATENCY_ALLOWANCE_GPU_0				0x3ac
+#define MC_LATENCY_ALLOWANCE_SDMMCA_0				0x3b8
+#define MC_LATENCY_ALLOWANCE_SDMMCAA_0				0x3bc
+#define MC_LATENCY_ALLOWANCE_SDMMC_0				0x3c0
+#define MC_LATENCY_ALLOWANCE_SDMMCAB_0				0x3c4
+#define MC_LATENCY_ALLOWANCE_GPU2_0				0x3e8
+#define MC_LATENCY_ALLOWANCE_NVDEC_0				0x3d8
+#define MC_MLL_MPCORER_PTSA_RATE				0x44c
+#define MC_FTOP_PTSA_RATE					0x50c
+#define MC_EMEM_ARB_TIMING_RFCPB				0x6c0
+#define MC_EMEM_ARB_TIMING_CCDMW				0x6c4
+#define MC_EMEM_ARB_REFPB_HP_CTRL				0x6f0
+#define MC_EMEM_ARB_REFPB_BANK_CTRL				0x6f4
+#define MC_PTSA_GRANT_DECREMENT					0x960
+#define MC_EMEM_ARB_DHYST_CTRL					0xbcc
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0			0xbd0
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1			0xbd4
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2			0xbd8
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3			0xbdc
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4			0xbe0
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5			0xbe4
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6			0xbe8
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7			0xbec
+
+#endif
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index b42bdb667e85..055af0e08a2e 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -11,7 +11,6 @@
 
 #include <linux/clk.h>
 #include <linux/clk/tegra.h>
-#include <linux/completion.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -327,7 +326,6 @@ struct emc_timing {
 struct tegra_emc {
 	struct device *dev;
 	struct tegra_mc *mc;
-	struct completion clk_handshake_complete;
 	struct notifier_block clk_nb;
 	struct clk *clk;
 	void __iomem *regs;
@@ -374,52 +372,10 @@ static int emc_seq_update_timing(struct tegra_emc *emc)
 	return 0;
 }
 
-static void emc_complete_clk_change(struct tegra_emc *emc)
-{
-	struct emc_timing *timing = emc->new_timing;
-	unsigned int dram_num;
-	bool failed = false;
-	int err;
-
-	/* re-enable auto-refresh */
-	dram_num = tegra_mc_get_emem_device_count(emc->mc);
-	writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
-		       emc->regs + EMC_REFCTRL);
-
-	/* restore auto-calibration */
-	if (emc->vref_cal_toggle)
-		writel_relaxed(timing->emc_auto_cal_interval,
-			       emc->regs + EMC_AUTO_CAL_INTERVAL);
-
-	/* restore dynamic self-refresh */
-	if (timing->emc_cfg_dyn_self_ref) {
-		emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
-		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
-	}
-
-	/* set number of clocks to wait after each ZQ command */
-	if (emc->zcal_long)
-		writel_relaxed(timing->emc_zcal_cnt_long,
-			       emc->regs + EMC_ZCAL_WAIT_CNT);
-
-	/* wait for writes to settle */
-	udelay(2);
-
-	/* update restored timing */
-	err = emc_seq_update_timing(emc);
-	if (err)
-		failed = true;
-
-	/* restore early ACK */
-	mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
-
-	WRITE_ONCE(emc->bad_state, failed);
-}
-
 static irqreturn_t tegra_emc_isr(int irq, void *data)
 {
 	struct tegra_emc *emc = data;
-	u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
 	u32 status;
 
 	status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
@@ -434,18 +390,6 @@ static irqreturn_t tegra_emc_isr(int irq, void *data)
 	/* clear interrupts */
 	writel_relaxed(status, emc->regs + EMC_INTSTATUS);
 
-	/* notify about EMC-CAR handshake completion */
-	if (status & EMC_CLKCHANGE_COMPLETE_INT) {
-		if (completion_done(&emc->clk_handshake_complete)) {
-			dev_err_ratelimited(emc->dev,
-					    "bogus handshake interrupt\n");
-			return IRQ_NONE;
-		}
-
-		emc_complete_clk_change(emc);
-		complete(&emc->clk_handshake_complete);
-	}
-
 	return IRQ_HANDLED;
 }
 
@@ -801,29 +745,58 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
 	 */
 	mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
 
-	reinit_completion(&emc->clk_handshake_complete);
-
-	emc->new_timing = timing;
-
 	return 0;
 }
 
 static int emc_complete_timing_change(struct tegra_emc *emc,
 				      unsigned long rate)
 {
-	unsigned long timeout;
+	struct emc_timing *timing = emc_find_timing(emc, rate);
+	unsigned int dram_num;
+	int err;
+	u32 v;
 
-	timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
-					      msecs_to_jiffies(100));
-	if (timeout == 0) {
-		dev_err(emc->dev, "emc-car handshake failed\n");
-		return -EIO;
+	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+						v & EMC_CLKCHANGE_COMPLETE_INT,
+						1, 100);
+	if (err) {
+		dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+		return err;
 	}
 
-	if (READ_ONCE(emc->bad_state))
-		return -EIO;
+	/* re-enable auto-refresh */
+	dram_num = tegra_mc_get_emem_device_count(emc->mc);
+	writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
+		       emc->regs + EMC_REFCTRL);
+
+	/* restore auto-calibration */
+	if (emc->vref_cal_toggle)
+		writel_relaxed(timing->emc_auto_cal_interval,
+			       emc->regs + EMC_AUTO_CAL_INTERVAL);
 
-	return 0;
+	/* restore dynamic self-refresh */
+	if (timing->emc_cfg_dyn_self_ref) {
+		emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
+		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+	}
+
+	/* set number of clocks to wait after each ZQ command */
+	if (emc->zcal_long)
+		writel_relaxed(timing->emc_zcal_cnt_long,
+			       emc->regs + EMC_ZCAL_WAIT_CNT);
+
+	/* wait for writes to settle */
+	udelay(2);
+
+	/* update restored timing */
+	err = emc_seq_update_timing(emc);
+	if (!err)
+		emc->bad_state = false;
+
+	/* restore early ACK */
+	mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
+
+	return err;
 }
 
 static int emc_unprepare_timing_change(struct tegra_emc *emc,
@@ -1033,7 +1006,7 @@ static struct device_node *emc_find_node_by_ram_code(struct device *dev)
 
 static int emc_setup_hw(struct tegra_emc *emc)
 {
-	u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
 	u32 fbio_cfg5, emc_cfg, emc_dbg;
 	enum emc_dram_type dram_type;
 
@@ -1275,11 +1248,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
 		return;
 	}
 
-	debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+	debugfs_create_file("available_rates", 0444, emc->debugfs.root,
 			    emc, &tegra_emc_debug_available_rates_fops);
-	debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+	debugfs_create_file("min_rate", 0644, emc->debugfs.root,
 			    emc, &tegra_emc_debug_min_rate_fops);
-	debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+	debugfs_create_file("max_rate", 0644, emc->debugfs.root,
 			    emc, &tegra_emc_debug_max_rate_fops);
 }
 
@@ -1321,7 +1294,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
 	if (!emc->mc)
 		return -EPROBE_DEFER;
 
-	init_completion(&emc->clk_handshake_complete);
 	emc->clk_nb.notifier_call = emc_clk_change_notify;
 	emc->dev = &pdev->dev;
 
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index db526dbf71ee..159a16f5e7d6 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -27,7 +27,7 @@
 #define WSTROBE_SHIFT	20
 #define WSETUP_SHIFT	26
 #define EW_SHIFT	30
-#define SS_SHIFT	31
+#define SSTROBE_SHIFT	31
 
 #define TA(x)		((x) << TA_SHIFT)
 #define RHOLD(x)	((x) << RHOLD_SHIFT)
@@ -37,7 +37,7 @@
 #define WSTROBE(x)	((x) << WSTROBE_SHIFT)
 #define WSETUP(x)	((x) << WSETUP_SHIFT)
 #define EW(x)		((x) << EW_SHIFT)
-#define SS(x)		((x) << SS_SHIFT)
+#define SSTROBE(x)	((x) << SSTROBE_SHIFT)
 
 #define ASIZE_MAX	0x1
 #define TA_MAX		0x3
@@ -48,7 +48,7 @@
 #define WSTROBE_MAX	0x3f
 #define WSETUP_MAX	0xf
 #define EW_MAX		0x1
-#define SS_MAX		0x1
+#define SSTROBE_MAX	0x1
 #define NUM_CS		4
 
 #define TA_VAL(x)	(((x) & TA(TA_MAX)) >> TA_SHIFT)
@@ -59,7 +59,7 @@
 #define WSTROBE_VAL(x)	(((x) & WSTROBE(WSTROBE_MAX)) >> WSTROBE_SHIFT)
 #define WSETUP_VAL(x)	(((x) & WSETUP(WSETUP_MAX)) >> WSETUP_SHIFT)
 #define EW_VAL(x)	(((x) & EW(EW_MAX)) >> EW_SHIFT)
-#define SS_VAL(x)	(((x) & SS(SS_MAX)) >> SS_SHIFT)
+#define SSTROBE_VAL(x)	(((x) & SSTROBE(SSTROBE_MAX)) >> SSTROBE_SHIFT)
 
 #define NRCSR_OFFSET	0x00
 #define AWCCR_OFFSET	0x04
@@ -67,7 +67,7 @@
 
 #define ACR_ASIZE_MASK	0x3
 #define ACR_EW_MASK	BIT(30)
-#define ACR_SS_MASK	BIT(31)
+#define ACR_SSTROBE_MASK	BIT(31)
 #define ASIZE_16BIT	1
 
 #define CONFIG_MASK	(TA(TA_MAX) | \
@@ -77,7 +77,7 @@
 				WHOLD(WHOLD_MAX) | \
 				WSTROBE(WSTROBE_MAX) | \
 				WSETUP(WSETUP_MAX) | \
-				EW(EW_MAX) | SS(SS_MAX) | \
+				EW(EW_MAX) | SSTROBE(SSTROBE_MAX) | \
 				ASIZE_MAX)
 
 /**
@@ -204,7 +204,7 @@ static int aemif_config_abus(struct platform_device *pdev, int csnum)
 	if (data->enable_ew)
 		set |= ACR_EW_MASK;
 	if (data->enable_ss)
-		set |= ACR_SS_MASK;
+		set |= ACR_SSTROBE_MASK;
 
 	val = readl(aemif->base + offset);
 	val &= ~CONFIG_MASK;
@@ -246,7 +246,7 @@ static void aemif_get_hw_params(struct platform_device *pdev, int csnum)
 	data->wstrobe = aemif_cycles_to_nsec(WSTROBE_VAL(val), clk_rate);
 	data->wsetup = aemif_cycles_to_nsec(WSETUP_VAL(val), clk_rate);
 	data->enable_ew = EW_VAL(val);
-	data->enable_ss = SS_VAL(val);
+	data->enable_ss = SSTROBE_VAL(val);
 	data->asize = val & ASIZE_MAX;
 }
 
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
index 9c90f815ad3a..6c747c1e98cb 100644
--- a/drivers/memory/ti-emif-pm.c
+++ b/drivers/memory/ti-emif-pm.c
@@ -248,7 +248,7 @@ MODULE_DEVICE_TABLE(of, ti_emif_of_match);
 static int ti_emif_resume(struct device *dev)
 {
 	unsigned long tmp =
-			__raw_readl((void *)emif_instance->ti_emif_sram_virt);
+			__raw_readl((void __iomem *)emif_instance->ti_emif_sram_virt);
 
 	/*
 	 * Check to see if what we are copying is already present in the