Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/class.c                   |  17
-rw-r--r--  drivers/base/core.c                    |   5
-rw-r--r--  drivers/base/dd.c                      |   3
-rw-r--r--  drivers/base/memory.c                  |  62
-rw-r--r--  drivers/base/platform.c                |  54
-rw-r--r--  drivers/base/power/Makefile            |   4
-rw-r--r--  drivers/base/power/clock_ops.c         | 194
-rw-r--r--  drivers/base/power/common.c            |  86
-rw-r--r--  drivers/base/power/domain.c            | 352
-rw-r--r--  drivers/base/power/main.c              |  42
-rw-r--r--  drivers/base/power/opp.c               |  30
-rw-r--r--  drivers/base/power/power.h             |  10
-rw-r--r--  drivers/base/power/qos.c               | 419
-rw-r--r--  drivers/base/power/runtime.c           | 127
-rw-r--r--  drivers/base/power/wakeup.c            |   4
-rw-r--r--  drivers/base/regmap/Kconfig            |   2
-rw-r--r--  drivers/base/regmap/Makefile           |   3
-rw-r--r--  drivers/base/regmap/internal.h         | 128
-rw-r--r--  drivers/base/regmap/regcache-indexed.c |  64
-rw-r--r--  drivers/base/regmap/regcache-lzo.c     | 361
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c  | 345
-rw-r--r--  drivers/base/regmap/regcache.c         | 401
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c   | 209
-rw-r--r--  drivers/base/regmap/regmap-i2c.c       |   2
-rw-r--r--  drivers/base/regmap/regmap-spi.c       |   2
-rw-r--r--  drivers/base/regmap/regmap.c           | 226
26 files changed, 2755 insertions(+), 397 deletions(-)
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 4f1df2e8fd74..b80d91cc8c3a 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -47,6 +47,18 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
 	return ret;
 }
 
+static const void *class_attr_namespace(struct kobject *kobj,
+					const struct attribute *attr)
+{
+	struct class_attribute *class_attr = to_class_attr(attr);
+	struct subsys_private *cp = to_subsys_private(kobj);
+	const void *ns = NULL;
+
+	if (class_attr->namespace)
+		ns = class_attr->namespace(cp->class, class_attr);
+	return ns;
+}
+
 static void class_release(struct kobject *kobj)
 {
 	struct subsys_private *cp = to_subsys_private(kobj);
@@ -72,8 +84,9 @@ static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject
 }
 
 static const struct sysfs_ops class_sysfs_ops = {
-	.show	= class_attr_show,
-	.store	= class_attr_store,
+	.show	   = class_attr_show,
+	.store	   = class_attr_store,
+	.namespace = class_attr_namespace,
 };
 
 static struct kobj_type class_ktype = {
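
With class_attr_namespace() wired into class_sysfs_ops, a class attribute can
become namespace-aware by supplying its own callback. A minimal sketch of the
consumer side (not part of this patch; foo_show() and my_ns_tag() are
hypothetical):

static const void *foo_namespace(struct class *class,
				 const struct class_attribute *attr)
{
	return my_ns_tag();	/* hypothetical: tag of the owning namespace */
}

static struct class_attribute class_attr_foo = {
	.attr		= { .name = "foo", .mode = 0444 },
	.show		= foo_show,
	.namespace	= foo_namespace,
};
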
diff --git a/drivers/base/core.c b/drivers/base/core.c
index bc8729d603a7..82c865452c70 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1764,8 +1764,8 @@ void device_shutdown(void)
 
 #ifdef CONFIG_PRINTK
 
-static int __dev_printk(const char *level, const struct device *dev,
-			struct va_format *vaf)
+int __dev_printk(const char *level, const struct device *dev,
+		 struct va_format *vaf)
 {
 	if (!dev)
 		return printk("%s(NULL device *): %pV", level, vaf);
@@ -1773,6 +1773,7 @@ static int __dev_printk(const char *level, const struct device *dev,
 	return printk("%s%s %s: %pV",
 		      level, dev_driver_string(dev), dev_name(dev), vaf);
 }
+EXPORT_SYMBOL(__dev_printk);
 
 int dev_printk(const char *level, const struct device *dev,
 	       const char *fmt, ...)
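
Exporting __dev_printk() lets modules build their own printf-style device
loggers on top of the standard "<level><driver> <device>: <message>" format
via struct va_format (%pV). A sketch of such a wrapper (my_dev_warn() is
hypothetical; dev_printk() itself follows the same pattern):

static int my_dev_warn(const struct device *dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	r = __dev_printk(KERN_WARNING, dev, &vaf);
	va_end(args);

	return r;
}
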
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 6658da743c3a..142e3d600f14 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -147,6 +147,9 @@ probe_failed:
 		printk(KERN_WARNING
 		       "%s: probe of %s failed with error %d\n",
 		       drv->name, dev_name(dev), ret);
+	} else {
+		pr_debug("%s: probe of %s rejects match %d\n",
+		       drv->name, dev_name(dev), ret);
 	}
 	/*
 	 * Ignore errors returned by ->probe so that the next driver can try
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2840ed4668c1..8272d92d22c0 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -224,13 +224,48 @@ int memory_isolate_notify(unsigned long val, void *v)
 }
 
 /*
+ * The probe routines leave the pages reserved, just as the bootmem code does.
+ * Make sure they're still that way.
+ */
+static bool pages_correctly_reserved(unsigned long start_pfn,
+					unsigned long nr_pages)
+{
+	int i, j;
+	struct page *page;
+	unsigned long pfn = start_pfn;
+
+	/*
+	 * memmap between sections is not contiguous except with
+	 * SPARSEMEM_VMEMMAP. We look up the page once per section
+	 * and assume memmap is contiguous within each section
+	 */
+	for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
+		if (WARN_ON_ONCE(!pfn_valid(pfn)))
+			return false;
+		page = pfn_to_page(pfn);
+
+		for (j = 0; j < PAGES_PER_SECTION; j++) {
+			if (PageReserved(page + j))
+				continue;
+
+			printk(KERN_WARNING "section number %ld page number %d "
+				"not reserved, was it already online?\n",
+				pfn_to_section_nr(pfn), j);
+
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/*
  * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
  * OK to have direct references to sparsemem variables in here.
  */
 static int
 memory_block_action(unsigned long phys_index, unsigned long action)
 {
-	int i;
 	unsigned long start_pfn, start_paddr;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 	struct page *first_page;
@@ -238,26 +273,13 @@ memory_block_action(unsigned long phys_index, unsigned long action)
 
 	first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
 
-	/*
-	 * The probe routines leave the pages reserved, just
-	 * as the bootmem code does.  Make sure they're still
-	 * that way.
-	 */
-	if (action == MEM_ONLINE) {
-		for (i = 0; i < nr_pages; i++) {
-			if (PageReserved(first_page+i))
-				continue;
-
-			printk(KERN_WARNING "section number %ld page number %d "
-				"not reserved, was it already online?\n",
-				phys_index, i);
-			return -EBUSY;
-		}
-	}
-
 	switch (action) {
 		case MEM_ONLINE:
 			start_pfn = page_to_pfn(first_page);
+
+			if (!pages_correctly_reserved(start_pfn, nr_pages))
+				return -EBUSY;
+
 			ret = online_pages(start_pfn, nr_pages);
 			break;
 		case MEM_OFFLINE:
@@ -380,9 +402,13 @@ memory_probe_store(struct class *class, struct class_attribute *attr,
 	u64 phys_addr;
 	int nid;
 	int i, ret;
+	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
 
 	phys_addr = simple_strtoull(buf, NULL, 0);
 
+	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
+		return -EINVAL;
+
 	for (i = 0; i < sections_per_block; i++) {
 		nid = memory_add_physaddr_to_nid(phys_addr);
 		ret = add_memory(nid, phys_addr,
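
The new mask test in memory_probe_store() rejects probe addresses that are
not aligned to a whole memory block. A worked illustration (not from the
patch), assuming 4 KiB pages (PAGE_SHIFT = 12), PAGES_PER_SECTION = 32768 and
sections_per_block = 1:

/* block size = 32768 << 12 = 128 MiB, so the low 27 bits must be zero */
static bool probe_addr_is_block_aligned(u64 phys_addr,
					unsigned long pages_per_block)
{
	return (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1)) == 0;
}
/* e.g. 0x10000000 (256 MiB) passes, 0x10001000 now fails with -EINVAL */
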
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 99a5272d7c2f..7a24895543e7 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -375,52 +375,64 @@ void platform_device_unregister(struct platform_device *pdev)
 EXPORT_SYMBOL_GPL(platform_device_unregister);
 
 /**
- * platform_device_register_resndata - add a platform-level device with
+ * platform_device_register_full - add a platform-level device with
  * resources and platform-specific data
  *
- * @parent: parent device for the device we're adding
- * @name: base name of the device we're adding
- * @id: instance id
- * @res: set of resources that needs to be allocated for the device
- * @num: number of resources
- * @data: platform specific data for this platform device
- * @size: size of platform specific data
+ * @pdevinfo: data used to create device
  *
  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  */
-struct platform_device *platform_device_register_resndata(
-		struct device *parent,
-		const char *name, int id,
-		const struct resource *res, unsigned int num,
-		const void *data, size_t size)
+struct platform_device *platform_device_register_full(
+		struct platform_device_info *pdevinfo)
 {
 	int ret = -ENOMEM;
 	struct platform_device *pdev;
 
-	pdev = platform_device_alloc(name, id);
+	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
 	if (!pdev)
-		goto err;
-
-	pdev->dev.parent = parent;
+		goto err_alloc;
+
+	pdev->dev.parent = pdevinfo->parent;
+
+	if (pdevinfo->dma_mask) {
+		/*
+		 * This memory isn't freed when the device is put,
+		 * I don't have a nice idea for that though.  Conceptually
+		 * dma_mask in struct device should not be a pointer.
+		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
+		 */
+		pdev->dev.dma_mask =
+			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
+		if (!pdev->dev.dma_mask)
+			goto err;
+
+		*pdev->dev.dma_mask = pdevinfo->dma_mask;
+		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
+	}
 
-	ret = platform_device_add_resources(pdev, res, num);
+	ret = platform_device_add_resources(pdev,
+			pdevinfo->res, pdevinfo->num_res);
 	if (ret)
 		goto err;
 
-	ret = platform_device_add_data(pdev, data, size);
+	ret = platform_device_add_data(pdev,
+			pdevinfo->data, pdevinfo->size_data);
 	if (ret)
 		goto err;
 
 	ret = platform_device_add(pdev);
 	if (ret) {
 err:
+		kfree(pdev->dev.dma_mask);
+
+err_alloc:
 		platform_device_put(pdev);
 		return ERR_PTR(ret);
 	}
 
 	return pdev;
 }
-EXPORT_SYMBOL_GPL(platform_device_register_resndata);
+EXPORT_SYMBOL_GPL(platform_device_register_full);
 
 static int platform_drv_probe(struct device *_dev)
 {
@@ -614,7 +626,7 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
 		return rc;
 
 	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
-		(pdev->id_entry) ? pdev->id_entry->name : pdev->name);
+			pdev->name);
 	return 0;
 }
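
The consolidated info structure makes call sites self-documenting. A hedged
usage sketch for the new interface (my_res, my_pdata and the "my-dev" name
are hypothetical; the field names match the pdevinfo accesses above):

static struct platform_device_info my_devinfo = {
	.parent		= NULL,
	.name		= "my-dev",
	.id		= -1,
	.res		= my_res,
	.num_res	= ARRAY_SIZE(my_res),
	.data		= &my_pdata,
	.size_data	= sizeof(my_pdata),
	.dma_mask	= DMA_BIT_MASK(32),
};

	pdev = platform_device_register_full(&my_devinfo);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);
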
 
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2639ae79a372..81676dd17900 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o
+obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o
 obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
@@ -6,4 +6,4 @@ obj-$(CONFIG_PM_OPP)	+= opp.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS)	+=  domain.o
 obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 2c18d584066d..5f0f85d5c576 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -10,18 +10,13 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/pm.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 
 #ifdef CONFIG_PM
 
-struct pm_clk_data {
-	struct list_head clock_list;
-	spinlock_t lock;
-};
-
 enum pce_status {
 	PCE_STATUS_NONE = 0,
 	PCE_STATUS_ACQUIRED,
@@ -36,9 +31,20 @@ struct pm_clock_entry {
 	enum pce_status status;
 };
 
-static struct pm_clk_data *__to_pcd(struct device *dev)
+/**
+ * pm_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
 {
-	return dev ? dev->power.subsys_data : NULL;
+	ce->clk = clk_get(dev, ce->con_id);
+	if (IS_ERR(ce->clk)) {
+		ce->status = PCE_STATUS_ERROR;
+	} else {
+		ce->status = PCE_STATUS_ACQUIRED;
+		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+	}
 }
 
 /**
@@ -51,10 +57,10 @@ static struct pm_clk_data *__to_pcd(struct device *dev)
  */
 int pm_clk_add(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!pcd)
+	if (!psd)
 		return -EINVAL;
 
 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -73,26 +79,23 @@ int pm_clk_add(struct device *dev, const char *con_id)
 		}
 	}
 
-	spin_lock_irq(&pcd->lock);
-	list_add_tail(&ce->node, &pcd->clock_list);
-	spin_unlock_irq(&pcd->lock);
+	pm_clk_acquire(dev, ce);
+
+	spin_lock_irq(&psd->lock);
+	list_add_tail(&ce->node, &psd->clock_list);
+	spin_unlock_irq(&psd->lock);
 	return 0;
 }
 
 /**
  * __pm_clk_remove - Destroy PM clock entry.
  * @ce: PM clock entry to destroy.
- *
- * This routine must be called under the spinlock protecting the PM list of
- * clocks corresponding the the @ce's device.
  */
 static void __pm_clk_remove(struct pm_clock_entry *ce)
 {
 	if (!ce)
 		return;
 
-	list_del(&ce->node);
-
 	if (ce->status < PCE_STATUS_ERROR) {
 		if (ce->status == PCE_STATUS_ENABLED)
 			clk_disable(ce->clk);
@@ -101,9 +104,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
 			clk_put(ce->clk);
 	}
 
-	if (ce->con_id)
-		kfree(ce->con_id);
-
+	kfree(ce->con_id);
 	kfree(ce);
 }
 
@@ -117,50 +118,58 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
  */
 void pm_clk_remove(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!pcd)
+	if (!psd)
 		return;
 
-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
 
-	list_for_each_entry(ce, &pcd->clock_list, node) {
-		if (!con_id && !ce->con_id) {
-			__pm_clk_remove(ce);
-			break;
-		} else if (!con_id || !ce->con_id) {
+	list_for_each_entry(ce, &psd->clock_list, node) {
+		if (!con_id && !ce->con_id)
+			goto remove;
+		else if (!con_id || !ce->con_id)
 			continue;
-		} else if (!strcmp(con_id, ce->con_id)) {
-			__pm_clk_remove(ce);
-			break;
-		}
+		else if (!strcmp(con_id, ce->con_id))
+			goto remove;
 	}
 
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
+	return;
+
+ remove:
+	list_del(&ce->node);
+	spin_unlock_irq(&psd->lock);
+
+	__pm_clk_remove(ce);
 }
 
 /**
  * pm_clk_init - Initialize a device's list of power management clocks.
  * @dev: Device to initialize the list of PM clocks for.
  *
- * Allocate a struct pm_clk_data object, initialize its lock member and
- * make the @dev's power.subsys_data field point to it.
+ * Initialize the lock and clock_list members of the device's pm_subsys_data
+ * object.
  */
-int pm_clk_init(struct device *dev)
+void pm_clk_init(struct device *dev)
 {
-	struct pm_clk_data *pcd;
-
-	pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
-	if (!pcd) {
-		dev_err(dev, "Not enough memory for PM clock data.\n");
-		return -ENOMEM;
-	}
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	if (psd)
+		INIT_LIST_HEAD(&psd->clock_list);
+}
 
-	INIT_LIST_HEAD(&pcd->clock_list);
-	spin_lock_init(&pcd->lock);
-	dev->power.subsys_data = pcd;
-	return 0;
+/**
+ * pm_clk_create - Create and initialize a device's list of PM clocks.
+ * @dev: Device to create and initialize the list of PM clocks for.
+ *
+ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
+ * members and make the @dev's power.subsys_data field point to it.
+ */
+int pm_clk_create(struct device *dev)
+{
+	int ret = dev_pm_get_subsys_data(dev);
+	return ret < 0 ? ret : 0;
 }
 
 /**
@@ -168,27 +177,33 @@ int pm_clk_init(struct device *dev)
  * @dev: Device to destroy the list of PM clocks for.
  *
  * Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_clk_data object pointed to by it before and free
+ * from the struct pm_subsys_data object pointed to by it before and free
  * that object.
  */
 void pm_clk_destroy(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce, *c;
+	struct list_head list;
 
-	if (!pcd)
+	if (!psd)
 		return;
 
-	dev->power.subsys_data = NULL;
+	INIT_LIST_HEAD(&list);
 
-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
 
-	list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
-		__pm_clk_remove(ce);
+	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
+		list_move(&ce->node, &list);
 
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
 
-	kfree(pcd);
+	dev_pm_put_subsys_data(dev);
+
+	list_for_each_entry_safe_reverse(ce, c, &list, node) {
+		list_del(&ce->node);
+		__pm_clk_remove(ce);
+	}
 }
 
 #endif /* CONFIG_PM */
@@ -196,50 +211,30 @@ void pm_clk_destroy(struct device *dev)
 #ifdef CONFIG_PM_RUNTIME
 
 /**
- * pm_clk_acquire - Acquire a device clock.
- * @dev: Device whose clock is to be acquired.
- * @con_id: Connection ID of the clock.
- */
-static void pm_clk_acquire(struct device *dev,
-				    struct pm_clock_entry *ce)
-{
-	ce->clk = clk_get(dev, ce->con_id);
-	if (IS_ERR(ce->clk)) {
-		ce->status = PCE_STATUS_ERROR;
-	} else {
-		ce->status = PCE_STATUS_ACQUIRED;
-		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
-	}
-}
-
-/**
  * pm_clk_suspend - Disable clocks in a device's PM clock list.
  * @dev: Device to disable the clocks for.
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!pcd)
+	if (!psd)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
-
-	list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
-		if (ce->status == PCE_STATUS_NONE)
-			pm_clk_acquire(dev, ce);
+	spin_lock_irqsave(&psd->lock, flags);
 
+	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_disable(ce->clk);
 			ce->status = PCE_STATUS_ACQUIRED;
 		}
 	}
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -250,28 +245,25 @@ int pm_clk_suspend(struct device *dev)
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!pcd)
+	if (!psd)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
-
-	list_for_each_entry(ce, &pcd->clock_list, node) {
-		if (ce->status == PCE_STATUS_NONE)
-			pm_clk_acquire(dev, ce);
+	spin_lock_irqsave(&psd->lock, flags);
 
+	list_for_each_entry(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_enable(ce->clk);
 			ce->status = PCE_STATUS_ENABLED;
 		}
 	}
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -309,7 +301,7 @@ static int pm_clk_notify(struct notifier_block *nb,
 		if (dev->pm_domain)
 			break;
 
-		error = pm_clk_init(dev);
+		error = pm_clk_create(dev);
 		if (error)
 			break;
 
@@ -344,22 +336,22 @@ static int pm_clk_notify(struct notifier_block *nb,
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	/* If there is no driver, the clocks are already disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+	list_for_each_entry_reverse(ce, &psd->clock_list, node)
 		clk_disable(ce->clk);
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -370,22 +362,22 @@ int pm_clk_suspend(struct device *dev)
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	/* If there is no driver, the clocks should remain disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry(ce, &pcd->clock_list, node)
+	list_for_each_entry(ce, &psd->clock_list, node)
 		clk_enable(ce->clk);
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
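
After this rework pm_clk_create() replaces pm_clk_init() as the allocating
entry point, and pm_clk_add() acquires the clock up front via pm_clk_acquire()
instead of lazily in the suspend/resume paths. Typical subsystem usage,
sketched (the attach function is hypothetical):

static int my_subsys_attach_dev(struct device *dev)
{
	int ret;

	ret = pm_clk_create(dev);	/* allocates or refs pm_subsys_data */
	if (ret)
		return ret;

	ret = pm_clk_add(dev, NULL);	/* default, con_id-less clock */
	if (ret)
		pm_clk_destroy(dev);

	return ret;
}
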
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
new file mode 100644
index 000000000000..29820c396182
--- /dev/null
+++ b/drivers/base/power/common.c
@@ -0,0 +1,86 @@
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter.  Return 1 if a new object has been created, otherwise
+ * return 0 or error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+	if (!psd)
+		return -ENOMEM;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data) {
+		dev->power.subsys_data->refcount++;
+	} else {
+		spin_lock_init(&psd->lock);
+		psd->refcount = 1;
+		dev->power.subsys_data = psd;
+		pm_clk_init(dev);
+		psd = NULL;
+		ret = 1;
+	}
+
+	spin_unlock_irq(&dev->power.lock);
+
+	/* kfree() verifies that its argument is nonzero. */
+	kfree(psd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed.  Return 1 if that happens or 0
+ * otherwise.
+ */
+int dev_pm_put_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	spin_lock_irq(&dev->power.lock);
+
+	psd = dev_to_psd(dev);
+	if (!psd) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (--psd->refcount == 0) {
+		dev->power.subsys_data = NULL;
+		kfree(psd);
+		ret = 1;
+	}
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
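
The get/put pair is reference counted, so several users (the PM clock list
above, the generic PM domain code below) can share one pm_subsys_data. The
intended pairing, condensed from the callers added in this commit:

	ret = dev_pm_get_subsys_data(dev);   /* 1: allocated, 0: ref bumped */
	if (ret < 0)
		return ret;
	/* ... use dev_to_psd(dev)->clock_list or ->domain_data ... */
	dev_pm_put_subsys_data(dev);         /* 1: freed, 0: still referenced */
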
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 1c374579407c..6790cf7eba5a 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 	return pd_to_genpd(dev->pm_domain);
 }
 
-static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
-	if (!WARN_ON(genpd->sd_count == 0))
-			genpd->sd_count--;
+	bool ret = false;
+
+	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+		ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+	return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+	atomic_inc(&genpd->sd_count);
+	smp_mb__after_atomic_inc();
 }
 
 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
@@ -71,81 +81,119 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
 }
 
 /**
- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  *
- * Restore power to @genpd and all of its parents so that it is possible to
+ * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
+int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
+	DEFINE_WAIT(wait);
 	int ret = 0;
 
- start:
-	if (parent) {
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-	} else {
+	/* If the domain's master is being waited for, we have to wait too. */
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (genpd->status != GPD_STATE_WAIT_MASTER)
+			break;
+		mutex_unlock(&genpd->lock);
+
+		schedule();
+
 		mutex_lock(&genpd->lock);
 	}
+	finish_wait(&genpd->status_wait_queue, &wait);
 
 	if (genpd->status == GPD_STATE_ACTIVE
 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
-		goto out;
+		return 0;
 
 	if (genpd->status != GPD_STATE_POWER_OFF) {
 		genpd_set_active(genpd);
-		goto out;
+		return 0;
 	}
 
-	if (parent && parent->status != GPD_STATE_ACTIVE) {
+	/*
+	 * The list is guaranteed not to change while the loop below is being
+	 * executed, unless one of the masters' .power_on() callbacks fiddles
+	 * with it.
+	 */
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_inc(link->master);
+		genpd->status = GPD_STATE_WAIT_MASTER;
+
 		mutex_unlock(&genpd->lock);
-		genpd_release_lock(parent);
 
-		ret = pm_genpd_poweron(parent);
-		if (ret)
-			return ret;
+		ret = pm_genpd_poweron(link->master);
 
-		goto start;
+		mutex_lock(&genpd->lock);
+
+		/*
+		 * The "wait for master" status is guaranteed not to change
+		 * while the master is powering on.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		wake_up_all(&genpd->status_wait_queue);
+		if (ret) {
+			genpd_sd_counter_dec(link->master);
+			goto err;
+		}
 	}
 
 	if (genpd->power_on) {
 		ret = genpd->power_on(genpd);
 		if (ret)
-			goto out;
+			goto err;
 	}
 
 	genpd_set_active(genpd);
-	if (parent)
-		parent->sd_count++;
 
- out:
-	mutex_unlock(&genpd->lock);
-	if (parent)
-		genpd_release_lock(parent);
+	return 0;
+
+ err:
+	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+		genpd_sd_counter_dec(link->master);
 
 	return ret;
 }
 
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * @genpd: PM domain to power up.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+	int ret;
+
+	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	mutex_unlock(&genpd->lock);
+	return ret;
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
 /**
  * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @dle: Device list entry of the device to save the state of.
+ * @pdd: Domain data of the device to save the state of.
  * @genpd: PM domain the device belongs to.
  */
-static int __pm_genpd_save_device(struct dev_list_entry *dle,
+static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 				  struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
 
-	if (dle->need_restore)
+	if (gpd_data->need_restore)
 		return 0;
 
 	mutex_unlock(&genpd->lock);
@@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
 	mutex_lock(&genpd->lock);
 
 	if (!ret)
-		dle->need_restore = true;
+		gpd_data->need_restore = true;
 
 	return ret;
 }
 
 /**
  * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @dle: Device list entry of the device to restore the state of.
+ * @pdd: Domain data of the device to restore the state of.
  * @genpd: PM domain the device belongs to.
  */
-static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 				      struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 
-	if (!dle->need_restore)
+	if (!gpd_data->need_restore)
 		return;
 
 	mutex_unlock(&genpd->lock);
@@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
 
 	mutex_lock(&genpd->lock);
 
-	dle->need_restore = false;
+	gpd_data->need_restore = false;
 }
 
 /**
@@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
  */
 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
 {
-	return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+	return genpd->status == GPD_STATE_WAIT_MASTER
+		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
 }
 
 /**
@@ -238,8 +288,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent;
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
 	unsigned int not_suspended;
 	int ret = 0;
 
@@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	/*
 	 * Do not try to power off the domain in the following situations:
 	 * (1) The domain is already in the "power off" state.
-	 * (2) System suspend is in progress.
+	 * (2) The domain is waiting for its master to power up.
 	 * (3) One of the domain's devices is being resumed right now.
+	 * (4) System suspend is in progress.
 	 */
-	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
-	    || genpd->resume_count > 0)
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    || genpd->status == GPD_STATE_WAIT_MASTER
+	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
 		return 0;
 
-	if (genpd->sd_count > 0)
+	if (atomic_read(&genpd->sd_count) > 0)
 		return -EBUSY;
 
 	not_suspended = 0;
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
+		    || pdd->dev->power.irq_safe))
 			not_suspended++;
 
 	if (not_suspended > genpd->in_progress)
@@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	genpd->status = GPD_STATE_BUSY;
 	genpd->poweroff_task = current;
 
-	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
-		ret = __pm_genpd_save_device(dle, genpd);
+	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+		ret = atomic_read(&genpd->sd_count) == 0 ?
+			__pm_genpd_save_device(pdd, genpd) : -EBUSY;
+
+		if (genpd_abort_poweroff(genpd))
+			goto out;
+
 		if (ret) {
 			genpd_set_active(genpd);
 			goto out;
 		}
 
-		if (genpd_abort_poweroff(genpd))
-			goto out;
-
 		if (genpd->status == GPD_STATE_REPEAT) {
 			genpd->poweroff_task = NULL;
 			goto start;
 		}
 	}
 
-	parent = genpd->parent;
-	if (parent) {
-		mutex_unlock(&genpd->lock);
-
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
-		if (genpd_abort_poweroff(genpd)) {
-			genpd_release_lock(parent);
+	if (genpd->power_off) {
+		if (atomic_read(&genpd->sd_count) > 0) {
+			ret = -EBUSY;
 			goto out;
 		}
-	}
 
-	if (genpd->power_off) {
+		/*
+		 * If sd_count > 0 at this point, one of the subdomains hasn't
+		 * managed to call pm_genpd_poweron() for the master yet after
+		 * incrementing it.  In that case pm_genpd_poweron() will wait
+		 * for us to drop the lock, so we can call .power_off() and let
+		 * the pm_genpd_poweron() restore power for us (this shouldn't
+		 * happen very often).
+		 */
 		ret = genpd->power_off(genpd);
 		if (ret == -EBUSY) {
 			genpd_set_active(genpd);
-			if (parent)
-				genpd_release_lock(parent);
-
 			goto out;
 		}
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
 
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		if (parent->sd_count == 0)
-			genpd_queue_power_off_work(parent);
-
-		genpd_release_lock(parent);
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_queue_power_off_work(link->master);
 	}
 
  out:
@@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
+	might_sleep_if(!genpd->dev_irq_safe);
+
 	if (genpd->stop_device) {
 		int ret = genpd->stop_device(dev);
 		if (ret)
 			return ret;
 	}
 
+	/*
+	 * If power.irq_safe is set, this routine will be run with interrupts
+	 * off, so it can't use mutexes.
+	 */
+	if (dev->power.irq_safe)
+		return 0;
+
 	mutex_lock(&genpd->lock);
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
@@ -387,24 +445,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 }
 
 /**
- * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
- * @dev: Device to resume.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_runtime_resume(struct device *dev,
-				      struct generic_pm_domain *genpd)
-{
-	struct dev_list_entry *dle;
-
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev == dev) {
-			__pm_genpd_restore_device(dle, genpd);
-			break;
-		}
-	}
-}
-
-/**
  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
  * @dev: Device to resume.
  *
@@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	ret = pm_genpd_poweron(genpd);
-	if (ret)
-		return ret;
+	might_sleep_if(!genpd->dev_irq_safe);
+
+	/* If power.irq_safe, the PM domain is never powered off. */
+	if (dev->power.irq_safe)
+		goto out;
 
 	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	if (ret) {
+		mutex_unlock(&genpd->lock);
+		return ret;
+	}
 	genpd->status = GPD_STATE_BUSY;
 	genpd->resume_count++;
 	for (;;) {
@@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
 		mutex_lock(&genpd->lock);
 	}
 	finish_wait(&genpd->status_wait_queue, &wait);
-	__pm_genpd_runtime_resume(dev, genpd);
+	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
 	genpd->resume_count--;
 	genpd_set_active(genpd);
 	wake_up_all(&genpd->status_wait_queue);
 	mutex_unlock(&genpd->lock);
 
+ out:
 	if (genpd->start_device)
 		genpd->start_device(dev);
 
@@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void)
 #else
 
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
-static inline void __pm_genpd_runtime_resume(struct device *dev,
-					     struct generic_pm_domain *genpd) {}
 
 #define pm_genpd_runtime_suspend	NULL
 #define pm_genpd_runtime_resume		NULL
@@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
 #ifdef CONFIG_PM_SLEEP
 
 /**
- * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
  *
  * Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so.  Also, in that case propagate to its parent.
+ * hibernation) and do that if so.  Also, in that case propagate to its masters.
  *
  * This function is only called in "noirq" stages of system power transitions,
  * so it need not acquire locks (all of the "noirq" callbacks are executed
@@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
  */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
 
 	if (genpd->status == GPD_STATE_POWER_OFF)
 		return;
 
-	if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+	if (genpd->suspended_count != genpd->device_count
+	    || atomic_read(&genpd->sd_count) > 0)
 		return;
 
 	if (genpd->power_off)
 		genpd->power_off(genpd);
 
 	genpd->status = GPD_STATE_POWER_OFF;
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		pm_genpd_sync_poweroff(parent);
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		pm_genpd_sync_poweroff(link->master);
 	}
 }
 
@@ -666,7 +714,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (ret)
 		return ret;
 
-	if (device_may_wakeup(dev)
+	if (dev->power.wakeup_path
 	    && genpd->active_wakeup && genpd->active_wakeup(dev))
 		return 0;
 
@@ -890,7 +938,7 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev)
 	if (ret)
 		return ret;
 
-	if (device_may_wakeup(dev)
+	if (dev->power.wakeup_path
 	    && genpd->active_wakeup && genpd->active_wakeup(dev))
 		return 0;
 
@@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev)
  */
 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 {
-	struct dev_list_entry *dle;
+	struct generic_pm_domain_data *gpd_data;
+	struct pm_domain_data *pdd;
 	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 		goto out;
 	}
 
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev == dev) {
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev == dev) {
 			ret = -EINVAL;
 			goto out;
 		}
 
-	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
-	if (!dle) {
+	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+	if (!gpd_data) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	dle->dev = dev;
-	dle->need_restore = false;
-	list_add_tail(&dle->node, &genpd->dev_list);
 	genpd->device_count++;
 
-	spin_lock_irq(&dev->power.lock);
 	dev->pm_domain = &genpd->domain;
-	spin_unlock_irq(&dev->power.lock);
+	dev_pm_get_subsys_data(dev);
+	dev->power.subsys_data->domain_data = &gpd_data->base;
+	gpd_data->base.dev = dev;
+	gpd_data->need_restore = false;
+	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
 
  out:
 	genpd_release_lock(genpd);
@@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 			   struct device *dev)
 {
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
 	int ret = -EINVAL;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 		goto out;
 	}
 
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev != dev)
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		if (pdd->dev != dev)
 			continue;
 
-		spin_lock_irq(&dev->power.lock);
+		list_del_init(&pdd->list_node);
+		pdd->dev = NULL;
+		dev_pm_put_subsys_data(dev);
 		dev->pm_domain = NULL;
-		spin_unlock_irq(&dev->power.lock);
+		kfree(to_gpd_data(pdd));
 
 		genpd->device_count--;
-		list_del(&dle->node);
-		kfree(dle);
 
 		ret = 0;
 		break;
@@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  * @genpd: Master PM domain to add the subdomain to.
- * @new_subdomain: Subdomain to be added.
+ * @subdomain: Subdomain to be added.
  */
 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
-			   struct generic_pm_domain *new_subdomain)
+			   struct generic_pm_domain *subdomain)
 {
-	struct generic_pm_domain *subdomain;
+	struct gpd_link *link;
 	int ret = 0;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
  start:
 	genpd_acquire_lock(genpd);
-	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
 
-	if (new_subdomain->status != GPD_STATE_POWER_OFF
-	    && new_subdomain->status != GPD_STATE_ACTIVE) {
-		mutex_unlock(&new_subdomain->lock);
+	if (subdomain->status != GPD_STATE_POWER_OFF
+	    && subdomain->status != GPD_STATE_ACTIVE) {
+		mutex_unlock(&subdomain->lock);
 		genpd_release_lock(genpd);
 		goto start;
 	}
 
 	if (genpd->status == GPD_STATE_POWER_OFF
-	    &&  new_subdomain->status != GPD_STATE_POWER_OFF) {
+	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-		if (subdomain == new_subdomain) {
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		if (link->slave == subdomain && link->master == genpd) {
 			ret = -EINVAL;
 			goto out;
 		}
 	}
 
-	list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
-	new_subdomain->parent = genpd;
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	link->master = genpd;
+	list_add_tail(&link->master_node, &genpd->master_links);
+	link->slave = subdomain;
+	list_add_tail(&link->slave_node, &subdomain->slave_links);
 	if (subdomain->status != GPD_STATE_POWER_OFF)
-		genpd->sd_count++;
+		genpd_sd_counter_inc(genpd);
 
  out:
-	mutex_unlock(&new_subdomain->lock);
+	mutex_unlock(&subdomain->lock);
 	genpd_release_lock(genpd);
 
 	return ret;
@@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
- * @target: Subdomain to be removed.
+ * @subdomain: Subdomain to be removed.
  */
 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
-			      struct generic_pm_domain *target)
+			      struct generic_pm_domain *subdomain)
 {
-	struct generic_pm_domain *subdomain;
+	struct gpd_link *link;
 	int ret = -EINVAL;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
  start:
 	genpd_acquire_lock(genpd);
 
-	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-		if (subdomain != target)
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		if (link->slave != subdomain)
 			continue;
 
 		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
@@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 			goto start;
 		}
 
-		list_del(&subdomain->sd_node);
-		subdomain->parent = NULL;
+		list_del(&link->master_node);
+		list_del(&link->slave_node);
+		kfree(link);
 		if (subdomain->status != GPD_STATE_POWER_OFF)
 			genpd_sd_counter_dec(genpd);
 
@@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd))
 		return;
 
-	INIT_LIST_HEAD(&genpd->sd_node);
-	genpd->parent = NULL;
+	INIT_LIST_HEAD(&genpd->master_links);
+	INIT_LIST_HEAD(&genpd->slave_links);
 	INIT_LIST_HEAD(&genpd->dev_list);
-	INIT_LIST_HEAD(&genpd->sd_list);
 	mutex_init(&genpd->lock);
 	genpd->gov = gov;
 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
 	genpd->in_progress = 0;
-	genpd->sd_count = 0;
+	atomic_set(&genpd->sd_count, 0);
 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
 	init_waitqueue_head(&genpd->status_wait_queue);
 	genpd->poweroff_task = NULL;
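
The gpd_link rework turns the old single ->parent pointer into a many-to-many
graph, so a subdomain may now have several masters. A hedged setup sketch
(domain names hypothetical):

	pm_genpd_init(&soc_pd, NULL, false);
	pm_genpd_init(&mem_pd, NULL, false);
	pm_genpd_init(&gfx_pd, NULL, true);

	/* gfx_pd becomes a slave of both soc_pd and mem_pd */
	ret = pm_genpd_add_subdomain(&soc_pd, &gfx_pd);
	if (!ret)
		ret = pm_genpd_add_subdomain(&mem_pd, &gfx_pd);
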
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a85459126bc6..59f8ab235486 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -46,6 +46,7 @@ LIST_HEAD(dpm_prepared_list);
 LIST_HEAD(dpm_suspended_list);
 LIST_HEAD(dpm_noirq_list);
 
+struct suspend_stats suspend_stats;
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
 
@@ -65,6 +66,7 @@ void device_pm_init(struct device *dev)
 	spin_lock_init(&dev->power.lock);
 	pm_runtime_init(dev);
 	INIT_LIST_HEAD(&dev->power.entry);
+	dev->power.power_state = PMSG_INVALID;
 }
 
 /**
@@ -96,6 +98,7 @@ void device_pm_add(struct device *dev)
 		dev_warn(dev, "parent %s should not be sleeping\n",
 			dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
+	dev_pm_qos_constraints_init(dev);
 	mutex_unlock(&dpm_list_mtx);
 }
 
@@ -109,6 +112,7 @@ void device_pm_remove(struct device *dev)
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
+	dev_pm_qos_constraints_destroy(dev);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
 	device_wakeup_disable(dev);
@@ -464,8 +468,12 @@ void dpm_resume_noirq(pm_message_t state)
 		mutex_unlock(&dpm_list_mtx);
 
 		error = device_resume_noirq(dev, state);
-		if (error)
+		if (error) {
+			suspend_stats.failed_resume_noirq++;
+			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+			dpm_save_failed_dev(dev_name(dev));
 			pm_dev_err(dev, state, " early", error);
+		}
 
 		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
@@ -626,8 +634,12 @@ void dpm_resume(pm_message_t state)
 			mutex_unlock(&dpm_list_mtx);
 
 			error = device_resume(dev, state, false);
-			if (error)
+			if (error) {
+				suspend_stats.failed_resume++;
+				dpm_save_failed_step(SUSPEND_RESUME);
+				dpm_save_failed_dev(dev_name(dev));
 				pm_dev_err(dev, state, "", error);
+			}
 
 			mutex_lock(&dpm_list_mtx);
 		}
@@ -802,6 +814,9 @@ int dpm_suspend_noirq(pm_message_t state)
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
 			pm_dev_err(dev, state, " late", error);
+			suspend_stats.failed_suspend_noirq++;
+			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
+			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;
 		}
@@ -902,7 +917,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	}
 
  End:
-	dev->power.is_suspended = !error;
+	if (!error) {
+		dev->power.is_suspended = true;
+		if (dev->power.wakeup_path && dev->parent)
+			dev->parent->power.wakeup_path = true;
+	}
 
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
@@ -923,8 +942,10 @@ static void async_suspend(void *data, async_cookie_t cookie)
 	int error;
 
 	error = __device_suspend(dev, pm_transition, true);
-	if (error)
+	if (error) {
+		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, pm_transition, " async", error);
+	}
 
 	put_device(dev);
 }
@@ -967,6 +988,7 @@ int dpm_suspend(pm_message_t state)
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
 			pm_dev_err(dev, state, "", error);
+			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;
 		}
@@ -980,7 +1002,10 @@ int dpm_suspend(pm_message_t state)
 	async_synchronize_full();
 	if (!error)
 		error = async_error;
-	if (!error)
+	if (error) {
+		suspend_stats.failed_suspend++;
+		dpm_save_failed_step(SUSPEND_SUSPEND);
+	} else
 		dpm_show_time(starttime, state, NULL);
 	return error;
 }
@@ -999,6 +1024,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	device_lock(dev);
 
+	dev->power.wakeup_path = device_may_wakeup(dev);
+
 	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "preparing power domain ");
 		if (dev->pm_domain->ops.prepare)
@@ -1088,7 +1115,10 @@ int dpm_suspend_start(pm_message_t state)
 	int error;
 
 	error = dpm_prepare(state);
-	if (!error)
+	if (error) {
+		suspend_stats.failed_prepare++;
+		dpm_save_failed_step(SUSPEND_PREPARE);
+	} else
 		error = dpm_suspend(state);
 	return error;
 }
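
Two behavioral notes on the hunks above: device_prepare() now seeds
dev->power.wakeup_path from device_may_wakeup(), and a successful
__device_suspend() propagates the flag upward, so every ancestor of a
wakeup-capable device ends up marked as lying on a wakeup path. Condensed:

	dev->power.wakeup_path = device_may_wakeup(dev);	/* device_prepare() */
	/* ... */
	if (!error && dev->power.wakeup_path && dev->parent)
		dev->parent->power.wakeup_path = true;		/* __device_suspend() */

The genpd "noirq" callbacks in domain.c above consult this flag instead of
device_may_wakeup() when deciding whether a domain must stay powered.
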
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index b23de185cb04..434a6c011675 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -73,6 +73,7 @@ struct opp {
  *		RCU usage: nodes are not modified in the list of device_opp,
  *		however addition is possible and is secured by dev_opp_list_lock
  * @dev:	device pointer
 * @head:	notifier head to notify of changes in OPP availability.
  * @opp_list:	list of opps
  *
  * This is an internal data structure maintaining the link to opps attached to
@@ -83,6 +84,7 @@ struct device_opp {
 	struct list_head node;
 
 	struct device *dev;
+	struct srcu_notifier_head head;
 	struct list_head opp_list;
 };
 
@@ -404,6 +406,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 		}
 
 		dev_opp->dev = dev;
+		srcu_init_notifier_head(&dev_opp->head);
 		INIT_LIST_HEAD(&dev_opp->opp_list);
 
 		/* Secure the device list modification */
@@ -428,6 +431,11 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 	list_add_rcu(&new_opp->node, head);
 	mutex_unlock(&dev_opp_list_lock);
 
+	/*
+	 * Notify the changes in the availability of the operable
+	 * frequency/voltage list.
+	 */
+	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
 	return 0;
 }
 
@@ -504,6 +512,14 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
 	mutex_unlock(&dev_opp_list_lock);
 	synchronize_rcu();
 
+	/* Notify the change of the OPP availability */
+	if (availability_req)
+		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
+					 new_opp);
+	else
+		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
+					 new_opp);
+
 	/* clean up old opp */
 	new_opp = opp;
 	goto out;
@@ -643,3 +659,17 @@ void opp_free_cpufreq_table(struct device *dev,
 	*table = NULL;
 }
 #endif		/* CONFIG_CPU_FREQ */
+
+/**
+ * opp_get_notifier() - find notifier_head of the device with opp
+ * @dev:	device pointer used to lookup device OPPs.
+ */
+struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+{
+	struct device_opp *dev_opp = find_device_opp(dev);
+
+	if (IS_ERR(dev_opp))
+		return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */
+
+	return &dev_opp->head;
+}
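
A hedged consumer sketch for the new notifier head (the handler and its
registration site are hypothetical; the events are the OPP_EVENT_* values
raised above):

static int my_opp_event(struct notifier_block *nb, unsigned long event,
			void *data)
{
	/* event is OPP_EVENT_ADD, OPP_EVENT_ENABLE or OPP_EVENT_DISABLE */
	return NOTIFY_OK;
}

static struct notifier_block my_opp_nb = {
	.notifier_call = my_opp_event,
};

	nh = opp_get_notifier(dev);	/* struct srcu_notifier_head *nh */
	if (!IS_ERR(nh))
		srcu_notifier_chain_register(nh, &my_opp_nb);
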
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index f2a25f18fde7..9bf62323aaf3 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,3 +1,5 @@
+#include <linux/pm_qos.h>
+
 #ifdef CONFIG_PM_RUNTIME
 
 extern void pm_runtime_init(struct device *dev);
@@ -35,15 +37,21 @@ extern void device_pm_move_last(struct device *);
 static inline void device_pm_init(struct device *dev)
 {
 	spin_lock_init(&dev->power.lock);
+	dev->power.power_state = PMSG_INVALID;
 	pm_runtime_init(dev);
 }
 
+static inline void device_pm_add(struct device *dev)
+{
+	dev_pm_qos_constraints_init(dev);
+}
+
 static inline void device_pm_remove(struct device *dev)
 {
+	dev_pm_qos_constraints_destroy(dev);
 	pm_runtime_remove(dev);
 }
 
-static inline void device_pm_add(struct device *dev) {}
 static inline void device_pm_move_before(struct device *deva,
 					 struct device *devb) {}
 static inline void device_pm_move_after(struct device *deva,
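
The qos.c file added below implements the request API these hooks prepare
for. A hedged caller's sketch (the request object and the values are
hypothetical; units depend on the constraint class):

	static struct dev_pm_qos_request my_req;

	ret = dev_pm_qos_add_request(dev, &my_req, 100);
	if (ret < 0)
		return ret;
	/* ... */
	dev_pm_qos_update_request(&my_req, 50);
	/* ... */
	dev_pm_qos_remove_request(&my_req);
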
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
new file mode 100644
index 000000000000..91e061417382
--- /dev/null
+++ b/drivers/base/power/qos.c
@@ -0,0 +1,419 @@
+/*
+ * Devices PM QoS constraints management
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * This module exposes the interface to kernel space for specifying
+ * per-device PM QoS dependencies. It provides infrastructure for registration
+ * of:
+ *
+ * Dependents on a QoS value : register requests
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ * Watchers can register different types of notification callbacks:
+ *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
+ *    The notification chain data is stored in the per-device constraint
+ *    data struct.
+ *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
+ *    API. The notification chain data is stored in a static variable.
+ *
+ * Note about the per-device constraint data struct allocation:
+ *  . The per-device constraints data struct ptr is stored into the device
+ *    dev_pm_info.
+ *  . To minimize the data usage by the per-device constraints, the data
+ *    struct is only allocated at the first call to dev_pm_qos_add_request.
+ *  . The data is later free'd when the device is removed from the system.
+ *  . A global mutex protects the constraints users from the data being
+ *    allocated and free'd.
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+
+static DEFINE_MUTEX(dev_pm_qos_mtx);
+
+static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ */
+s32 dev_pm_qos_read_value(struct device *dev)
+{
+	struct pm_qos_constraints *c;
+	unsigned long flags;
+	s32 ret = 0;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	c = dev->power.constraints;
+	if (c)
+		ret = pm_qos_read_value(c);
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return ret;
+}
+
+/*
+ * apply_constraint
+ * @req: constraint request to apply
+ * @action: action to perform add/update/remove, of type enum pm_qos_req_action
+ * @value: defines the qos request
+ *
+ * Internal function to update the constraints list using the PM QoS core
+ * code and if needed call the per-device and the global notification
+ * callbacks
+ */
+static int apply_constraint(struct dev_pm_qos_request *req,
+			    enum pm_qos_req_action action, int value)
+{
+	int ret, curr_value;
+
+	ret = pm_qos_update_target(req->dev->power.constraints,
+				   &req->node, action, value);
+
+	if (ret) {
+		/* Call the global callbacks if needed */
+		curr_value = pm_qos_read_value(req->dev->power.constraints);
+		blocking_notifier_call_chain(&dev_pm_notifiers,
+					     (unsigned long)curr_value,
+					     req);
+	}
+
+	return ret;
+}
+
+/*
+ * dev_pm_qos_constraints_allocate
+ * @dev: device to allocate data for
+ *
+ * Called at the first call to add_request, for constraint data allocation.
+ * Must be called with the dev_pm_qos_mtx mutex held.
+ */
+static int dev_pm_qos_constraints_allocate(struct device *dev)
+{
+	struct pm_qos_constraints *c;
+	struct blocking_notifier_head *n;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	n = kzalloc(sizeof(*n), GFP_KERNEL);
+	if (!n) {
+		kfree(c);
+		return -ENOMEM;
+	}
+	BLOCKING_INIT_NOTIFIER_HEAD(n);
+
+	plist_head_init(&c->list);
+	c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+	c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+	c->type = PM_QOS_MIN;
+	c->notifiers = n;
+
+	spin_lock_irq(&dev->power.lock);
+	dev->power.constraints = c;
+	spin_unlock_irq(&dev->power.lock);
+
+	return 0;
+}
+
+/**
+ * dev_pm_qos_constraints_init - Initialize device's PM QoS constraints pointer.
+ * @dev: target device
+ *
+ * Called from the device PM subsystem during device insertion under
+ * device_pm_lock().
+ */
+void dev_pm_qos_constraints_init(struct device *dev)
+{
+	mutex_lock(&dev_pm_qos_mtx);
+	dev->power.constraints = NULL;
+	dev->power.power_state = PMSG_ON;
+	mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_constraints_destroy
+ * @dev: target device
+ *
+ * Called from the device PM subsystem on device removal under device_pm_lock().
+ */
+void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+	struct dev_pm_qos_request *req, *tmp;
+	struct pm_qos_constraints *c;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	dev->power.power_state = PMSG_INVALID;
+	c = dev->power.constraints;
+	if (!c)
+		goto out;
+
+	/* Flush the constraints list for the device */
+	plist_for_each_entry_safe(req, tmp, &c->list, node) {
+		/*
+		 * Update constraints list and call the notification
+		 * callbacks if needed
+		 */
+		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
+
+	spin_lock_irq(&dev->power.lock);
+	dev->power.constraints = NULL;
+	spin_unlock_irq(&dev->power.lock);
+
+	kfree(c->notifiers);
+	kfree(c);
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_add_request - inserts new qos request into the list
+ * @dev: target device for the constraint
+ * @req: pointer to a preallocated handle
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the device constraints list of
+ * requested qos performance characteristics. It recomputes the aggregate
+ * QoS expectations of parameters and initializes the dev_pm_qos_request
+ * handle.  Caller needs to save this handle for later use in updates and
+ * removal.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
+ * to allocate for data structures, -ENODEV if the device has just been removed
+ * from the system.
+ */
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+			   s32 value)
+{
+	int ret = 0;
+
+	if (!dev || !req) /* guard against callers passing in null */
+		return -EINVAL;
+
+	if (dev_pm_qos_request_active(req)) {
+		WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
+			"added request\n");
+		return -EINVAL;
+	}
+
+	req->dev = dev;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (!dev->power.constraints) {
+		if (dev->power.power_state.event == PM_EVENT_INVALID) {
+			/* The device has been removed from the system. */
+			req->dev = NULL;
+			ret = -ENODEV;
+			goto out;
+		} else {
+			/*
+			 * Allocate the constraints data on the first call to
+			 * add_request, i.e. only if the data is not already
+			 * allocated and if the device has not been removed.
+			 */
+			ret = dev_pm_qos_constraints_allocate(dev);
+		}
+	}
+
+	if (!ret)
+		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
+
+/**
+ * dev_pm_qos_update_request - modifies an existing qos request
+ * @req: handle to list element holding a dev_pm_qos request to use
+ * @new_value: defines the qos request
+ *
+ * Updates an existing dev PM QoS request and recomputes the aggregate
+ * target value.
+ *
+ * This function is intended to be callable from hot code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ */
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+			      s32 new_value)
+{
+	int ret = 0;
+
+	if (!req) /* guard against callers passing in null */
+		return -EINVAL;
+
+	if (!dev_pm_qos_request_active(req)) {
+		WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
+			"unknown object\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (req->dev->power.constraints) {
+		if (new_value != req->node.prio)
+			ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
+					       new_value);
+	} else {
+		/* Return if the device has been removed */
+		ret = -ENODEV;
+	}
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+/**
+ * dev_pm_qos_remove_request - removes an existing qos request
+ * @req: handle to request list element
+ *
+ * Will remove the pm qos request from the list of constraints and
+ * recompute the current target value. Call this on slow code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ */
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret = 0;
+
+	if (!req) /* guard against callers passing in null */
+		return -EINVAL;
+
+	if (!dev_pm_qos_request_active(req)) {
+		WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
+			"unknown object\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (req->dev->power.constraints) {
+		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
+				       PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	} else {
+		/* Return if the device has been removed */
+		ret = -ENODEV;
+	}
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
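+
+/*
+ * Usage sketch (hypothetical driver code, not part of this file): the
+ * request handle must remain valid from add to remove, so it normally
+ * lives in driver private data rather than on the stack.
+ */
+static struct dev_pm_qos_request example_req;
+
+static int example_constrain(struct device *dev)
+{
+	/* Ask for a latency constraint of 100 (units defined by the user). */
+	return dev_pm_qos_add_request(dev, &example_req, 100);
+}
+
+static void example_relax(void)
+{
+	/* Tighten or relax the constraint later without re-adding it. */
+	dev_pm_qos_update_request(&example_req, 500);
+}
+
+static void example_release(void)
+{
+	/* Drop the constraint; the core zeroes the handle on removal. */
+	dev_pm_qos_remove_request(&example_req);
+}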
+
+/**
+ * dev_pm_qos_add_notifier - sets notification entry for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for the device.
+ */
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
+{
+	int retval = 0;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	/* Silently return if the constraints object is not present. */
+	if (dev->power.constraints)
+		retval = blocking_notifier_chain_register(
+				dev->power.constraints->notifiers,
+				notifier);
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
+
+/**
+ * dev_pm_qos_remove_notifier - deletes notification for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value.
+ */
+int dev_pm_qos_remove_notifier(struct device *dev,
+			       struct notifier_block *notifier)
+{
+	int retval = 0;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	/* Silently return if the constraints object is not present. */
+	if (dev->power.constraints)
+		retval = blocking_notifier_chain_unregister(
+				dev->power.constraints->notifiers,
+				notifier);
+
+	mutex_unlock(&dev_pm_qos_mtx);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
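+
+/*
+ * Notifier sketch (hypothetical callback): the chain is invoked with
+ * the new aggregate target value passed as @action.
+ */
+static int example_qos_notify(struct notifier_block *nb,
+			      unsigned long action, void *data)
+{
+	pr_debug("device PM QoS target changed to %lu\n", action);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block example_qos_nb = {
+	.notifier_call = example_qos_notify,
+};
+
+/* Registered with: dev_pm_qos_add_notifier(dev, &example_qos_nb); */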
+
+/**
+ * dev_pm_qos_add_global_notifier - sets notification entry for changes to
+ * target value of the PM QoS constraints for any device
+ *
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for any device.
+ */
+int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
+{
+	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
+
+/**
+ * dev_pm_qos_remove_global_notifier - deletes notification for changes to
+ * target value of PM QoS constraints for any device
+ *
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value for any device.
+ */
+int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
+{
+	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index acb3f83b8079..6bb3aafa85ed 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -9,6 +9,7 @@
 
 #include <linux/sched.h>
 #include <linux/pm_runtime.h>
+#include <trace/events/rpm.h>
 #include "power.h"
 
 static int rpm_resume(struct device *dev, int rpmflags);
@@ -155,6 +156,31 @@ static int rpm_check_suspend_allowed(struct device *dev)
 }
 
 /**
+ * __rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+	int retval;
+
+	if (dev->power.irq_safe)
+		spin_unlock(&dev->power.lock);
+	else
+		spin_unlock_irq(&dev->power.lock);
+
+	retval = cb(dev);
+
+	if (dev->power.irq_safe)
+		spin_lock(&dev->power.lock);
+	else
+		spin_lock_irq(&dev->power.lock);
+
+	return retval;
+}
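+
+/*
+ * Usage sketch (hypothetical driver, assumes <linux/interrupt.h> and
+ * that pm_runtime_irq_safe() was called for the device during probe):
+ * with irq_safe set, the lock is dropped around the callback without
+ * re-enabling interrupts, so synchronous runtime PM calls are legal
+ * in atomic context.
+ */
+static irqreturn_t example_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+
+	pm_runtime_get_sync(dev);	/* will not sleep: irq_safe is set */
+	/* ... access the hardware ... */
+	pm_runtime_put(dev);		/* asynchronous put, safe here */
+	return IRQ_HANDLED;
+}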
+
+/**
  * rpm_idle - Notify device bus type if the device can be suspended.
  * @dev: Device to notify the bus type about.
  * @rpmflags: Flag bits.
@@ -171,6 +197,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	int (*callback)(struct device *);
 	int retval;
 
+	trace_rpm_idle(dev, rpmflags);
 	retval = rpm_check_suspend_allowed(dev);
 	if (retval < 0)
 		;	/* Conditions are wrong. */
@@ -225,24 +252,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	else
 		callback = NULL;
 
-	if (callback) {
-		if (dev->power.irq_safe)
-			spin_unlock(&dev->power.lock);
-		else
-			spin_unlock_irq(&dev->power.lock);
-
-		callback(dev);
-
-		if (dev->power.irq_safe)
-			spin_lock(&dev->power.lock);
-		else
-			spin_lock_irq(&dev->power.lock);
-	}
+	if (callback)
+		__rpm_callback(callback, dev);
 
 	dev->power.idle_notification = false;
 	wake_up_all(&dev->power.wait_queue);
 
  out:
+	trace_rpm_return_int(dev, _THIS_IP_, retval);
 	return retval;
 }
 
@@ -252,22 +269,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
  * @dev: Device to run the callback for.
  */
 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
-	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	int retval;
 
 	if (!cb)
 		return -ENOSYS;
 
-	if (dev->power.irq_safe) {
-		retval = cb(dev);
-	} else {
-		spin_unlock_irq(&dev->power.lock);
-
-		retval = cb(dev);
+	retval = __rpm_callback(cb, dev);
 
-		spin_lock_irq(&dev->power.lock);
-	}
 	dev->power.runtime_error = retval;
 	return retval != -EACCES ? retval : -EIO;
 }
@@ -277,14 +286,16 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
  * @dev: Device to suspend.
  * @rpmflags: Flag bits.
  *
- * Check if the device's runtime PM status allows it to be suspended.  If
- * another suspend has been started earlier, either return immediately or wait
- * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
- * pending idle notification.  If the RPM_ASYNC flag is set then queue a
- * suspend request; otherwise run the ->runtime_suspend() callback directly.
- * If a deferred resume was requested while the callback was running then carry
- * it out; otherwise send an idle notification for the device (if the suspend
- * failed) or for its parent (if the suspend succeeded).
+ * Check if the device's runtime PM status allows it to be suspended.
+ * Cancel a pending idle notification, autosuspend or suspend. If
+ * another suspend has been started earlier, either return immediately
+ * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
+ * flags. If the RPM_ASYNC flag is set then queue a suspend request;
+ * otherwise run the ->runtime_suspend() callback directly.  If that
+ * callback succeeds, either carry out a deferred resume requested while
+ * it was running, or send an idle notification for the device's parent
+ * (unless parent->power.ignore_children or dev->power.irq_safe is set).
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
@@ -295,7 +306,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	struct device *parent = NULL;
 	int retval;
 
-	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
+	trace_rpm_suspend(dev, rpmflags);
 
  repeat:
 	retval = rpm_check_suspend_allowed(dev);
@@ -347,6 +358,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 			goto out;
 		}
 
+		if (dev->power.irq_safe) {
+			spin_unlock(&dev->power.lock);
+
+			cpu_relax();
+
+			spin_lock(&dev->power.lock);
+			goto repeat;
+		}
+
 		/* Wait for the other suspend running in parallel with us. */
 		for (;;) {
 			prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -400,15 +420,16 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 			dev->power.runtime_error = 0;
 		else
 			pm_runtime_cancel_pending(dev);
-	} else {
+		wake_up_all(&dev->power.wait_queue);
+		goto out;
+	}
  no_callback:
-		__update_runtime_status(dev, RPM_SUSPENDED);
-		pm_runtime_deactivate_timer(dev);
+	__update_runtime_status(dev, RPM_SUSPENDED);
+	pm_runtime_deactivate_timer(dev);
 
-		if (dev->parent) {
-			parent = dev->parent;
-			atomic_add_unless(&parent->power.child_count, -1, 0);
-		}
+	if (dev->parent) {
+		parent = dev->parent;
+		atomic_add_unless(&parent->power.child_count, -1, 0);
 	}
 	wake_up_all(&dev->power.wait_queue);
 
@@ -430,7 +451,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	}
 
  out:
-	dev_dbg(dev, "%s returns %d\n", __func__, retval);
+	trace_rpm_return_int(dev, _THIS_IP_, retval);
 
 	return retval;
 }
@@ -459,7 +480,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	struct device *parent = NULL;
 	int retval = 0;
 
-	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
+	trace_rpm_resume(dev, rpmflags);
 
  repeat:
 	if (dev->power.runtime_error)
@@ -496,6 +517,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
 			goto out;
 		}
 
+		if (dev->power.irq_safe) {
+			spin_unlock(&dev->power.lock);
+
+			cpu_relax();
+
+			spin_lock(&dev->power.lock);
+			goto repeat;
+		}
+
 		/* Wait for the operation carried out in parallel with us. */
 		for (;;) {
 			prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -615,7 +645,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 		spin_lock_irq(&dev->power.lock);
 	}
 
-	dev_dbg(dev, "%s returns %d\n", __func__, retval);
+	trace_rpm_return_int(dev, _THIS_IP_, retval);
 
 	return retval;
 }
@@ -732,13 +762,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
  * return immediately if it is larger than zero.  Then carry out an idle
  * notification, either synchronous or asynchronous.
  *
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
  */
 int __pm_runtime_idle(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
@@ -761,13 +794,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
  * return immediately if it is larger than zero.  Then carry out a suspend,
  * either synchronous or asynchronous.
  *
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
  */
 int __pm_runtime_suspend(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
@@ -789,13 +825,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
  * carry out a resume, either synchronous or asynchronous.
  *
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
  */
 int __pm_runtime_resume(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	if (rpmflags & RPM_GET_PUT)
 		atomic_inc(&dev->power.usage_count);
 
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 84f7c7d5a098..14ee07e9cc43 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -276,7 +276,9 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
  *
  * By default, most devices should leave wakeup disabled.  The exceptions are
  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
- * possibly network interfaces, etc.
+ * possibly network interfaces, etc.  Also, devices that don't generate their
+ * own wakeup requests but merely forward requests from one bus to another
+ * (like PCI bridges) should have wakeup enabled by default.
  */
 int device_init_wakeup(struct device *dev, bool enable)
 {
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index fabbf6cc5367..2fc6a66f39a4 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,6 +4,8 @@
 
 config REGMAP
 	default y if (REGMAP_I2C || REGMAP_SPI)
+	select LZO_COMPRESS
+	select LZO_DECOMPRESS
 	bool
 
 config REGMAP_I2C
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index f476f4571295..0573c8a9dacb 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,3 +1,4 @@
-obj-$(CONFIG_REGMAP) += regmap.o
+obj-$(CONFIG_REGMAP) += regmap.o regcache.o regcache-indexed.o regcache-rbtree.o regcache-lzo.o
+obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
 obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
 obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
new file mode 100644
index 000000000000..348ff02eb93e
--- /dev/null
+++ b/drivers/base/regmap/internal.h
@@ -0,0 +1,128 @@
+/*
+ * Register map access API internal header
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGMAP_INTERNAL_H
+#define _REGMAP_INTERNAL_H
+
+#include <linux/regmap.h>
+#include <linux/fs.h>
+
+struct regmap;
+struct regcache_ops;
+
+struct regmap_format {
+	size_t buf_size;
+	size_t reg_bytes;
+	size_t val_bytes;
+	void (*format_write)(struct regmap *map,
+			     unsigned int reg, unsigned int val);
+	void (*format_reg)(void *buf, unsigned int reg);
+	void (*format_val)(void *buf, unsigned int val);
+	unsigned int (*parse_val)(void *buf);
+};
+
+struct regmap {
+	struct mutex lock;
+
+	struct device *dev; /* Device we do I/O on */
+	void *work_buf;     /* Scratch buffer used to format I/O */
+	struct regmap_format format;  /* Buffer format */
+	const struct regmap_bus *bus;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs;
+#endif
+
+	unsigned int max_register;
+	bool (*writeable_reg)(struct device *dev, unsigned int reg);
+	bool (*readable_reg)(struct device *dev, unsigned int reg);
+	bool (*volatile_reg)(struct device *dev, unsigned int reg);
+	bool (*precious_reg)(struct device *dev, unsigned int reg);
+
+	u8 read_flag_mask;
+	u8 write_flag_mask;
+
+	/* regcache specific members */
+	const struct regcache_ops *cache_ops;
+	enum regcache_type cache_type;
+
+	/* number of bytes in reg_defaults_raw */
+	unsigned int cache_size_raw;
+	/* number of bytes per word in reg_defaults_raw */
+	unsigned int cache_word_size;
+	/* number of entries in reg_defaults */
+	unsigned int num_reg_defaults;
+	/* number of entries in reg_defaults_raw */
+	unsigned int num_reg_defaults_raw;
+
+	/* if set, only the cache is modified not the HW */
+	unsigned int cache_only:1;
+	/* if set, only the HW is modified not the cache */
+	unsigned int cache_bypass:1;
+	/* if set, remember to free reg_defaults_raw */
+	unsigned int cache_free:1;
+
+	struct reg_default *reg_defaults;
+	const void *reg_defaults_raw;
+	void *cache;
+};
+
+struct regcache_ops {
+	const char *name;
+	enum regcache_type type;
+	int (*init)(struct regmap *map);
+	int (*exit)(struct regmap *map);
+	int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
+	int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
+	int (*sync)(struct regmap *map);
+};
+
+bool regmap_writeable(struct regmap *map, unsigned int reg);
+bool regmap_readable(struct regmap *map, unsigned int reg);
+bool regmap_volatile(struct regmap *map, unsigned int reg);
+bool regmap_precious(struct regmap *map, unsigned int reg);
+
+int _regmap_write(struct regmap *map, unsigned int reg,
+		  unsigned int val);
+
+#ifdef CONFIG_DEBUG_FS
+extern void regmap_debugfs_initcall(void);
+extern void regmap_debugfs_init(struct regmap *map);
+extern void regmap_debugfs_exit(struct regmap *map);
+#else
+static inline void regmap_debugfs_initcall(void) { }
+static inline void regmap_debugfs_init(struct regmap *map) { }
+static inline void regmap_debugfs_exit(struct regmap *map) { }
+#endif
+
+/* regcache core declarations */
+int regcache_init(struct regmap *map);
+void regcache_exit(struct regmap *map);
+int regcache_read(struct regmap *map,
+		       unsigned int reg, unsigned int *value);
+int regcache_write(struct regmap *map,
+			unsigned int reg, unsigned int value);
+int regcache_sync(struct regmap *map);
+
+unsigned int regcache_get_val(const void *base, unsigned int idx,
+			      unsigned int word_size);
+bool regcache_set_val(void *base, unsigned int idx,
+		      unsigned int val, unsigned int word_size);
+int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+int regcache_insert_reg(struct regmap *map, unsigned int reg,
+			unsigned int val);
+
+extern struct regcache_ops regcache_indexed_ops;
+extern struct regcache_ops regcache_rbtree_ops;
+extern struct regcache_ops regcache_lzo_ops;
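+
+/*
+ * Illustrative sketch only (a hypothetical "flat" backend, not one of
+ * the caches implemented here): read, write and name are the members
+ * regcache_init() insists on; init, exit and sync are optional.
+ */
+static int regcache_flat_read(struct regmap *map, unsigned int reg,
+			      unsigned int *value)
+{
+	*value = regcache_get_val(map->cache, reg, map->cache_word_size);
+	return 0;
+}
+
+static int regcache_flat_write(struct regmap *map, unsigned int reg,
+			       unsigned int value)
+{
+	regcache_set_val(map->cache, reg, value, map->cache_word_size);
+	return 0;
+}
+
+static struct regcache_ops regcache_flat_ops = {
+	.type = REGCACHE_NONE,	/* placeholder: a real backend adds an enum */
+	.name = "flat",
+	.read = regcache_flat_read,
+	.write = regcache_flat_write,
+};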
+
+#endif
diff --git a/drivers/base/regmap/regcache-indexed.c b/drivers/base/regmap/regcache-indexed.c
new file mode 100644
index 000000000000..507731ad8ec1
--- /dev/null
+++ b/drivers/base/regmap/regcache-indexed.c
@@ -0,0 +1,64 @@
+/*
+ * Register cache access API - indexed caching support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static int regcache_indexed_read(struct regmap *map, unsigned int reg,
+				 unsigned int *value)
+{
+	int ret;
+
+	ret = regcache_lookup_reg(map, reg);
+	if (ret >= 0)
+		*value = map->reg_defaults[ret].def;
+
+	return ret;
+}
+
+static int regcache_indexed_write(struct regmap *map, unsigned int reg,
+				  unsigned int value)
+{
+	int ret;
+
+	ret = regcache_lookup_reg(map, reg);
+	if (ret < 0)
+		return regcache_insert_reg(map, reg, value);
+	map->reg_defaults[ret].def = value;
+	return 0;
+}
+
+static int regcache_indexed_sync(struct regmap *map)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < map->num_reg_defaults; i++) {
+		ret = _regmap_write(map, map->reg_defaults[i].reg,
+				    map->reg_defaults[i].def);
+		if (ret < 0)
+			return ret;
+		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+			map->reg_defaults[i].reg,
+			map->reg_defaults[i].def);
+	}
+	return 0;
+}
+
+struct regcache_ops regcache_indexed_ops = {
+	.type = REGCACHE_INDEXED,
+	.name = "indexed",
+	.read = regcache_indexed_read,
+	.write = regcache_indexed_write,
+	.sync = regcache_indexed_sync
+};
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
new file mode 100644
index 000000000000..066aeece3626
--- /dev/null
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -0,0 +1,361 @@
+/*
+ * Register cache access API - LZO caching support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/lzo.h>
+
+#include "internal.h"
+
+struct regcache_lzo_ctx {
+	void *wmem;
+	void *dst;
+	const void *src;
+	size_t src_len;
+	size_t dst_len;
+	size_t decompressed_size;
+	unsigned long *sync_bmp;
+	int sync_bmp_nbits;
+};
+
+#define LZO_BLOCK_NUM 8
+static int regcache_lzo_block_count(void)
+{
+	return LZO_BLOCK_NUM;
+}
+
+static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
+{
+	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+	if (!lzo_ctx->wmem)
+		return -ENOMEM;
+	return 0;
+}
+
+static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
+{
+	size_t compress_size;
+	int ret;
+
+	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
+			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
+	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
+		return -EINVAL;
+	lzo_ctx->dst_len = compress_size;
+	return 0;
+}
+
+static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
+{
+	size_t dst_len;
+	int ret;
+
+	dst_len = lzo_ctx->dst_len;
+	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
+				    lzo_ctx->dst, &dst_len);
+	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
+		return -EINVAL;
+	return 0;
+}
+
+static int regcache_lzo_compress_cache_block(struct regmap *map,
+		struct regcache_lzo_ctx *lzo_ctx)
+{
+	int ret;
+
+	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
+	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+	if (!lzo_ctx->dst) {
+		lzo_ctx->dst_len = 0;
+		return -ENOMEM;
+	}
+
+	ret = regcache_lzo_compress(lzo_ctx);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int regcache_lzo_decompress_cache_block(struct regmap *map,
+		struct regcache_lzo_ctx *lzo_ctx)
+{
+	int ret;
+
+	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
+	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+	if (!lzo_ctx->dst) {
+		lzo_ctx->dst_len = 0;
+		return -ENOMEM;
+	}
+
+	ret = regcache_lzo_decompress(lzo_ctx);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static inline int regcache_lzo_get_blkindex(struct regmap *map,
+					    unsigned int reg)
+{
+	return (reg * map->cache_word_size) /
+		DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+}
+
+static inline int regcache_lzo_get_blkpos(struct regmap *map,
+					  unsigned int reg)
+{
+	return reg % (DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()) /
+		      map->cache_word_size);
+}
+
+static inline int regcache_lzo_get_blksize(struct regmap *map)
+{
+	return DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+}
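+
+/*
+ * Worked example with made-up numbers: for cache_size_raw = 256 bytes
+ * and cache_word_size = 2, each of the LZO_BLOCK_NUM (8) blocks spans
+ * DIV_ROUND_UP(256, 8) = 32 bytes, i.e. 16 registers.  Register 37
+ * then maps to block (37 * 2) / 32 = 2 and to position 37 % 16 = 5
+ * inside that block's decompressed buffer.
+ */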
+
+static int regcache_lzo_init(struct regmap *map)
+{
+	struct regcache_lzo_ctx **lzo_blocks;
+	size_t bmp_size;
+	int ret, i, blksize, blkcount;
+	const char *p, *end;
+	unsigned long *sync_bmp;
+
+	ret = 0;
+
+	blkcount = regcache_lzo_block_count();
+	map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
+			     GFP_KERNEL);
+	if (!map->cache)
+		return -ENOMEM;
+	lzo_blocks = map->cache;
+
+	/*
+	 * allocate a bitmap to be used when syncing the cache with
+	 * the hardware.  Each time a register is modified, the corresponding
+	 * bit is set in the bitmap, so we know that we have to sync
+	 * that register.
+	 */
+	bmp_size = map->num_reg_defaults_raw;
+	sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
+			   GFP_KERNEL);
+	if (!sync_bmp) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	bitmap_zero(sync_bmp, bmp_size);
+
+	/* allocate the lzo blocks and initialize them */
+	for (i = 0; i < blkcount; i++) {
+		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
+					GFP_KERNEL);
+		if (!lzo_blocks[i]) {
+			kfree(sync_bmp);
+			ret = -ENOMEM;
+			goto err;
+		}
+		lzo_blocks[i]->sync_bmp = sync_bmp;
+		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
+		/* alloc the working space for the compressed block */
+		ret = regcache_lzo_prepare(lzo_blocks[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	blksize = regcache_lzo_get_blksize(map);
+	p = map->reg_defaults_raw;
+	end = map->reg_defaults_raw + map->cache_size_raw;
+	/* compress the register map and fill the lzo blocks */
+	for (i = 0; i < blkcount; i++, p += blksize) {
+		lzo_blocks[i]->src = p;
+		if (p + blksize > end)
+			lzo_blocks[i]->src_len = end - p;
+		else
+			lzo_blocks[i]->src_len = blksize;
+		ret = regcache_lzo_compress_cache_block(map,
+						       lzo_blocks[i]);
+		if (ret < 0)
+			goto err;
+		lzo_blocks[i]->decompressed_size =
+			lzo_blocks[i]->src_len;
+	}
+
+	return 0;
+err:
+	regcache_exit(map);
+	return ret;
+}
+
+static int regcache_lzo_exit(struct regmap *map)
+{
+	struct regcache_lzo_ctx **lzo_blocks;
+	int i, blkcount;
+
+	lzo_blocks = map->cache;
+	if (!lzo_blocks)
+		return 0;
+
+	blkcount = regcache_lzo_block_count();
+	/*
+	 * the pointer to the bitmap used for syncing the cache
+	 * is shared amongst all lzo_blocks.  Ensure it is freed
+	 * only once.
+	 */
+	if (lzo_blocks[0])
+		kfree(lzo_blocks[0]->sync_bmp);
+	for (i = 0; i < blkcount; i++) {
+		if (lzo_blocks[i]) {
+			kfree(lzo_blocks[i]->wmem);
+			kfree(lzo_blocks[i]->dst);
+		}
+		/* each lzo_block is a pointer returned by kmalloc or NULL */
+		kfree(lzo_blocks[i]);
+	}
+	kfree(lzo_blocks);
+	map->cache = NULL;
+	return 0;
+}
+
+static int regcache_lzo_read(struct regmap *map,
+			     unsigned int reg, unsigned int *value)
+{
+	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
+	int ret, blkindex, blkpos;
+	size_t blksize, tmp_dst_len;
+	void *tmp_dst;
+
+	/* index of the compressed lzo block */
+	blkindex = regcache_lzo_get_blkindex(map, reg);
+	/* register index within the decompressed block */
+	blkpos = regcache_lzo_get_blkpos(map, reg);
+	/* size of the compressed block */
+	blksize = regcache_lzo_get_blksize(map);
+	lzo_blocks = map->cache;
+	lzo_block = lzo_blocks[blkindex];
+
+	/* save the pointer and length of the compressed block */
+	tmp_dst = lzo_block->dst;
+	tmp_dst_len = lzo_block->dst_len;
+
+	/* prepare the source to be the compressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* decompress the block */
+	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
+	if (ret >= 0)
+		/* fetch the value from the cache */
+		*value = regcache_get_val(lzo_block->dst, blkpos,
+					  map->cache_word_size);
+
+	kfree(lzo_block->dst);
+	/* restore the pointer and length of the compressed block */
+	lzo_block->dst = tmp_dst;
+	lzo_block->dst_len = tmp_dst_len;
+
+	return ret;
+}
+
+static int regcache_lzo_write(struct regmap *map,
+			      unsigned int reg, unsigned int value)
+{
+	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
+	int ret, blkindex, blkpos;
+	size_t blksize, tmp_dst_len;
+	void *tmp_dst;
+
+	/* index of the compressed lzo block */
+	blkindex = regcache_lzo_get_blkindex(map, reg);
+	/* register index within the decompressed block */
+	blkpos = regcache_lzo_get_blkpos(map, reg);
+	/* size of the compressed block */
+	blksize = regcache_lzo_get_blksize(map);
+	lzo_blocks = map->cache;
+	lzo_block = lzo_blocks[blkindex];
+
+	/* save the pointer and length of the compressed block */
+	tmp_dst = lzo_block->dst;
+	tmp_dst_len = lzo_block->dst_len;
+
+	/* prepare the source to be the compressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* decompress the block */
+	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
+	if (ret < 0) {
+		kfree(lzo_block->dst);
+		goto out;
+	}
+
+	/* write the new value to the cache */
+	if (regcache_set_val(lzo_block->dst, blkpos, value,
+			     map->cache_word_size)) {
+		kfree(lzo_block->dst);
+		goto out;
+	}
+
+	/* prepare the source to be the decompressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* compress the block */
+	ret = regcache_lzo_compress_cache_block(map, lzo_block);
+	if (ret < 0) {
+		kfree(lzo_block->dst);
+		kfree(lzo_block->src);
+		goto out;
+	}
+
+	/* set the bit so we know we have to sync this register */
+	set_bit(reg, lzo_block->sync_bmp);
+	kfree(tmp_dst);
+	kfree(lzo_block->src);
+	return 0;
+out:
+	lzo_block->dst = tmp_dst;
+	lzo_block->dst_len = tmp_dst_len;
+	return ret;
+}
+
+static int regcache_lzo_sync(struct regmap *map)
+{
+	struct regcache_lzo_ctx **lzo_blocks;
+	unsigned int val;
+	int i;
+	int ret;
+
+	lzo_blocks = map->cache;
+	for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
+		ret = regcache_read(map, i, &val);
+		if (ret)
+			return ret;
+		map->cache_bypass = 1;
+		ret = _regmap_write(map, i, val);
+		map->cache_bypass = 0;
+		if (ret)
+			return ret;
+		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+			i, val);
+	}
+
+	return 0;
+}
+
+struct regcache_ops regcache_lzo_ops = {
+	.type = REGCACHE_LZO,
+	.name = "lzo",
+	.init = regcache_lzo_init,
+	.exit = regcache_lzo_exit,
+	.read = regcache_lzo_read,
+	.write = regcache_lzo_write,
+	.sync = regcache_lzo_sync
+};
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
new file mode 100644
index 000000000000..e31498499b0f
--- /dev/null
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -0,0 +1,345 @@
+/*
+ * Register cache access API - rbtree caching support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+
+#include "internal.h"
+
+static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
+				 unsigned int value);
+
+struct regcache_rbtree_node {
+	/* the actual rbtree node holding this block */
+	struct rb_node node;
+	/* base register handled by this block */
+	unsigned int base_reg;
+	/* block of adjacent registers */
+	void *block;
+	/* number of registers available in the block */
+	unsigned int blklen;
+} __attribute__ ((packed));
+
+struct regcache_rbtree_ctx {
+	struct rb_root root;
+	struct regcache_rbtree_node *cached_rbnode;
+};
+
+static inline void regcache_rbtree_get_base_top_reg(
+	struct regcache_rbtree_node *rbnode,
+	unsigned int *base, unsigned int *top)
+{
+	*base = rbnode->base_reg;
+	*top = rbnode->base_reg + rbnode->blklen - 1;
+}
+
+static unsigned int regcache_rbtree_get_register(
+	struct regcache_rbtree_node *rbnode, unsigned int idx,
+	unsigned int word_size)
+{
+	return regcache_get_val(rbnode->block, idx, word_size);
+}
+
+static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
+					 unsigned int idx, unsigned int val,
+					 unsigned int word_size)
+{
+	regcache_set_val(rbnode->block, idx, val, word_size);
+}
+
+static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
+	unsigned int reg)
+{
+	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
+	struct rb_node *node;
+	struct regcache_rbtree_node *rbnode;
+	unsigned int base_reg, top_reg;
+
+	rbnode = rbtree_ctx->cached_rbnode;
+	if (rbnode) {
+		regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
+		if (reg >= base_reg && reg <= top_reg)
+			return rbnode;
+	}
+
+	node = rbtree_ctx->root.rb_node;
+	while (node) {
+		rbnode = container_of(node, struct regcache_rbtree_node, node);
+		regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
+		if (reg >= base_reg && reg <= top_reg) {
+			rbtree_ctx->cached_rbnode = rbnode;
+			return rbnode;
+		} else if (reg > top_reg) {
+			node = node->rb_right;
+		} else if (reg < base_reg) {
+			node = node->rb_left;
+		}
+	}
+
+	return NULL;
+}
+
+static int regcache_rbtree_insert(struct rb_root *root,
+				  struct regcache_rbtree_node *rbnode)
+{
+	struct rb_node **new, *parent;
+	struct regcache_rbtree_node *rbnode_tmp;
+	unsigned int base_reg_tmp, top_reg_tmp;
+	unsigned int base_reg;
+
+	parent = NULL;
+	new = &root->rb_node;
+	while (*new) {
+		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
+					  node);
+		/* base and top registers of the current rbnode */
+		regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
+						 &top_reg_tmp);
+		/* base register of the rbnode to be added */
+		base_reg = rbnode->base_reg;
+		parent = *new;
+		/* if this register has already been inserted, just return */
+		if (base_reg >= base_reg_tmp &&
+		    base_reg <= top_reg_tmp)
+			return 0;
+		else if (base_reg > top_reg_tmp)
+			new = &((*new)->rb_right);
+		else if (base_reg < base_reg_tmp)
+			new = &((*new)->rb_left);
+	}
+
+	/* insert the node into the rbtree */
+	rb_link_node(&rbnode->node, parent, new);
+	rb_insert_color(&rbnode->node, root);
+
+	return 1;
+}
+
+static int regcache_rbtree_init(struct regmap *map)
+{
+	struct regcache_rbtree_ctx *rbtree_ctx;
+	int i;
+	int ret;
+
+	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
+	if (!map->cache)
+		return -ENOMEM;
+
+	rbtree_ctx = map->cache;
+	rbtree_ctx->root = RB_ROOT;
+	rbtree_ctx->cached_rbnode = NULL;
+
+	for (i = 0; i < map->num_reg_defaults; i++) {
+		ret = regcache_rbtree_write(map,
+					    map->reg_defaults[i].reg,
+					    map->reg_defaults[i].def);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	regcache_exit(map);
+	return ret;
+}
+
+static int regcache_rbtree_exit(struct regmap *map)
+{
+	struct rb_node *next;
+	struct regcache_rbtree_ctx *rbtree_ctx;
+	struct regcache_rbtree_node *rbtree_node;
+
+	/* if we've already been called then just return */
+	rbtree_ctx = map->cache;
+	if (!rbtree_ctx)
+		return 0;
+
+	/* free up the rbtree */
+	next = rb_first(&rbtree_ctx->root);
+	while (next) {
+		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
+		next = rb_next(&rbtree_node->node);
+		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
+		kfree(rbtree_node->block);
+		kfree(rbtree_node);
+	}
+
+	/* release the resources */
+	kfree(map->cache);
+	map->cache = NULL;
+
+	return 0;
+}
+
+static int regcache_rbtree_read(struct regmap *map,
+				unsigned int reg, unsigned int *value)
+{
+	struct regcache_rbtree_node *rbnode;
+	unsigned int reg_tmp;
+
+	rbnode = regcache_rbtree_lookup(map, reg);
+	if (rbnode) {
+		reg_tmp = reg - rbnode->base_reg;
+		*value = regcache_rbtree_get_register(rbnode, reg_tmp,
+						      map->cache_word_size);
+	} else {
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+
+static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
+					   unsigned int pos, unsigned int reg,
+					   unsigned int value, unsigned int word_size)
+{
+	u8 *blk;
+
+	blk = krealloc(rbnode->block,
+		       (rbnode->blklen + 1) * word_size, GFP_KERNEL);
+	if (!blk)
+		return -ENOMEM;
+
+	/* insert the register value in the correct place in the rbnode block */
+	memmove(blk + (pos + 1) * word_size,
+		blk + pos * word_size,
+		(rbnode->blklen - pos) * word_size);
+
+	/* update the rbnode block, its size and the base register */
+	rbnode->block = blk;
+	rbnode->blklen++;
+	if (!pos)
+		rbnode->base_reg = reg;
+
+	regcache_rbtree_set_register(rbnode, pos, value, word_size);
+	return 0;
+}
+
+static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
+				 unsigned int value)
+{
+	struct regcache_rbtree_ctx *rbtree_ctx;
+	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
+	struct rb_node *node;
+	unsigned int val;
+	unsigned int reg_tmp;
+	unsigned int pos;
+	int i;
+	int ret;
+
+	rbtree_ctx = map->cache;
+	/* if we can't locate it in the cached rbnode we'll have
+	 * to traverse the rbtree looking for it.
+	 */
+	rbnode = regcache_rbtree_lookup(map, reg);
+	if (rbnode) {
+		reg_tmp = reg - rbnode->base_reg;
+		val = regcache_rbtree_get_register(rbnode, reg_tmp,
+						   map->cache_word_size);
+		if (val == value)
+			return 0;
+		regcache_rbtree_set_register(rbnode, reg_tmp, value,
+					     map->cache_word_size);
+	} else {
+		/* look for an adjacent register to the one we are about to add */
+		for (node = rb_first(&rbtree_ctx->root); node;
+		     node = rb_next(node)) {
+			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node);
+			for (i = 0; i < rbnode_tmp->blklen; i++) {
+				reg_tmp = rbnode_tmp->base_reg + i;
+				if (abs(reg_tmp - reg) != 1)
+					continue;
+				/* decide where in the block to place our register */
+				if (reg_tmp + 1 == reg)
+					pos = i + 1;
+				else
+					pos = i;
+				ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
+								      reg, value,
+								      map->cache_word_size);
+				if (ret)
+					return ret;
+				rbtree_ctx->cached_rbnode = rbnode_tmp;
+				return 0;
+			}
+		}
+		/* we did not manage to find a place to insert it in an existing
+		 * block so create a new rbnode with a single register in its block.
+		 * This block will get populated further if any other adjacent
+		 * registers get modified in the future.
+		 */
+		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
+		if (!rbnode)
+			return -ENOMEM;
+		rbnode->blklen = 1;
+		rbnode->base_reg = reg;
+		rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
+					GFP_KERNEL);
+		if (!rbnode->block) {
+			kfree(rbnode);
+			return -ENOMEM;
+		}
+		regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
+		regcache_rbtree_insert(&rbtree_ctx->root, rbnode);
+		rbtree_ctx->cached_rbnode = rbnode;
+	}
+
+	return 0;
+}
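+
+/*
+ * Illustrative behaviour (hypothetical register numbers): writes to
+ * 0x10, 0x11 and 0x12 coalesce into a single rbnode with base_reg 0x10
+ * and blklen 3, because each write first looks for a block containing
+ * an adjacent register before allocating a new single-entry node.  A
+ * later write to 0x20 creates a second node, as it is not adjacent.
+ */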
+
+static int regcache_rbtree_sync(struct regmap *map)
+{
+	struct regcache_rbtree_ctx *rbtree_ctx;
+	struct rb_node *node;
+	struct regcache_rbtree_node *rbnode;
+	unsigned int regtmp;
+	unsigned int val;
+	int ret;
+	int i;
+
+	rbtree_ctx = map->cache;
+	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+		for (i = 0; i < rbnode->blklen; i++) {
+			regtmp = rbnode->base_reg + i;
+			val = regcache_rbtree_get_register(rbnode, i,
+							   map->cache_word_size);
+
+			/* Is this the hardware default?  If so skip. */
+			ret = regcache_lookup_reg(map, regtmp);
+			if (ret >= 0 && val == map->reg_defaults[ret].def)
+				continue;
+
+			map->cache_bypass = 1;
+			ret = _regmap_write(map, regtmp, val);
+			map->cache_bypass = 0;
+			if (ret)
+				return ret;
+			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+				regtmp, val);
+		}
+	}
+
+	return 0;
+}
+
+struct regcache_ops regcache_rbtree_ops = {
+	.type = REGCACHE_RBTREE,
+	.name = "rbtree",
+	.init = regcache_rbtree_init,
+	.exit = regcache_rbtree_exit,
+	.read = regcache_rbtree_read,
+	.write = regcache_rbtree_write,
+	.sync = regcache_rbtree_sync
+};
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
new file mode 100644
index 000000000000..afcfef838263
--- /dev/null
+++ b/drivers/base/regmap/regcache.c
@@ -0,0 +1,401 @@
+/*
+ * Register cache access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <trace/events/regmap.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
+
+#include "internal.h"
+
+static const struct regcache_ops *cache_types[] = {
+	&regcache_indexed_ops,
+	&regcache_rbtree_ops,
+	&regcache_lzo_ops,
+};
+
+static int regcache_hw_init(struct regmap *map)
+{
+	int i, j;
+	int ret;
+	int count;
+	unsigned int val;
+	void *tmp_buf;
+
+	if (!map->num_reg_defaults_raw)
+		return -EINVAL;
+
+	if (!map->reg_defaults_raw) {
+		dev_warn(map->dev, "No cache defaults, reading back from HW\n");
+		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
+		if (!tmp_buf)
+			return -ENOMEM;
+		ret = regmap_bulk_read(map, 0, tmp_buf,
+				       map->num_reg_defaults_raw);
+		if (ret < 0) {
+			kfree(tmp_buf);
+			return ret;
+		}
+		map->reg_defaults_raw = tmp_buf;
+		map->cache_free = 1;
+	}
+
+	/* calculate the size of reg_defaults */
+	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
+		val = regcache_get_val(map->reg_defaults_raw,
+				       i, map->cache_word_size);
+		if (!val)
+			continue;
+		count++;
+	}
+
+	map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
+				      GFP_KERNEL);
+	if (!map->reg_defaults)
+		return -ENOMEM;
+
+	/* fill the reg_defaults */
+	map->num_reg_defaults = count;
+	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
+		val = regcache_get_val(map->reg_defaults_raw,
+				       i, map->cache_word_size);
+		if (!val)
+			continue;
+		map->reg_defaults[j].reg = i;
+		map->reg_defaults[j].def = val;
+		j++;
+	}
+
+	return 0;
+}
+
+int regcache_init(struct regmap *map)
+{
+	int ret;
+	int i;
+	void *tmp_buf;
+
+	if (map->cache_type == REGCACHE_NONE) {
+		map->cache_bypass = true;
+		return 0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
+		if (cache_types[i]->type == map->cache_type)
+			break;
+
+	if (i == ARRAY_SIZE(cache_types)) {
+		dev_err(map->dev, "Could not match cache type: %d\n",
+			map->cache_type);
+		return -EINVAL;
+	}
+
+	map->cache = NULL;
+	map->cache_ops = cache_types[i];
+
+	if (!map->cache_ops->read ||
+	    !map->cache_ops->write ||
+	    !map->cache_ops->name)
+		return -EINVAL;
+
+	/* We still need to ensure that the reg_defaults
+	 * won't vanish from under us.  We'll need to make
+	 * a copy of it.
+	 */
+	if (map->reg_defaults) {
+		if (!map->num_reg_defaults)
+			return -EINVAL;
+		tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults *
+				  sizeof(struct reg_default), GFP_KERNEL);
+		if (!tmp_buf)
+			return -ENOMEM;
+		map->reg_defaults = tmp_buf;
+	} else if (map->num_reg_defaults_raw) {
+		/* Some devices such as PMICs don't have cache defaults, so
+		 * we cope with this by reading back the HW registers and
+		 * crafting the cache defaults by hand.
+		 */
+		ret = regcache_hw_init(map);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (!map->max_register)
+		map->max_register = map->num_reg_defaults_raw;
+
+	if (map->cache_ops->init) {
+		dev_dbg(map->dev, "Initializing %s cache\n",
+			map->cache_ops->name);
+		return map->cache_ops->init(map);
+	}
+	return 0;
+}
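+
+/*
+ * Configuration sketch (hypothetical device): a driver opts in to
+ * caching purely through its regmap_config; the field names below are
+ * the ones copied into struct regmap by regmap_init().
+ */
+static struct reg_default example_defaults[] = {
+	{ .reg = 0x00, .def = 0x1234 },
+	{ .reg = 0x03, .def = 0x8000 },
+};
+
+static const struct regmap_config example_config = {
+	.reg_bits = 8,
+	.val_bits = 16,
+	.max_register = 0x7f,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = example_defaults,
+	.num_reg_defaults = ARRAY_SIZE(example_defaults),
+};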
+
+void regcache_exit(struct regmap *map)
+{
+	if (map->cache_type == REGCACHE_NONE)
+		return;
+
+	BUG_ON(!map->cache_ops);
+
+	kfree(map->reg_defaults);
+	if (map->cache_free)
+		kfree(map->reg_defaults_raw);
+
+	if (map->cache_ops->exit) {
+		dev_dbg(map->dev, "Destroying %s cache\n",
+			map->cache_ops->name);
+		map->cache_ops->exit(map);
+	}
+}
+
+/**
+ * regcache_read: Fetch the value of a given register from the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The value to be returned.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_read(struct regmap *map,
+		  unsigned int reg, unsigned int *value)
+{
+	if (map->cache_type == REGCACHE_NONE)
+		return -ENOSYS;
+
+	BUG_ON(!map->cache_ops);
+
+	if (!regmap_readable(map, reg))
+		return -EIO;
+
+	if (!regmap_volatile(map, reg))
+		return map->cache_ops->read(map, reg, value);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regcache_read);
+
+/**
+ * regcache_write: Set the value of a given register in the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The new register value.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_write(struct regmap *map,
+		   unsigned int reg, unsigned int value)
+{
+	if (map->cache_type == REGCACHE_NONE)
+		return 0;
+
+	BUG_ON(!map->cache_ops);
+
+	if (!regmap_writeable(map, reg))
+		return -EIO;
+
+	if (!regmap_volatile(map, reg))
+		return map->cache_ops->write(map, reg, value);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(regcache_write);
+
+/**
+ * regcache_sync: Sync the register cache with the hardware.
+ *
+ * @map: map to configure.
+ *
+ * Any registers that should not be synced should be marked as
+ * volatile.  In general drivers can choose not to use the provided
+ * syncing functionality if they so require.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_sync(struct regmap *map)
+{
+	int ret = 0;
+	unsigned int val;
+	unsigned int i;
+	const char *name;
+	unsigned int bypass;
+
+	BUG_ON(!map->cache_ops);
+
+	mutex_lock(&map->lock);
+	/* Remember the initial bypass state */
+	bypass = map->cache_bypass;
+	dev_dbg(map->dev, "Syncing %s cache\n",
+		map->cache_ops->name);
+	name = map->cache_ops->name;
+	trace_regcache_sync(map->dev, name, "start");
+	if (map->cache_ops->sync) {
+		ret = map->cache_ops->sync(map);
+	} else {
+		for (i = 0; i < map->num_reg_defaults; i++) {
+			ret = regcache_read(map, i, &val);
+			if (ret < 0)
+				goto out;
+			map->cache_bypass = 1;
+			ret = _regmap_write(map, i, val);
+			map->cache_bypass = 0;
+			if (ret < 0)
+				goto out;
+			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+				map->reg_defaults[i].reg,
+				map->reg_defaults[i].def);
+		}
+
+	}
+out:
+	trace_regcache_sync(map->dev, name, "stop");
+	/* Restore the bypass state */
+	map->cache_bypass = bypass;
+	mutex_unlock(&map->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_sync);
+
+/**
+ * regcache_cache_only: Put a register map into cache only mode
+ *
+ * @map: map to configure
+ * @enable: flag if writes should update only the cache
+ *
+ * When a register map is marked as cache only writes to the register
+ * map API will only update the register cache, they will not cause
+ * any hardware changes.  This is useful for allowing portions of
+ * drivers to act as though the device were functioning as normal when
+ * it is disabled for power saving reasons.
+ */
+void regcache_cache_only(struct regmap *map, bool enable)
+{
+	mutex_lock(&map->lock);
+	WARN_ON(map->cache_bypass && enable);
+	map->cache_only = enable;
+	mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_only);
+
+/**
+ * regcache_cache_bypass: Put a register map into cache bypass mode
+ *
+ * @map: map to configure
+ * @enable: flag if writes should bypass the cache
+ *
+ * When a register map is marked with the cache bypass option, writes
+ * to the register map API will only update the hardware and not the
+ * cache.  This is useful when syncing the cache back to the hardware.
+ */
+void regcache_cache_bypass(struct regmap *map, bool enable)
+{
+	mutex_lock(&map->lock);
+	WARN_ON(map->cache_only && enable);
+	map->cache_bypass = enable;
+	mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_bypass);
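+
+/*
+ * Typical usage sketch (hypothetical driver suspend/resume callbacks):
+ * mark the map cache-only before cutting power so writes land in the
+ * cache, then replay the cached state once power is back.
+ */
+static int example_suspend(struct regmap *map)
+{
+	regcache_cache_only(map, true);
+	/* ... power the device down ... */
+	return 0;
+}
+
+static int example_resume(struct regmap *map)
+{
+	/* ... power the device back up ... */
+	regcache_cache_only(map, false);
+	return regcache_sync(map);
+}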
+
+bool regcache_set_val(void *base, unsigned int idx,
+		      unsigned int val, unsigned int word_size)
+{
+	switch (word_size) {
+	case 1: {
+		u8 *cache = base;
+		if (cache[idx] == val)
+			return true;
+		cache[idx] = val;
+		break;
+	}
+	case 2: {
+		u16 *cache = base;
+		if (cache[idx] == val)
+			return true;
+		cache[idx] = val;
+		break;
+	}
+	default:
+		BUG();
+	}
+	/* unreachable */
+	return false;
+}
+
+unsigned int regcache_get_val(const void *base, unsigned int idx,
+			      unsigned int word_size)
+{
+	if (!base)
+		return -EINVAL;
+
+	switch (word_size) {
+	case 1: {
+		const u8 *cache = base;
+		return cache[idx];
+	}
+	case 2: {
+		const u16 *cache = base;
+		return cache[idx];
+	}
+	default:
+		BUG();
+	}
+	/* unreachable */
+	return -1;
+}
+
+static int regcache_default_cmp(const void *a, const void *b)
+{
+	const struct reg_default *_a = a;
+	const struct reg_default *_b = b;
+
+	return _a->reg - _b->reg;
+}
+
+int regcache_lookup_reg(struct regmap *map, unsigned int reg)
+{
+	struct reg_default key;
+	struct reg_default *r;
+
+	key.reg = reg;
+	key.def = 0;
+
+	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
+		    sizeof(struct reg_default), regcache_default_cmp);
+
+	if (r)
+		return r - map->reg_defaults;
+	else
+		return -ENOENT;
+}
+
+int regcache_insert_reg(struct regmap *map, unsigned int reg,
+			unsigned int val)
+{
+	void *tmp;
+
+	tmp = krealloc(map->reg_defaults,
+		       (map->num_reg_defaults + 1) * sizeof(struct reg_default),
+		       GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+	map->reg_defaults = tmp;
+	map->num_reg_defaults++;
+	map->reg_defaults[map->num_reg_defaults - 1].reg = reg;
+	map->reg_defaults[map->num_reg_defaults - 1].def = val;
+	sort(map->reg_defaults, map->num_reg_defaults,
+	     sizeof(struct reg_default), regcache_default_cmp, NULL);
+	return 0;
+}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
new file mode 100644
index 000000000000..6f397476e27c
--- /dev/null
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -0,0 +1,209 @@
+/*
+ * Register map access API - debugfs
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include "internal.h"
+
+static struct dentry *regmap_debugfs_root;
+
+/* Calculate the width needed to print the largest register address */
+static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
+{
+	snprintf(buf, buf_size, "%x", max_val);
+	return strlen(buf);
+}
+
+static int regmap_open_file(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	int reg_len, val_len, tot_len;
+	size_t buf_pos = 0;
+	loff_t p = 0;
+	ssize_t ret;
+	int i;
+	struct regmap *map = file->private_data;
+	char *buf;
+	unsigned int val;
+
+	if (*ppos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Calculate the length of a fixed format */
+	reg_len = regmap_calc_reg_len(map->max_register, buf, count);
+	val_len = 2 * map->format.val_bytes;
+	tot_len = reg_len + val_len + 3;      /* : \n */
+
+	for (i = 0; i < map->max_register + 1; i++) {
+		if (!regmap_readable(map, i))
+			continue;
+
+		if (regmap_precious(map, i))
+			continue;
+
+		/* If we're in the region the user is trying to read */
+		if (p >= *ppos) {
+			/* ...but not beyond it */
+			if (buf_pos >= count - 1 - tot_len)
+				break;
+
+			/* Format the register */
+			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
+				 reg_len, i);
+			buf_pos += reg_len + 2;
+
+			/* Format the value, write all X if we can't read */
+			ret = regmap_read(map, i, &val);
+			if (ret == 0)
+				snprintf(buf + buf_pos, count - buf_pos,
+					 "%.*x", val_len, val);
+			else
+				memset(buf + buf_pos, 'X', val_len);
+			buf_pos += 2 * map->format.val_bytes;
+
+			buf[buf_pos++] = '\n';
+		}
+		p += tot_len;
+	}
+
+	ret = buf_pos;
+
+	if (copy_to_user(user_buf, buf, buf_pos)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	*ppos += buf_pos;
+
+out:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations regmap_map_fops = {
+	.open = regmap_open_file,
+	.read = regmap_map_read_file,
+	.llseek = default_llseek,
+};
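+
+/*
+ * Example "registers" file contents for a map with max_register 0x7f
+ * and 16-bit values (made-up data); registers that exist but cannot
+ * be read are filled with 'X':
+ *
+ *	00: 1234
+ *	01: 0000
+ *	02: XXXX
+ */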
+
+static ssize_t regmap_access_read_file(struct file *file,
+				       char __user *user_buf, size_t count,
+				       loff_t *ppos)
+{
+	int reg_len, tot_len;
+	size_t buf_pos = 0;
+	loff_t p = 0;
+	ssize_t ret;
+	int i;
+	struct regmap *map = file->private_data;
+	char *buf;
+
+	if (*ppos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Calculate the length of a fixed format */
+	reg_len = regmap_calc_reg_len(map->max_register, buf, count);
+	tot_len = reg_len + 10; /* ': R W V P\n' */
+
+	for (i = 0; i < map->max_register + 1; i++) {
+		/* Ignore registers which are neither readable nor writable */
+		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
+			continue;
+
+		/* If we're in the region the user is trying to read */
+		if (p >= *ppos) {
+			/* ...but not beyond it */
+			if (buf_pos >= count - 1 - tot_len)
+				break;
+
+			/* Format the register */
+			snprintf(buf + buf_pos, count - buf_pos,
+				 "%.*x: %c %c %c %c\n",
+				 reg_len, i,
+				 regmap_readable(map, i) ? 'y' : 'n',
+				 regmap_writeable(map, i) ? 'y' : 'n',
+				 regmap_volatile(map, i) ? 'y' : 'n',
+				 regmap_precious(map, i) ? 'y' : 'n');
+
+			buf_pos += tot_len;
+		}
+		p += tot_len;
+	}
+
+	ret = buf_pos;
+
+	if (copy_to_user(user_buf, buf, buf_pos)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	*ppos += buf_pos;
+
+out:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations regmap_access_fops = {
+	.open = regmap_open_file,
+	.read = regmap_access_read_file,
+	.llseek = default_llseek,
+};
+
+void regmap_debugfs_init(struct regmap *map)
+{
+	map->debugfs = debugfs_create_dir(dev_name(map->dev),
+					  regmap_debugfs_root);
+	if (!map->debugfs) {
+		dev_warn(map->dev, "Failed to create debugfs directory\n");
+		return;
+	}
+
+	if (map->max_register) {
+		debugfs_create_file("registers", 0400, map->debugfs,
+				    map, &regmap_map_fops);
+		debugfs_create_file("access", 0400, map->debugfs,
+				    map, &regmap_access_fops);
+	}
+}
+
+void regmap_debugfs_exit(struct regmap *map)
+{
+	debugfs_remove_recursive(map->debugfs);
+}
+
+void regmap_debugfs_initcall(void)
+{
+	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
+	if (!regmap_debugfs_root) {
+		pr_warn("regmap: Failed to create debugfs root\n");
+		return;
+	}
+}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index c4f7a45cd2c3..38621ec87c05 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -90,11 +90,9 @@ static int regmap_i2c_read(struct device *dev,
 }
 
 static struct regmap_bus regmap_i2c = {
-	.type = &i2c_bus_type,
 	.write = regmap_i2c_write,
 	.gather_write = regmap_i2c_gather_write,
 	.read = regmap_i2c_read,
-	.owner = THIS_MODULE,
 };
 
 /**
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index f8396945d6ed..2560658de344 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -48,11 +48,9 @@ static int regmap_spi_read(struct device *dev,
 }
 
 static struct regmap_bus regmap_spi = {
-	.type = &spi_bus_type,
 	.write = regmap_spi_write,
 	.gather_write = regmap_spi_gather_write,
 	.read = regmap_spi_read,
-	.owner = THIS_MODULE,
 	.read_flag_mask = 0x80,
 };
 
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0eef4da1ac61..bf441db1ee90 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -15,29 +15,54 @@
 #include <linux/mutex.h>
 #include <linux/err.h>
 
-#include <linux/regmap.h>
-
-struct regmap;
-
-struct regmap_format {
-	size_t buf_size;
-	size_t reg_bytes;
-	size_t val_bytes;
-	void (*format_write)(struct regmap *map,
-			     unsigned int reg, unsigned int val);
-	void (*format_reg)(void *buf, unsigned int reg);
-	void (*format_val)(void *buf, unsigned int val);
-	unsigned int (*parse_val)(void *buf);
-};
-
-struct regmap {
-	struct mutex lock;
-
-	struct device *dev; /* Device we do I/O on */
-	void *work_buf;     /* Scratch buffer used to format I/O */
-	struct regmap_format format;  /* Buffer format */
-	const struct regmap_bus *bus;
-};
+#define CREATE_TRACE_POINTS
+#include <trace/events/regmap.h>
+
+#include "internal.h"
+
+bool regmap_writeable(struct regmap *map, unsigned int reg)
+{
+	if (map->max_register && reg > map->max_register)
+		return false;
+
+	if (map->writeable_reg)
+		return map->writeable_reg(map->dev, reg);
+
+	return true;
+}
+
+bool regmap_readable(struct regmap *map, unsigned int reg)
+{
+	if (map->max_register && reg > map->max_register)
+		return false;
+
+	if (map->readable_reg)
+		return map->readable_reg(map->dev, reg);
+
+	return true;
+}
+
+bool regmap_volatile(struct regmap *map, unsigned int reg)
+{
+	if (map->max_register && reg > map->max_register)
+		return false;
+
+	if (map->volatile_reg)
+		return map->volatile_reg(map->dev, reg);
+
+	return true;
+}
+
+bool regmap_precious(struct regmap *map, unsigned int reg)
+{
+	if (map->max_register && reg > map->max_register)
+		return false;
+
+	if (map->precious_reg)
+		return map->precious_reg(map->dev, reg);
+
+	return false;
+}
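[Editor's note: all four predicates share the same shape: reject out-of-range registers, defer to a driver callback when one was supplied, and otherwise apply a default; note the asymmetry that a register with no callback is assumed readable, writable and volatile, but never precious. A driver opts in through its regmap_config callbacks; a minimal sketch, where the foo_* names and registers are hypothetical:

	static bool foo_volatile_reg(struct device *dev, unsigned int reg)
	{
		/* the status register changes underneath us; never cache it */
		return reg == FOO_STATUS;
	}

	static bool foo_precious_reg(struct device *dev, unsigned int reg)
	{
		/* reading this register would ack pending interrupts */
		return reg == FOO_IRQ_STATUS;
	}
]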
 
 static void regmap_format_4_12_write(struct regmap *map,
 				     unsigned int reg, unsigned int val)
@@ -116,6 +141,25 @@ struct regmap *regmap_init(struct device *dev,
 	map->format.val_bytes = config->val_bits / 8;
 	map->dev = dev;
 	map->bus = bus;
+	map->max_register = config->max_register;
+	map->writeable_reg = config->writeable_reg;
+	map->readable_reg = config->readable_reg;
+	map->volatile_reg = config->volatile_reg;
+	map->precious_reg = config->precious_reg;
+	map->cache_type = config->cache_type;
+	map->reg_defaults = config->reg_defaults;
+	map->num_reg_defaults = config->num_reg_defaults;
+	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
+	map->reg_defaults_raw = config->reg_defaults_raw;
+	map->cache_size_raw = (config->val_bits / 8) * config->num_reg_defaults_raw;
+	map->cache_word_size = config->val_bits / 8;
+
+	if (config->read_flag_mask || config->write_flag_mask) {
+		map->read_flag_mask = config->read_flag_mask;
+		map->write_flag_mask = config->write_flag_mask;
+	} else {
+		map->read_flag_mask = bus->read_flag_mask;
+	}
 
 	switch (config->reg_bits) {
 	case 4:
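[Editor's note: the flag mask logic gives the device's regmap_config precedence over the bus: if either mask is set in the config, both are taken from it, so a config that sets only write_flag_mask deliberately leaves read_flag_mask at zero; only a config with neither set inherits the bus's read_flag_mask. A hypothetical config fragment for a device that flags writes rather than reads:

	static const struct regmap_config foo_config = {
		.reg_bits = 8,
		.val_bits = 8,
		.write_flag_mask = 0x80,	/* hypothetical; read_flag_mask stays 0 */
	};
]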
@@ -168,13 +212,17 @@ struct regmap *regmap_init(struct device *dev,
 	map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
 	if (map->work_buf == NULL) {
 		ret = -ENOMEM;
-		goto err_bus;
+		goto err_map;
 	}
 
+	ret = regcache_init(map);
+	if (ret < 0)
+		goto err_map;
+
+	regmap_debugfs_init(map);
+
 	return map;
 
-err_bus:
-	module_put(map->bus->owner);
 err_map:
 	kfree(map);
 err:
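[Editor's note: one subtlety in the reworked error handling: when regcache_init() fails, the goto lands on err_map, which frees the map itself but not the work_buf allocated just above it, so the scratch buffer appears to leak on that path. A fix would presumably add a dedicated label, roughly:

	ret = regcache_init(map);
	if (ret < 0)
		goto err_free_workbuf;	/* hypothetical label that kfrees map->work_buf */
]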
@@ -187,8 +235,9 @@ EXPORT_SYMBOL_GPL(regmap_init);
  */
 void regmap_exit(struct regmap *map)
 {
+	regcache_exit(map);
+	regmap_debugfs_exit(map);
 	kfree(map->work_buf);
-	module_put(map->bus->owner);
 	kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -196,19 +245,38 @@ EXPORT_SYMBOL_GPL(regmap_exit);
 static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 			     const void *val, size_t val_len)
 {
+	u8 *u8 = map->work_buf;
 	void *buf;
 	int ret = -ENOTSUPP;
 	size_t len;
+	int i;
+
+	/* Check for unwritable registers before we start */
+	if (map->writeable_reg)
+		for (i = 0; i < val_len / map->format.val_bytes; i++)
+			if (!map->writeable_reg(map->dev, reg + i))
+				return -EINVAL;
 
 	map->format.format_reg(map->work_buf, reg);
 
-	/* Try to do a gather write if we can */
-	if (map->bus->gather_write)
+	u8[0] |= map->write_flag_mask;
+
+	trace_regmap_hw_write_start(map->dev, reg,
+				    val_len / map->format.val_bytes);
+
+	/* If we're doing a single register write we can probably just
+	 * send the work_buf directly, otherwise try to do a gather
+	 * write.
+	 */
+	if (val == map->work_buf + map->format.reg_bytes)
+		ret = map->bus->write(map->dev, map->work_buf,
+				      map->format.reg_bytes + val_len);
+	else if (map->bus->gather_write)
 		ret = map->bus->gather_write(map->dev, map->work_buf,
 					     map->format.reg_bytes,
 					     val, val_len);
 
-	/* Otherwise fall back on linearising by hand. */
+	/* If that didn't work fall back on linearising by hand. */
 	if (ret == -ENOTSUPP) {
 		len = map->format.reg_bytes + val_len;
 		buf = kmalloc(len, GFP_KERNEL);
@@ -222,19 +290,39 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		kfree(buf);
 	}
 
+	trace_regmap_hw_write_done(map->dev, reg,
+				   val_len / map->format.val_bytes);
+
 	return ret;
 }
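[Editor's note: the single-register fast path above leans on the work_buf layout that _regmap_write() establishes: the formatted register address fills the first reg_bytes and the formatted value is placed directly after it, so a val pointer equal to work_buf + reg_bytes means address and value are already contiguous and a single bus->write() of the whole buffer is enough. As a sketch:

	/*
	 * work_buf layout for a single-register write:
	 *
	 *   [ reg_bytes: formatted address ][ val_bytes: formatted value ]
	 *   ^ work_buf                      ^ val == work_buf + reg_bytes
	 */
]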
 
-static int _regmap_write(struct regmap *map, unsigned int reg,
-			 unsigned int val)
+int _regmap_write(struct regmap *map, unsigned int reg,
+		  unsigned int val)
 {
+	int ret;
 	BUG_ON(!map->format.format_write && !map->format.format_val);
 
+	if (!map->cache_bypass) {
+		ret = regcache_write(map, reg, val);
+		if (ret != 0)
+			return ret;
+		if (map->cache_only)
+			return 0;
+	}
+
+	trace_regmap_reg_write(map->dev, reg, val);
+
 	if (map->format.format_write) {
 		map->format.format_write(map, reg, val);
 
-		return map->bus->write(map->dev, map->work_buf,
-				       map->format.buf_size);
+		trace_regmap_hw_write_start(map->dev, reg, 1);
+
+		ret = map->bus->write(map->dev, map->work_buf,
+				      map->format.buf_size);
+
+		trace_regmap_hw_write_done(map->dev, reg, 1);
+
+		return ret;
 	} else {
 		map->format.format_val(map->work_buf + map->format.reg_bytes,
 				       val);
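[Editor's note: the two cache flags give _regmap_write() three behaviours: cache_bypass skips the cache entirely, the normal path writes the cache first and then the hardware, and cache_only returns once the cache is updated without touching the device. A sketch of how a driver might use this across suspend, assuming regcache_cache_only() and regcache_sync() as the accessors provided by the cache core:

	/* suspend: let writes land in the cache only */
	regcache_cache_only(map, true);
	regmap_write(map, FOO_CTRL, 0x01);	/* FOO_CTRL is hypothetical */

	/* resume: re-enable hardware access and write the cache back */
	regcache_cache_only(map, false);
	regcache_sync(map);
]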
@@ -289,6 +377,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 {
 	int ret;
 
+	WARN_ON(map->cache_type != REGCACHE_NONE);
+
 	mutex_lock(&map->lock);
 
 	ret = _regmap_raw_write(map, reg, val, val_len);
@@ -308,20 +398,23 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	map->format.format_reg(map->work_buf, reg);
 
 	/*
-	 * Some buses flag reads by setting the high bits in the
+	 * Some buses or devices flag reads by setting the high bits in the
 	 * register address; since it's always the high bits for all
 	 * current formats we can do this here rather than in
 	 * formatting.  This may break if we get interesting formats.
 	 */
-	if (map->bus->read_flag_mask)
-		u8[0] |= map->bus->read_flag_mask;
+	u8[0] |= map->read_flag_mask;
+
+	trace_regmap_hw_read_start(map->dev, reg,
+				   val_len / map->format.val_bytes);
 
 	ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
 			     val, val_len);
-	if (ret != 0)
-		return ret;
 
-	return 0;
+	trace_regmap_hw_read_done(map->dev, reg,
+				  val_len / map->format.val_bytes);
+
+	return ret;
 }
 
 static int _regmap_read(struct regmap *map, unsigned int reg,
@@ -332,9 +425,20 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 	if (!map->format.parse_val)
 		return -EINVAL;
 
+	if (!map->cache_bypass) {
+		ret = regcache_read(map, reg, val);
+		if (ret == 0)
+			return 0;
+	}
+
+	if (map->cache_only)
+		return -EBUSY;
+
 	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
-	if (ret == 0)
+	if (ret == 0) {
 		*val = map->format.parse_val(map->work_buf);
+		trace_regmap_reg_read(map->dev, reg, *val);
+	}
 
 	return ret;
 }
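[Editor's note: the ordering on the read side matters: the cache is consulted before the cache_only check, so in cache-only mode a cached register still reads back successfully and only a cache miss fails with -EBUSY; the hardware is touched solely when the cache is bypassed or misses.]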
@@ -378,6 +482,14 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 		    size_t val_len)
 {
 	int ret;
+	int i;
+	bool vol = true;
+
+	for (i = 0; i < val_len / map->format.val_bytes; i++)
+		if (!regmap_volatile(map, reg + i))
+			vol = false;
+
+	WARN_ON(!vol && map->cache_type != REGCACHE_NONE);
 
 	mutex_lock(&map->lock);
 
@@ -405,16 +517,30 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 {
 	int ret, i;
 	size_t val_bytes = map->format.val_bytes;
+	bool vol = true;
 
 	if (!map->format.parse_val)
 		return -EINVAL;
 
-	ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
-	if (ret != 0)
-		return ret;
+	/* Is this a block of volatile registers? */
+	for (i = 0; i < val_count; i++)
+		if (!regmap_volatile(map, reg + i))
+			vol = false;
+
+	if (vol || map->cache_type == REGCACHE_NONE) {
+		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
+		if (ret != 0)
+			return ret;
 
-	for (i = 0; i < val_count * val_bytes; i += val_bytes)
-		map->format.parse_val(val + i);
+		for (i = 0; i < val_count * val_bytes; i += val_bytes)
+			map->format.parse_val(val + i);
+	} else {
+		for (i = 0; i < val_count; i++) {
+			ret = regmap_read(map, reg + i, val + (i * val_bytes));
+			if (ret != 0)
+				return ret;
+		}
+	}
 
 	return 0;
 }
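[Editor's note: regmap_bulk_read() is the cache-aware counterpart: a block that is entirely volatile (or an uncached map) goes out as one raw read with the values parsed in place, while anything else falls back to one regmap_read() per register so cached values are honoured. One caveat: the fallback passes val + i * val_bytes to regmap_read(), which stores a full unsigned int there, so this path looks unsafe for formats narrower than an int. A hypothetical caller reading four 16-bit registers:

	u16 vals[4];	/* assuming a 16-bit val_bits format */
	int ret;

	ret = regmap_bulk_read(map, FOO_BASE, vals, ARRAY_SIZE(vals));	/* FOO_BASE is hypothetical */
	if (ret != 0)
		return ret;
	/* vals[] now holds the parsed register values */
]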
@@ -453,3 +579,11 @@ out:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_update_bits);
+
+static int __init regmap_initcall(void)
+{
+	regmap_debugfs_initcall();
+
+	return 0;
+}
+postcore_initcall(regmap_initcall);
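[Editor's note: registering the debugfs root from a postcore_initcall() means it is created early in boot, ahead of the device and subsystem initcalls that call regmap_init(), so regmap_debugfs_init() can rely on regmap_debugfs_root already existing when drivers probe.]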