author     Linus Torvalds <torvalds@linux-foundation.org>  2011-07-22 16:01:57 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-07-22 16:01:57 -0700
commit     431bf99d26157d56689e5de65bd27ce9f077fc3f (patch)
tree       b15e357039956fcdd0e0e6177d2fc99bb3cfa822 /drivers
parent     72f96e0e38d7e29ba16dcfd824ecaebe38b8293e (diff)
parent     7ae033cc0dfce68d8e0c83aca60837cf2bf0d2e6 (diff)
download   linux-431bf99d26157d56689e5de65bd27ce9f077fc3f.tar.gz
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6: (51 commits)
  PM: Improve error code of pm_notifier_call_chain()
  PM: Add "RTC" to PM trace time stamps to avoid confusion
  PM / Suspend: Export suspend_set_ops, suspend_valid_only_mem
  PM / Suspend: Add .suspend_again() callback to suspend_ops
  PM / OPP: Introduce function to free cpufreq table
  ARM / shmobile: Return -EBUSY from A4LC power off if A3RV is active
  PM / Domains: Take .power_off() error code into account
  ARM / shmobile: Use genpd_queue_power_off_work()
  ARM / shmobile: Use pm_genpd_poweroff_unused()
  PM / Domains: Introduce function to power off all unused PM domains
  OMAP: PM: disable idle on suspend for GPIO and UART
  OMAP: PM: omap_device: add API to disable idle on suspend
  OMAP: PM: omap_device: add system PM methods for PM domain handling
  OMAP: PM: omap_device: conditionally use PM domain runtime helpers
  PM / Runtime: Add new helper function: pm_runtime_status_suspended()
  PM / Domains: Queue up power off work only if it is not pending
  PM / Domains: Improve handling of wakeup devices during system suspend
  PM / Domains: Do not restore all devices on power off error
  PM / Domains: Allow callbacks to execute all runtime PM helpers
  PM / Domains: Do not execute device callbacks under locks
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/power/Makefile      |    1
-rw-r--r--  drivers/base/power/clock_ops.c   |  234
-rw-r--r--  drivers/base/power/domain.c      | 1273
-rw-r--r--  drivers/base/power/generic_ops.c |   98
-rw-r--r--  drivers/base/power/main.c        |   65
-rw-r--r--  drivers/base/power/opp.c         |   17
-rw-r--r--  drivers/base/power/runtime.c     |   89
-rw-r--r--  drivers/base/power/sysfs.c       |    6
-rw-r--r--  drivers/base/power/trace.c       |    2
-rw-r--r--  drivers/char/apm-emulation.c     |    2
-rw-r--r--  drivers/pci/pci-driver.c         |   18
-rw-r--r--  drivers/s390/char/vmwatchdog.c   |    4
-rw-r--r--  drivers/s390/cio/css.c           |    8
-rw-r--r--  drivers/scsi/scsi_pm.c           |    8
14 files changed, 1637 insertions(+), 188 deletions(-)
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 3647e114d0e7..2639ae79a372 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 obj-$(CONFIG_PM_OPP)	+= opp.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS)	+=  domain.o
 obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
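
The new object is built only when CONFIG_PM_GENERIC_DOMAINS is set. That option is
not user-visible; a platform that wants generic PM domains selects it from its own
Kconfig. A minimal sketch (ARCH_FOO is a hypothetical stand-in for a real platform
symbol, not part of this patch):

	config ARCH_FOO
		bool "Foo SoC support"
		select PM_GENERIC_DOMAINS if PM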
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index ad367c4139b1..a846b2f95cfb 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -15,9 +15,9 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 
-struct pm_runtime_clk_data {
+struct pm_clk_data {
 	struct list_head clock_list;
 	struct mutex lock;
 };
@@ -36,25 +36,25 @@ struct pm_clock_entry {
 	enum pce_status status;
 };
 
-static struct pm_runtime_clk_data *__to_prd(struct device *dev)
+static struct pm_clk_data *__to_pcd(struct device *dev)
 {
 	return dev ? dev->power.subsys_data : NULL;
 }
 
 /**
- * pm_runtime_clk_add - Start using a device clock for runtime PM.
- * @dev: Device whose clock is going to be used for runtime PM.
+ * pm_clk_add - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
  * @con_id: Connection ID of the clock.
  *
  * Add the clock represented by @con_id to the list of clocks used for
- * the runtime PM of @dev.
+ * the power management of @dev.
  */
-int pm_runtime_clk_add(struct device *dev, const char *con_id)
+int pm_clk_add(struct device *dev, const char *con_id)
 {
-	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!prd)
+	if (!pcd)
 		return -EINVAL;
 
 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -73,20 +73,20 @@ int pm_runtime_clk_add(struct device *dev, const char *con_id)
 		}
 	}
 
-	mutex_lock(&prd->lock);
-	list_add_tail(&ce->node, &prd->clock_list);
-	mutex_unlock(&prd->lock);
+	mutex_lock(&pcd->lock);
+	list_add_tail(&ce->node, &pcd->clock_list);
+	mutex_unlock(&pcd->lock);
 	return 0;
 }
 
 /**
- * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
- * @ce: Runtime PM clock entry to destroy.
+ * __pm_clk_remove - Destroy PM clock entry.
+ * @ce: PM clock entry to destroy.
  *
- * This routine must be called under the mutex protecting the runtime PM list
- * of clocks corresponding the the @ce's device.
+ * This routine must be called under the mutex protecting the PM list of clocks
+ * corresponding to the @ce's device.
  */
-static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+static void __pm_clk_remove(struct pm_clock_entry *ce)
 {
 	if (!ce)
 		return;
@@ -108,95 +108,99 @@ static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
 }
 
 /**
- * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
- * @dev: Device whose clock should not be used for runtime PM any more.
+ * pm_clk_remove - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
  * @con_id: Connection ID of the clock.
  *
  * Remove the clock represented by @con_id from the list of clocks used for
- * the runtime PM of @dev.
+ * the power management of @dev.
  */
-void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+void pm_clk_remove(struct device *dev, const char *con_id)
 {
-	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!prd)
+	if (!pcd)
 		return;
 
-	mutex_lock(&prd->lock);
+	mutex_lock(&pcd->lock);
 
-	list_for_each_entry(ce, &prd->clock_list, node) {
+	list_for_each_entry(ce, &pcd->clock_list, node) {
 		if (!con_id && !ce->con_id) {
-			__pm_runtime_clk_remove(ce);
+			__pm_clk_remove(ce);
 			break;
 		} else if (!con_id || !ce->con_id) {
 			continue;
 		} else if (!strcmp(con_id, ce->con_id)) {
-			__pm_runtime_clk_remove(ce);
+			__pm_clk_remove(ce);
 			break;
 		}
 	}
 
-	mutex_unlock(&prd->lock);
+	mutex_unlock(&pcd->lock);
 }
 
 /**
- * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
- * @dev: Device to initialize the list of runtime PM clocks for.
+ * pm_clk_init - Initialize a device's list of power management clocks.
+ * @dev: Device to initialize the list of PM clocks for.
  *
- * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
+ * Allocate a struct pm_clk_data object, initialize its lock member and
  * make the @dev's power.subsys_data field point to it.
  */
-int pm_runtime_clk_init(struct device *dev)
+int pm_clk_init(struct device *dev)
 {
-	struct pm_runtime_clk_data *prd;
+	struct pm_clk_data *pcd;
 
-	prd = kzalloc(sizeof(*prd), GFP_KERNEL);
-	if (!prd) {
-		dev_err(dev, "Not enough memory fo runtime PM data.\n");
+	pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
+	if (!pcd) {
+		dev_err(dev, "Not enough memory for PM clock data.\n");
 		return -ENOMEM;
 	}
 
-	INIT_LIST_HEAD(&prd->clock_list);
-	mutex_init(&prd->lock);
-	dev->power.subsys_data = prd;
+	INIT_LIST_HEAD(&pcd->clock_list);
+	mutex_init(&pcd->lock);
+	dev->power.subsys_data = pcd;
 	return 0;
 }
 
 /**
- * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
- * @dev: Device to destroy the list of runtime PM clocks for.
+ * pm_clk_destroy - Destroy a device's list of power management clocks.
+ * @dev: Device to destroy the list of PM clocks for.
  *
  * Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_runtime_clk_data object pointed to by it before and free
+ * from the struct pm_clk_data object pointed to by it before and free
  * that object.
  */
-void pm_runtime_clk_destroy(struct device *dev)
+void pm_clk_destroy(struct device *dev)
 {
-	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce, *c;
 
-	if (!prd)
+	if (!pcd)
 		return;
 
 	dev->power.subsys_data = NULL;
 
-	mutex_lock(&prd->lock);
+	mutex_lock(&pcd->lock);
 
-	list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
-		__pm_runtime_clk_remove(ce);
+	list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+		__pm_clk_remove(ce);
 
-	mutex_unlock(&prd->lock);
+	mutex_unlock(&pcd->lock);
 
-	kfree(prd);
+	kfree(pcd);
 }
 
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_RUNTIME
+
 /**
- * pm_runtime_clk_acquire - Acquire a device clock.
+ * pm_clk_acquire - Acquire a device clock.
  * @dev: Device whose clock is to be acquired.
  * @con_id: Connection ID of the clock.
  */
-static void pm_runtime_clk_acquire(struct device *dev,
+static void pm_clk_acquire(struct device *dev,
 				    struct pm_clock_entry *ce)
 {
 	ce->clk = clk_get(dev, ce->con_id);
@@ -209,24 +213,24 @@ static void pm_runtime_clk_acquire(struct device *dev,
 }
 
 /**
- * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
  * @dev: Device to disable the clocks for.
  */
-int pm_runtime_clk_suspend(struct device *dev)
+int pm_clk_suspend(struct device *dev)
 {
-	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!prd)
+	if (!pcd)
 		return 0;
 
-	mutex_lock(&prd->lock);
+	mutex_lock(&pcd->lock);
 
-	list_for_each_entry_reverse(ce, &prd->clock_list, node) {
+	list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
 		if (ce->status == PCE_STATUS_NONE)
-			pm_runtime_clk_acquire(dev, ce);
+			pm_clk_acquire(dev, ce);
 
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_disable(ce->clk);
@@ -234,30 +238,30 @@ int pm_runtime_clk_suspend(struct device *dev)
 		}
 	}
 
-	mutex_unlock(&prd->lock);
+	mutex_unlock(&pcd->lock);
 
 	return 0;
 }
 
 /**
- * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
  * @dev: Device to enable the clocks for.
  */
-int pm_runtime_clk_resume(struct device *dev)
+int pm_clk_resume(struct device *dev)
 {
-	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!prd)
+	if (!pcd)
 		return 0;
 
-	mutex_lock(&prd->lock);
+	mutex_lock(&pcd->lock);
 
-	list_for_each_entry(ce, &prd->clock_list, node) {
+	list_for_each_entry(ce, &pcd->clock_list, node) {
 		if (ce->status == PCE_STATUS_NONE)
-			pm_runtime_clk_acquire(dev, ce);
+			pm_clk_acquire(dev, ce);
 
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_enable(ce->clk);
@@ -265,28 +269,28 @@ int pm_runtime_clk_resume(struct device *dev)
 		}
 	}
 
-	mutex_unlock(&prd->lock);
+	mutex_unlock(&pcd->lock);
 
 	return 0;
 }
 
 /**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
  * @nb: Notifier block object this function is a member of.
  * @action: Operation being carried out by the caller.
  * @data: Device the routine is being run for.
  *
  * For this function to work, @nb must be a member of an object of type
  * struct pm_clk_notifier_block containing all of the requisite data.
- * Specifically, the pwr_domain member of that object is copied to the device's
- * pwr_domain field and its con_ids member is used to populate the device's list
- * of runtime PM clocks, depending on @action.
+ * Specifically, the pm_domain member of that object is copied to the device's
+ * pm_domain field and its con_ids member is used to populate the device's list
+ * of PM clocks, depending on @action.
  *
- * If the device's pwr_domain field is already populated with a value different
+ * If the device's pm_domain field is already populated with a value different
  * from the one stored in the struct pm_clk_notifier_block object, the function
  * does nothing.
  */
-static int pm_runtime_clk_notify(struct notifier_block *nb,
+static int pm_clk_notify(struct notifier_block *nb,
 				 unsigned long action, void *data)
 {
 	struct pm_clk_notifier_block *clknb;
@@ -300,28 +304,28 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 
 	switch (action) {
 	case BUS_NOTIFY_ADD_DEVICE:
-		if (dev->pwr_domain)
+		if (dev->pm_domain)
 			break;
 
-		error = pm_runtime_clk_init(dev);
+		error = pm_clk_init(dev);
 		if (error)
 			break;
 
-		dev->pwr_domain = clknb->pwr_domain;
+		dev->pm_domain = clknb->pm_domain;
 		if (clknb->con_ids[0]) {
 			for (con_id = clknb->con_ids; *con_id; con_id++)
-				pm_runtime_clk_add(dev, *con_id);
+				pm_clk_add(dev, *con_id);
 		} else {
-			pm_runtime_clk_add(dev, NULL);
+			pm_clk_add(dev, NULL);
 		}
 
 		break;
 	case BUS_NOTIFY_DEL_DEVICE:
-		if (dev->pwr_domain != clknb->pwr_domain)
+		if (dev->pm_domain != clknb->pm_domain)
 			break;
 
-		dev->pwr_domain = NULL;
-		pm_runtime_clk_destroy(dev);
+		dev->pm_domain = NULL;
+		pm_clk_destroy(dev);
 		break;
 	}
 
@@ -330,6 +334,60 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 
 #else /* !CONFIG_PM_RUNTIME */
 
+#ifdef CONFIG_PM
+
+/**
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_clk_suspend(struct device *dev)
+{
+	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	/* If there is no driver, the clocks are already disabled. */
+	if (!pcd || !dev->driver)
+		return 0;
+
+	mutex_lock(&pcd->lock);
+
+	list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+		clk_disable(ce->clk);
+
+	mutex_unlock(&pcd->lock);
+
+	return 0;
+}
+
+/**
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_clk_resume(struct device *dev)
+{
+	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_clock_entry *ce;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	/* If there is no driver, the clocks should remain disabled. */
+	if (!pcd || !dev->driver)
+		return 0;
+
+	mutex_lock(&pcd->lock);
+
+	list_for_each_entry(ce, &pcd->clock_list, node)
+		clk_enable(ce->clk);
+
+	mutex_unlock(&pcd->lock);
+
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+
 /**
  * enable_clock - Enable a device clock.
  * @dev: Device whose clock is to be enabled.
@@ -365,7 +423,7 @@ static void disable_clock(struct device *dev, const char *con_id)
 }
 
 /**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
  * @nb: Notifier block object this function is a member of.
  * @action: Operation being carried out by the caller.
  * @data: Device the routine is being run for.
@@ -375,7 +433,7 @@ static void disable_clock(struct device *dev, const char *con_id)
  * Specifically, the con_ids member of that object is used to enable or disable
  * the device's clocks, depending on @action.
  */
-static int pm_runtime_clk_notify(struct notifier_block *nb,
+static int pm_clk_notify(struct notifier_block *nb,
 				 unsigned long action, void *data)
 {
 	struct pm_clk_notifier_block *clknb;
@@ -411,21 +469,21 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 #endif /* !CONFIG_PM_RUNTIME */
 
 /**
- * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * pm_clk_add_notifier - Add bus type notifier for power management clocks.
  * @bus: Bus type to add the notifier to.
  * @clknb: Notifier to be added to the given bus type.
  *
  * The nb member of @clknb is not expected to be initialized and its
- * notifier_call member will be replaced with pm_runtime_clk_notify().  However,
+ * notifier_call member will be replaced with pm_clk_notify().  However,
  * the remaining members of @clknb should be populated prior to calling this
  * routine.
  */
-void pm_runtime_clk_add_notifier(struct bus_type *bus,
+void pm_clk_add_notifier(struct bus_type *bus,
 				 struct pm_clk_notifier_block *clknb)
 {
 	if (!bus || !clknb)
 		return;
 
-	clknb->nb.notifier_call = pm_runtime_clk_notify;
+	clknb->nb.notifier_call = pm_clk_notify;
 	bus_register_notifier(bus, &clknb->nb);
 }
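
With the pm_runtime_clk_* to pm_clk_* rename, the clock list is no longer tied to
runtime PM alone.  A minimal sketch of the intended setup, in which every device
added to a bus gets its clock list managed automatically (the "foo" names and the
"fck" connection ID are hypothetical; at this point in the series the declarations
still live in <linux/pm_runtime.h>):

	#include <linux/init.h>
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	/* Gate the listed clocks across runtime suspend/resume.  A real
	 * platform would usually wrap the driver callbacks here as well. */
	static struct dev_pm_domain foo_clk_pm_domain = {
		.ops = {
			.runtime_suspend = pm_clk_suspend,
			.runtime_resume  = pm_clk_resume,
		},
	};

	static struct pm_clk_notifier_block foo_clk_nb = {
		.pm_domain = &foo_clk_pm_domain,
		.con_ids   = { "fck", NULL },
	};

	static int __init foo_pm_init(void)
	{
		/* pm_clk_notify() will set up the clock list and pm_domain
		 * for every device subsequently added to the platform bus. */
		pm_clk_add_notifier(&platform_bus_type, &foo_clk_nb);
		return 0;
	}
	core_initcall(foo_pm_init);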
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
new file mode 100644
index 000000000000..be8714aa9dd6
--- /dev/null
+++ b/drivers/base/power/domain.c
@@ -0,0 +1,1273 @@
+/*
+ * drivers/base/power/domain.c - Common code related to device power domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+
+static LIST_HEAD(gpd_list);
+static DEFINE_MUTEX(gpd_list_lock);
+
+#ifdef CONFIG_PM
+
+static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+	if (IS_ERR_OR_NULL(dev->pm_domain))
+		return ERR_PTR(-EINVAL);
+
+	return pd_to_genpd(dev->pm_domain);
+}
+
+static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+{
+	if (!WARN_ON(genpd->sd_count == 0))
+		genpd->sd_count--;
+}
+
+static void genpd_acquire_lock(struct generic_pm_domain *genpd)
+{
+	DEFINE_WAIT(wait);
+
+	mutex_lock(&genpd->lock);
+	/*
+	 * Wait for the domain to transition into either the active,
+	 * or the power off state.
+	 */
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (genpd->status == GPD_STATE_ACTIVE
+		    || genpd->status == GPD_STATE_POWER_OFF)
+			break;
+		mutex_unlock(&genpd->lock);
+
+		schedule();
+
+		mutex_lock(&genpd->lock);
+	}
+	finish_wait(&genpd->status_wait_queue, &wait);
+}
+
+static void genpd_release_lock(struct generic_pm_domain *genpd)
+{
+	mutex_unlock(&genpd->lock);
+}
+
+static void genpd_set_active(struct generic_pm_domain *genpd)
+{
+	if (genpd->resume_count == 0)
+		genpd->status = GPD_STATE_ACTIVE;
+}
+
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * @genpd: PM domain to power up.
+ *
+ * Restore power to @genpd and all of its parents so that it is possible to
+ * resume a device belonging to it.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+	struct generic_pm_domain *parent = genpd->parent;
+	DEFINE_WAIT(wait);
+	int ret = 0;
+
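+	/*
+	 * Lock ordering: a parent domain is always locked before its child.
+	 * If the parent turns out not to be active, both locks are dropped,
+	 * the parent is powered up first, and the whole sequence is retried.
+	 */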
+ start:
+	if (parent) {
+		genpd_acquire_lock(parent);
+		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		mutex_lock(&genpd->lock);
+	}
+
+	if (genpd->status == GPD_STATE_ACTIVE
+	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+		goto out;
+
+	if (genpd->status != GPD_STATE_POWER_OFF) {
+		genpd_set_active(genpd);
+		goto out;
+	}
+
+	if (parent && parent->status != GPD_STATE_ACTIVE) {
+		mutex_unlock(&genpd->lock);
+		genpd_release_lock(parent);
+
+		ret = pm_genpd_poweron(parent);
+		if (ret)
+			return ret;
+
+		goto start;
+	}
+
+	if (genpd->power_on) {
+		ret = genpd->power_on(genpd);
+		if (ret)
+			goto out;
+	}
+
+	genpd_set_active(genpd);
+	if (parent)
+		parent->sd_count++;
+
+ out:
+	mutex_unlock(&genpd->lock);
+	if (parent)
+		genpd_release_lock(parent);
+
+	return ret;
+}
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_RUNTIME
+
+/**
+ * __pm_genpd_save_device - Save the pre-suspend state of a device.
+ * @dle: Device list entry of the device to save the state of.
+ * @genpd: PM domain the device belongs to.
+ */
+static int __pm_genpd_save_device(struct dev_list_entry *dle,
+				  struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
+{
+	struct device *dev = dle->dev;
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (dle->need_restore)
+		return 0;
+
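+	/*
+	 * Drop the domain lock while the driver callback runs: the callback
+	 * may itself use runtime PM helpers that take this lock (device
+	 * callbacks are not executed under locks elsewhere in this series).
+	 */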
+	mutex_unlock(&genpd->lock);
+
+	if (drv && drv->pm && drv->pm->runtime_suspend) {
+		if (genpd->start_device)
+			genpd->start_device(dev);
+
+		ret = drv->pm->runtime_suspend(dev);
+
+		if (genpd->stop_device)
+			genpd->stop_device(dev);
+	}
+
+	mutex_lock(&genpd->lock);
+
+	if (!ret)
+		dle->need_restore = true;
+
+	return ret;
+}
+
+/**
+ * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
+ * @dle: Device list entry of the device to restore the state of.
+ * @genpd: PM domain the device belongs to.
+ */
+static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+				      struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
+{
+	struct device *dev = dle->dev;
+	struct device_driver *drv = dev->driver;
+
+	if (!dle->need_restore)
+		return;
+
+	mutex_unlock(&genpd->lock);
+
+	if (drv && drv->pm && drv->pm->runtime_resume) {
+		if (genpd->start_device)
+			genpd->start_device(dev);
+
+		drv->pm->runtime_resume(dev);
+
+		if (genpd->stop_device)
+			genpd->stop_device(dev);
+	}
+
+	mutex_lock(&genpd->lock);
+
+	dle->need_restore = false;
+}
+
+/**
+ * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
+ * @genpd: PM domain to check.
+ *
+ * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
+ * a "power off" operation, which means that a "power on" has occured in the
+ * meantime, or if its resume_count field is different from zero, which means
+ * that one of its devices has been resumed in the meantime.
+ */
+static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
+{
+	return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+}
+
+/**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+	if (!work_pending(&genpd->power_off_work))
+		queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
+ * pm_genpd_poweroff - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, run the runtime suspend callbacks provided by all of
+ * the @genpd's devices' drivers and remove power from @genpd.
+ */
+static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
+{
+	struct generic_pm_domain *parent;
+	struct dev_list_entry *dle;
+	unsigned int not_suspended;
+	int ret = 0;
+
+ start:
+	/*
+	 * Do not try to power off the domain in the following situations:
+	 * (1) The domain is already in the "power off" state.
+	 * (2) System suspend is in progress.
+	 * (3) One of the domain's devices is being resumed right now.
+	 */
+	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
+	    || genpd->resume_count > 0)
+		return 0;
+
+	if (genpd->sd_count > 0)
+		return -EBUSY;
+
+	not_suspended = 0;
+	list_for_each_entry(dle, &genpd->dev_list, node)
+		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+			not_suspended++;
+
+	if (not_suspended > genpd->in_progress)
+		return -EBUSY;
+
+	if (genpd->poweroff_task) {
+		/*
+		 * Another instance of pm_genpd_poweroff() is executing
+		 * callbacks, so tell it to start over and return.
+		 */
+		genpd->status = GPD_STATE_REPEAT;
+		return 0;
+	}
+
+	if (genpd->gov && genpd->gov->power_down_ok) {
+		if (!genpd->gov->power_down_ok(&genpd->domain))
+			return -EAGAIN;
+	}
+
+	genpd->status = GPD_STATE_BUSY;
+	genpd->poweroff_task = current;
+
+	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
+		ret = __pm_genpd_save_device(dle, genpd);
+		if (ret) {
+			genpd_set_active(genpd);
+			goto out;
+		}
+
+		if (genpd_abort_poweroff(genpd))
+			goto out;
+
+		if (genpd->status == GPD_STATE_REPEAT) {
+			genpd->poweroff_task = NULL;
+			goto start;
+		}
+	}
+
+	parent = genpd->parent;
+	if (parent) {
+		mutex_unlock(&genpd->lock);
+
+		genpd_acquire_lock(parent);
+		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+
+		if (genpd_abort_poweroff(genpd)) {
+			genpd_release_lock(parent);
+			goto out;
+		}
+	}
+
+	if (genpd->power_off) {
+		ret = genpd->power_off(genpd);
+		if (ret == -EBUSY) {
+			genpd_set_active(genpd);
+			if (parent)
+				genpd_release_lock(parent);
+
+			goto out;
+		}
+	}
+
+	genpd->status = GPD_STATE_POWER_OFF;
+
+	if (parent) {
+		genpd_sd_counter_dec(parent);
+		if (parent->sd_count == 0)
+			genpd_queue_power_off_work(parent);
+
+		genpd_release_lock(parent);
+	}
+
+ out:
+	genpd->poweroff_task = NULL;
+	wake_up_all(&genpd->status_wait_queue);
+	return ret;
+}
+
+/**
+ * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void genpd_power_off_work_fn(struct work_struct *work)
+{
+	struct generic_pm_domain *genpd;
+
+	genpd = container_of(work, struct generic_pm_domain, power_off_work);
+
+	genpd_acquire_lock(genpd);
+	pm_genpd_poweroff(genpd);
+	genpd_release_lock(genpd);
+}
+
+/**
+ * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a runtime suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_runtime_suspend(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (genpd->stop_device) {
+		int ret = genpd->stop_device(dev);
+		if (ret)
+			return ret;
+	}
+
+	mutex_lock(&genpd->lock);
+	genpd->in_progress++;
+	pm_genpd_poweroff(genpd);
+	genpd->in_progress--;
+	mutex_unlock(&genpd->lock);
+
+	return 0;
+}
+
+/**
+ * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * @dev: Device to resume.
+ * @genpd: PM domain the device belongs to.
+ */
+static void __pm_genpd_runtime_resume(struct device *dev,
+				      struct generic_pm_domain *genpd)
+{
+	struct dev_list_entry *dle;
+
+	list_for_each_entry(dle, &genpd->dev_list, node) {
+		if (dle->dev == dev) {
+			__pm_genpd_restore_device(dle, genpd);
+			break;
+		}
+	}
+}
+
+/**
+ * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out a runtime resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_runtime_resume(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	DEFINE_WAIT(wait);
+	int ret;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	ret = pm_genpd_poweron(genpd);
+	if (ret)
+		return ret;
+
+	mutex_lock(&genpd->lock);
+	genpd->status = GPD_STATE_BUSY;
+	genpd->resume_count++;
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		/*
+		 * If current is the powering off task, we have been called
+		 * reentrantly from one of the device callbacks, so we should
+		 * not wait.
+		 */
+		if (!genpd->poweroff_task || genpd->poweroff_task == current)
+			break;
+		mutex_unlock(&genpd->lock);
+
+		schedule();
+
+		mutex_lock(&genpd->lock);
+	}
+	finish_wait(&genpd->status_wait_queue, &wait);
+	__pm_genpd_runtime_resume(dev, genpd);
+	genpd->resume_count--;
+	genpd_set_active(genpd);
+	wake_up_all(&genpd->status_wait_queue);
+	mutex_unlock(&genpd->lock);
+
+	if (genpd->start_device)
+		genpd->start_device(dev);
+
+	return 0;
+}
+
+#else
+
+static inline void genpd_power_off_work_fn(struct work_struct *work) {}
+static inline void __pm_genpd_runtime_resume(struct device *dev,
+					     struct generic_pm_domain *genpd) {}
+
+#define pm_genpd_runtime_suspend	NULL
+#define pm_genpd_runtime_resume		NULL
+
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * @genpd: PM domain to power off, if possible.
+ *
+ * Check if the given PM domain can be powered off (during system suspend or
+ * hibernation) and do that if so.  Also, in that case propagate to its parent.
+ *
+ * This function is only called in "noirq" stages of system power transitions,
+ * so it need not acquire locks (all of the "noirq" callbacks are executed
+ * sequentially, so it is guaranteed that it will never run twice in parallel).
+ */
+static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+{
+	struct generic_pm_domain *parent = genpd->parent;
+
+	if (genpd->status == GPD_STATE_POWER_OFF)
+		return;
+
+	if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+		return;
+
+	if (genpd->power_off)
+		genpd->power_off(genpd);
+
+	genpd->status = GPD_STATE_POWER_OFF;
+	if (parent) {
+		genpd_sd_counter_dec(parent);
+		pm_genpd_sync_poweroff(parent);
+	}
+}
+
+/**
+ * resume_needed - Check whether to resume a device before system suspend.
+ * @dev: Device to check.
+ * @genpd: PM domain the device belongs to.
+ *
+ * There are two cases in which a device that can wake up the system from sleep
+ * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
+ * to wake up the system and it has to remain active for this purpose while the
+ * system is in the sleep state and (2) if the device is not enabled to wake up
+ * the system from sleep states and it generally doesn't generate wakeup signals
+ * by itself (those signals are generated on its behalf by other parts of the
+ * system).  In the latter case it may be necessary to reconfigure the device's
+ * wakeup settings during system suspend, because it may have been set up to
+ * signal remote wakeup from the system's working state as needed by runtime PM.
+ * Return 'true' in either of the above cases.
+ */
+static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
+{
+	bool active_wakeup;
+
+	if (!device_can_wakeup(dev))
+		return false;
+
+	active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
+}
+
+/**
+ * pm_genpd_prepare - Start power transition of a device in a PM domain.
+ * @dev: Device to start the transition of.
+ *
+ * Start a power transition of a device (during a system-wide power transition)
+ * under the assumption that its pm_domain field points to the domain member of
+ * an object of type struct generic_pm_domain representing a PM domain
+ * consisting of I/O devices.
+ */
+static int pm_genpd_prepare(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	int ret;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	/*
+	 * If a wakeup request is pending for the device, it should be woken up
+	 * at this point and a system wakeup event should be reported if it's
+	 * set up to wake up the system from sleep states.
+	 */
+	pm_runtime_get_noresume(dev);
+	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+		pm_wakeup_event(dev, 0);
+
+	if (pm_wakeup_pending()) {
+		pm_runtime_put_sync(dev);
+		return -EBUSY;
+	}
+
+	if (resume_needed(dev, genpd))
+		pm_runtime_resume(dev);
+
+	genpd_acquire_lock(genpd);
+
+	if (genpd->prepared_count++ == 0)
+		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+
+	genpd_release_lock(genpd);
+
+	if (genpd->suspend_power_off) {
+		pm_runtime_put_noidle(dev);
+		return 0;
+	}
+
+	/*
+	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
+	 * so pm_genpd_poweron() will return immediately, but if the device
+	 * is suspended (e.g. it's been stopped by .stop_device()), we need
+	 * to make it operational.
+	 */
+	pm_runtime_resume(dev);
+	__pm_runtime_disable(dev, false);
+
+	ret = pm_generic_prepare(dev);
+	if (ret) {
+		mutex_lock(&genpd->lock);
+
+		if (--genpd->prepared_count == 0)
+			genpd->suspend_power_off = false;
+
+		mutex_unlock(&genpd->lock);
+		pm_runtime_enable(dev);
+	}
+
+	pm_runtime_put_sync(dev);
+	return ret;
+}
+
+/**
+ * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Suspend a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_suspend(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a late suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_suspend_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	int ret;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (genpd->suspend_power_off)
+		return 0;
+
+	ret = pm_generic_suspend_noirq(dev);
+	if (ret)
+		return ret;
+
+	if (device_may_wakeup(dev)
+	    && genpd->active_wakeup && genpd->active_wakeup(dev))
+		return 0;
+
+	if (genpd->stop_device)
+		genpd->stop_device(dev);
+
+	/*
+	 * Since all of the "noirq" callbacks are executed sequentially, it is
+	 * guaranteed that this function will never run twice in parallel for
+	 * the same PM domain, so it is not necessary to use locking here.
+	 */
+	genpd->suspended_count++;
+	pm_genpd_sync_poweroff(genpd);
+
+	return 0;
+}
+
+/**
+ * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_resume_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (genpd->suspend_power_off)
+		return 0;
+
+	/*
+	 * Since all of the "noirq" callbacks are executed sequentially, it is
+	 * guaranteed that this function will never run twice in parallel for
+	 * the same PM domain, so it is not necessary to use locking here.
+	 */
+	pm_genpd_poweron(genpd);
+	genpd->suspended_count--;
+	if (genpd->start_device)
+		genpd->start_device(dev);
+
+	return pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_resume - Resume a device belonging to an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Resume a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_resume(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
+ * @dev: Device to freeze.
+ *
+ * Freeze a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_freeze(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_freeze_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	int ret;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (genpd->suspend_power_off)
+		return 0;
+
+	ret = pm_generic_freeze_noirq(dev);
+	if (ret)
+		return ret;
+
+	if (genpd->stop_device)
+		genpd->stop_device(dev);
+
+	return 0;
+}
+
+/**
+ * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
+ * @dev: Device to thaw.
+ *
+ * Carry out an early thaw of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_thaw_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (genpd->suspend_power_off)
+		return 0;
+
+	if (genpd->start_device)
+		genpd->start_device(dev);
+
+	return pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
+ * @dev: Device to thaw.
+ *
+ * Thaw a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_thaw(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
+}
+
+/**
+ * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Power off a device under the assumption that its pm_domain field points to
+ * the domain member of an object of type struct generic_pm_domain representing
+ * a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_dev_poweroff(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
+}
+
+/**
+ * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a late powering off of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_dev_poweroff_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	int ret;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (genpd->suspend_power_off)
+		return 0;
+
+	ret = pm_generic_poweroff_noirq(dev);
+	if (ret)
+		return ret;
+
+	if (device_may_wakeup(dev)
+	    && genpd->active_wakeup && genpd->active_wakeup(dev))
+		return 0;
+
+	if (genpd->stop_device)
+		genpd->stop_device(dev);
+
+	/*
+	 * Since all of the "noirq" callbacks are executed sequentially, it is
+	 * guaranteed that this function will never run twice in parallel for
+	 * the same PM domain, so it is not necessary to use locking here.
+	 */
+	genpd->suspended_count++;
+	pm_genpd_sync_poweroff(genpd);
+
+	return 0;
+}
+
+/**
+ * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early restore of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_restore_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	/*
+	 * Since all of the "noirq" callbacks are executed sequentially, it is
+	 * guaranteed that this function will never run twice in parallel for
+	 * the same PM domain, so it is not necessary to use locking here.
+	 */
+	genpd->status = GPD_STATE_POWER_OFF;
+	if (genpd->suspend_power_off) {
+		/*
+		 * The boot kernel might put the domain into the power on state,
+		 * so make sure it really is powered off.
+		 */
+		if (genpd->power_off)
+			genpd->power_off(genpd);
+		return 0;
+	}
+
+	pm_genpd_poweron(genpd);
+	genpd->suspended_count--;
+	if (genpd->start_device)
+		genpd->start_device(dev);
+
+	return pm_generic_restore_noirq(dev);
+}
+
+/**
+ * pm_genpd_restore - Restore a device belonging to an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Restore a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_restore(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+}
+
+/**
+ * pm_genpd_complete - Complete power transition of a device in a power domain.
+ * @dev: Device to complete the transition of.
+ *
+ * Complete a power transition of a device (during a system-wide power
+ * transition) under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static void pm_genpd_complete(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+	bool run_complete;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return;
+
+	mutex_lock(&genpd->lock);
+
+	run_complete = !genpd->suspend_power_off;
+	if (--genpd->prepared_count == 0)
+		genpd->suspend_power_off = false;
+
+	mutex_unlock(&genpd->lock);
+
+	if (run_complete) {
+		pm_generic_complete(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+		pm_runtime_idle(dev);
+	}
+}
+
+#else
+
+#define pm_genpd_prepare		NULL
+#define pm_genpd_suspend		NULL
+#define pm_genpd_suspend_noirq		NULL
+#define pm_genpd_resume_noirq		NULL
+#define pm_genpd_resume			NULL
+#define pm_genpd_freeze			NULL
+#define pm_genpd_freeze_noirq		NULL
+#define pm_genpd_thaw_noirq		NULL
+#define pm_genpd_thaw			NULL
+#define pm_genpd_dev_poweroff_noirq	NULL
+#define pm_genpd_dev_poweroff		NULL
+#define pm_genpd_restore_noirq		NULL
+#define pm_genpd_restore		NULL
+#define pm_genpd_complete		NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * @genpd: PM domain to add the device to.
+ * @dev: Device to be added.
+ */
+int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+{
+	struct dev_list_entry *dle;
+	int ret = 0;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	if (genpd->status == GPD_STATE_POWER_OFF) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (genpd->prepared_count > 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	list_for_each_entry(dle, &genpd->dev_list, node)
+		if (dle->dev == dev) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
+	if (!dle) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	dle->dev = dev;
+	dle->need_restore = false;
+	list_add_tail(&dle->node, &genpd->dev_list);
+	genpd->device_count++;
+
+	spin_lock_irq(&dev->power.lock);
+	dev->pm_domain = &genpd->domain;
+	spin_unlock_irq(&dev->power.lock);
+
+ out:
+	genpd_release_lock(genpd);
+
+	return ret;
+}
+
+/**
+ * pm_genpd_remove_device - Remove a device from an I/O PM domain.
+ * @genpd: PM domain to remove the device from.
+ * @dev: Device to be removed.
+ */
+int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+			   struct device *dev)
+{
+	struct dev_list_entry *dle;
+	int ret = -EINVAL;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	if (genpd->prepared_count > 0) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	list_for_each_entry(dle, &genpd->dev_list, node) {
+		if (dle->dev != dev)
+			continue;
+
+		spin_lock_irq(&dev->power.lock);
+		dev->pm_domain = NULL;
+		spin_unlock_irq(&dev->power.lock);
+
+		genpd->device_count--;
+		list_del(&dle->node);
+		kfree(dle);
+
+		ret = 0;
+		break;
+	}
+
+ out:
+	genpd_release_lock(genpd);
+
+	return ret;
+}
+
+/**
+ * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @genpd: Master PM domain to add the subdomain to.
+ * @new_subdomain: Subdomain to be added.
+ */
+int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+			   struct generic_pm_domain *new_subdomain)
+{
+	struct generic_pm_domain *subdomain;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+		return -EINVAL;
+
+ start:
+	genpd_acquire_lock(genpd);
+	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+
+	if (new_subdomain->status != GPD_STATE_POWER_OFF
+	    && new_subdomain->status != GPD_STATE_ACTIVE) {
+		mutex_unlock(&new_subdomain->lock);
+		genpd_release_lock(genpd);
+		goto start;
+	}
+
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    &&  new_subdomain->status != GPD_STATE_POWER_OFF) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+		if (subdomain == new_subdomain) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
+	new_subdomain->parent = genpd;
+	if (new_subdomain->status != GPD_STATE_POWER_OFF)
+		genpd->sd_count++;
+
+ out:
+	mutex_unlock(&new_subdomain->lock);
+	genpd_release_lock(genpd);
+
+	return ret;
+}
+
+/**
+ * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @genpd: Master PM domain to remove the subdomain from.
+ * @target: Subdomain to be removed.
+ */
+int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+			      struct generic_pm_domain *target)
+{
+	struct generic_pm_domain *subdomain;
+	int ret = -EINVAL;
+
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+		return -EINVAL;
+
+ start:
+	genpd_acquire_lock(genpd);
+
+	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+		if (subdomain != target)
+			continue;
+
+		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+
+		if (subdomain->status != GPD_STATE_POWER_OFF
+		    && subdomain->status != GPD_STATE_ACTIVE) {
+			mutex_unlock(&subdomain->lock);
+			genpd_release_lock(genpd);
+			goto start;
+		}
+
+		list_del(&subdomain->sd_node);
+		subdomain->parent = NULL;
+		if (subdomain->status != GPD_STATE_POWER_OFF)
+			genpd_sd_counter_dec(genpd);
+
+		mutex_unlock(&subdomain->lock);
+
+		ret = 0;
+		break;
+	}
+
+	genpd_release_lock(genpd);
+
+	return ret;
+}
+
+/**
+ * pm_genpd_init - Initialize a generic I/O PM domain object.
+ * @genpd: PM domain object to initialize.
+ * @gov: PM domain governor to associate with the domain (may be NULL).
+ * @is_off: Whether the domain is initially powered off (sets @genpd's status field).
+ */
+void pm_genpd_init(struct generic_pm_domain *genpd,
+		   struct dev_power_governor *gov, bool is_off)
+{
+	if (IS_ERR_OR_NULL(genpd))
+		return;
+
+	INIT_LIST_HEAD(&genpd->sd_node);
+	genpd->parent = NULL;
+	INIT_LIST_HEAD(&genpd->dev_list);
+	INIT_LIST_HEAD(&genpd->sd_list);
+	mutex_init(&genpd->lock);
+	genpd->gov = gov;
+	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+	genpd->in_progress = 0;
+	genpd->sd_count = 0;
+	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+	init_waitqueue_head(&genpd->status_wait_queue);
+	genpd->poweroff_task = NULL;
+	genpd->resume_count = 0;
+	genpd->device_count = 0;
+	genpd->suspended_count = 0;
+	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
+	genpd->domain.ops.prepare = pm_genpd_prepare;
+	genpd->domain.ops.suspend = pm_genpd_suspend;
+	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
+	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
+	genpd->domain.ops.resume = pm_genpd_resume;
+	genpd->domain.ops.freeze = pm_genpd_freeze;
+	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
+	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
+	genpd->domain.ops.thaw = pm_genpd_thaw;
+	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
+	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+	genpd->domain.ops.restore = pm_genpd_restore;
+	genpd->domain.ops.complete = pm_genpd_complete;
+	mutex_lock(&gpd_list_lock);
+	list_add(&genpd->gpd_list_node, &gpd_list);
+	mutex_unlock(&gpd_list_lock);
+}
+
+/**
+ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ */
+void pm_genpd_poweroff_unused(void)
+{
+	struct generic_pm_domain *genpd;
+
+	mutex_lock(&gpd_list_lock);
+
+	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+		genpd_queue_power_off_work(genpd);
+
+	mutex_unlock(&gpd_list_lock);
+}
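
A sketch of how a platform is expected to use the new framework (all "foo" names
are hypothetical; the power_on/power_off callbacks would poke platform-specific
power-switch registers):

	#include <linux/init.h>
	#include <linux/platform_device.h>
	#include <linux/pm_domain.h>

	static int foo_pd_power_off(struct generic_pm_domain *genpd)
	{
		/* Platform-specific: open the power switch for this island. */
		return 0;
	}

	static int foo_pd_power_on(struct generic_pm_domain *genpd)
	{
		/* Platform-specific: close the switch and wait for power-good. */
		return 0;
	}

	static struct generic_pm_domain foo_pd = {
		.power_on  = foo_pd_power_on,
		.power_off = foo_pd_power_off,
	};

	static struct platform_device foo_uart_device = {
		.name = "foo-uart",
		.id   = -1,
	};

	static int __init foo_pd_setup(void)
	{
		/* Register the domain in the initially-powered-on state. */
		pm_genpd_init(&foo_pd, NULL, false);

		/* Hook a device up to it; runtime-suspending the device may
		 * now power the whole domain off via pm_genpd_poweroff(). */
		return pm_genpd_add_device(&foo_pd, &foo_uart_device.dev);
	}
	core_initcall(foo_pd_setup);

	static int __init foo_pd_idle(void)
	{
		/* Queue power-off work for every domain with nothing in use. */
		pm_genpd_poweroff_unused();
		return 0;
	}
	late_initcall(foo_pd_idle);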
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index cb3bb368681c..9508df71274b 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -94,12 +94,13 @@ int pm_generic_prepare(struct device *dev)
  * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
  * @dev: Device to handle.
  * @event: PM transition of the system under way.
+ * @noirq: Whether or not this is the "noirq" stage.
  *
  * If the device has not been suspended at run time, execute the
  * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
  * return its error code.  Otherwise, return zero.
  */
-static int __pm_generic_call(struct device *dev, int event)
+static int __pm_generic_call(struct device *dev, int event, bool noirq)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int (*callback)(struct device *);
@@ -109,16 +110,16 @@ static int __pm_generic_call(struct device *dev, int event)
 
 	switch (event) {
 	case PM_EVENT_SUSPEND:
-		callback = pm->suspend;
+		callback = noirq ? pm->suspend_noirq : pm->suspend;
 		break;
 	case PM_EVENT_FREEZE:
-		callback = pm->freeze;
+		callback = noirq ? pm->freeze_noirq : pm->freeze;
 		break;
 	case PM_EVENT_HIBERNATE:
-		callback = pm->poweroff;
+		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
 		break;
 	case PM_EVENT_THAW:
-		callback = pm->thaw;
+		callback = noirq ? pm->thaw_noirq : pm->thaw;
 		break;
 	default:
 		callback = NULL;
@@ -129,42 +130,82 @@ static int __pm_generic_call(struct device *dev, int event)
 }
 
 /**
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_noirq(struct device *dev)
+{
+	return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+
+/**
  * pm_generic_suspend - Generic suspend callback for subsystems.
  * @dev: Device to suspend.
  */
 int pm_generic_suspend(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_SUSPEND);
+	return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend);
 
 /**
+ * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_noirq(struct device *dev)
+{
+	return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
+
+/**
  * pm_generic_freeze - Generic freeze callback for subsystems.
  * @dev: Device to freeze.
  */
 int pm_generic_freeze(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_FREEZE);
+	return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze);
 
 /**
+ * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_noirq(struct device *dev)
+{
+	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
+
+/**
  * pm_generic_poweroff - Generic poweroff callback for subsystems.
  * @dev: Device to handle.
  */
 int pm_generic_poweroff(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_HIBERNATE);
+	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff);
 
 /**
+ * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_noirq(struct device *dev)
+{
+	return __pm_generic_call(dev, PM_EVENT_THAW, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
+
+/**
  * pm_generic_thaw - Generic thaw callback for subsystems.
  * @dev: Device to thaw.
  */
 int pm_generic_thaw(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_THAW);
+	return __pm_generic_call(dev, PM_EVENT_THAW, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw);
 
@@ -172,12 +213,13 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
  * __pm_generic_resume - Generic resume/restore callback for subsystems.
  * @dev: Device to handle.
  * @event: PM transition of the system under way.
+ * @noirq: Whether or not this is the "noirq" stage.
  *
 * Execute the resume/restore callback provided by the @dev's driver, if
  * defined.  If it returns 0, change the device's runtime PM status to 'active'.
  * Return the callback's error code.
  */
-static int __pm_generic_resume(struct device *dev, int event)
+static int __pm_generic_resume(struct device *dev, int event, bool noirq)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int (*callback)(struct device *);
@@ -188,10 +230,10 @@ static int __pm_generic_resume(struct device *dev, int event)
 
 	switch (event) {
 	case PM_EVENT_RESUME:
-		callback = pm->resume;
+		callback = noirq ? pm->resume_noirq : pm->resume;
 		break;
 	case PM_EVENT_RESTORE:
-		callback = pm->restore;
+		callback = noirq ? pm->restore_noirq : pm->restore;
 		break;
 	default:
 		callback = NULL;
@@ -202,7 +244,7 @@ static int __pm_generic_resume(struct device *dev, int event)
 		return 0;
 
 	ret = callback(dev);
-	if (!ret && pm_runtime_enabled(dev)) {
+	if (!ret && !noirq && pm_runtime_enabled(dev)) {
 		pm_runtime_disable(dev);
 		pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
@@ -212,22 +254,42 @@ static int __pm_generic_resume(struct device *dev, int event)
 }
 
 /**
+ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_noirq(struct device *dev)
+{
+	return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+
+/**
  * pm_generic_resume - Generic resume callback for subsystems.
  * @dev: Device to resume.
  */
 int pm_generic_resume(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESUME);
+	return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);
 
 /**
+ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_noirq(struct device *dev)
+{
+	return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+
+/**
  * pm_generic_restore - Generic restore callback for subsystems.
  * @dev: Device to restore.
  */
 int pm_generic_restore(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESTORE);
+	return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
 
@@ -256,11 +318,17 @@ struct dev_pm_ops generic_subsys_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
 	.prepare = pm_generic_prepare,
 	.suspend = pm_generic_suspend,
+	.suspend_noirq = pm_generic_suspend_noirq,
 	.resume = pm_generic_resume,
+	.resume_noirq = pm_generic_resume_noirq,
 	.freeze = pm_generic_freeze,
+	.freeze_noirq = pm_generic_freeze_noirq,
 	.thaw = pm_generic_thaw,
+	.thaw_noirq = pm_generic_thaw_noirq,
 	.poweroff = pm_generic_poweroff,
+	.poweroff_noirq = pm_generic_poweroff_noirq,
 	.restore = pm_generic_restore,
+	.restore_noirq = pm_generic_restore_noirq,
 	.complete = pm_generic_complete,
 #endif
 #ifdef CONFIG_PM_RUNTIME
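
The new _noirq helpers are drop-in entries for a subsystem's dev_pm_ops, next to the existing generic callbacks. A minimal sketch for a hypothetical bus type (the foo_* names are illustrative; this assumes the matching declarations in <linux/pm.h>):

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Hypothetical bus type reusing the generic helpers added above. */
	static const struct dev_pm_ops foo_bus_pm_ops = {
		.suspend	= pm_generic_suspend,
		.suspend_noirq	= pm_generic_suspend_noirq,
		.resume		= pm_generic_resume,
		.resume_noirq	= pm_generic_resume_noirq,
		.freeze		= pm_generic_freeze,
		.freeze_noirq	= pm_generic_freeze_noirq,
		.thaw		= pm_generic_thaw,
		.thaw_noirq	= pm_generic_thaw_noirq,
		.poweroff	= pm_generic_poweroff,
		.poweroff_noirq	= pm_generic_poweroff_noirq,
		.restore	= pm_generic_restore,
		.restore_noirq	= pm_generic_restore_noirq,
	};

	struct bus_type foo_bus_type = {
		.name	= "foo",
		.pm	= &foo_bus_pm_ops,
	};
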
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 06f09bf89cb2..a85459126bc6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -425,9 +425,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
-	if (dev->pwr_domain) {
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "EARLY power domain ");
-		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "EARLY type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
@@ -505,6 +505,7 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
 static int device_resume(struct device *dev, pm_message_t state, bool async)
 {
 	int error = 0;
+	bool put = false;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
@@ -521,9 +522,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	if (!dev->power.is_suspended)
 		goto Unlock;
 
-	if (dev->pwr_domain) {
+	pm_runtime_enable(dev);
+	put = true;
+
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
-		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_op(dev, &dev->pm_domain->ops, state);
 		goto End;
 	}
 
@@ -563,6 +567,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	complete_all(&dev->power.completion);
 
 	TRACE_RESUME(error);
+
+	if (put)
+		pm_runtime_put_sync(dev);
+
 	return error;
 }
 
@@ -641,10 +649,10 @@ static void device_complete(struct device *dev, pm_message_t state)
 {
 	device_lock(dev);
 
-	if (dev->pwr_domain) {
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "completing power domain ");
-		if (dev->pwr_domain->ops.complete)
-			dev->pwr_domain->ops.complete(dev);
+		if (dev->pm_domain->ops.complete)
+			dev->pm_domain->ops.complete(dev);
 	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "completing type ");
 		if (dev->type->pm->complete)
@@ -744,9 +752,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
 	int error;
 
-	if (dev->pwr_domain) {
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "LATE power domain ");
-		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
 		if (error)
 			return error;
 	} else if (dev->type && dev->type->pm) {
@@ -843,19 +851,25 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	int error = 0;
 
 	dpm_wait_for_children(dev, async);
-	device_lock(dev);
 
 	if (async_error)
-		goto Unlock;
+		return 0;
+
+	pm_runtime_get_noresume(dev);
+	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+		pm_wakeup_event(dev, 0);
 
 	if (pm_wakeup_pending()) {
+		pm_runtime_put_sync(dev);
 		async_error = -EBUSY;
-		goto Unlock;
+		return 0;
 	}
 
-	if (dev->pwr_domain) {
+	device_lock(dev);
+
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
-		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_op(dev, &dev->pm_domain->ops, state);
 		goto End;
 	}
 
@@ -890,12 +904,15 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
  End:
 	dev->power.is_suspended = !error;
 
- Unlock:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
 
-	if (error)
+	if (error) {
+		pm_runtime_put_sync(dev);
 		async_error = error;
+	} else if (dev->power.is_suspended) {
+		__pm_runtime_disable(dev, false);
+	}
 
 	return error;
 }
@@ -982,11 +999,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	device_lock(dev);
 
-	if (dev->pwr_domain) {
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "preparing power domain ");
-		if (dev->pwr_domain->ops.prepare)
-			error = dev->pwr_domain->ops.prepare(dev);
-		suspend_report_result(dev->pwr_domain->ops.prepare, error);
+		if (dev->pm_domain->ops.prepare)
+			error = dev->pm_domain->ops.prepare(dev);
+		suspend_report_result(dev->pm_domain->ops.prepare, error);
 		if (error)
 			goto End;
 	} else if (dev->type && dev->type->pm) {
@@ -1035,13 +1052,7 @@ int dpm_prepare(pm_message_t state)
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
 
-		pm_runtime_get_noresume(dev);
-		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
-			pm_wakeup_event(dev, 0);
-
-		pm_runtime_put_sync(dev);
-		error = pm_wakeup_pending() ?
-				-EBUSY : device_prepare(dev, state);
+		error = device_prepare(dev, state);
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
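
The wakeup check that dpm_prepare() used to run for every device now runs per device in __device_suspend(), after dpm_wait_for_children(). A hedged sketch of the same sequence as a standalone helper (the foo_check_wakeup name is invented for illustration):

	#include <linux/pm_runtime.h>
	#include <linux/pm_wakeup.h>
	#include <linux/suspend.h>

	/*
	 * Pin the device's runtime PM usage count, flush pending runtime
	 * requests, and record a wakeup event if a resume request raced
	 * with system suspend.  Returns -EBUSY if a wakeup source has
	 * aborted the transition; on success the caller owns the usage
	 * count reference taken here.
	 */
	static int foo_check_wakeup(struct device *dev)
	{
		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		if (pm_wakeup_pending()) {
			pm_runtime_put_sync(dev);
			return -EBUSY;
		}
		return 0;
	}
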
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 56a6899f5e9e..5cc12322ef32 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev,
 
 	return 0;
 }
+
+/**
+ * opp_free_cpufreq_table() - free the cpufreq table
+ * @dev:	device for which we do this operation
+ * @table:	table to free
+ *
+ * Free up the table allocated by opp_init_cpufreq_table
+ */
+void opp_free_cpufreq_table(struct device *dev,
+				struct cpufreq_frequency_table **table)
+{
+	if (!table)
+		return;
+
+	kfree(*table);
+	*table = NULL;
+}
 #endif		/* CONFIG_CPU_FREQ */
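
opp_free_cpufreq_table() gives cpufreq drivers built on OPP a matching teardown for opp_init_cpufreq_table(). A minimal sketch of the intended pairing (the foo_cpufreq_* names are illustrative; the API is assumed to live in <linux/opp.h> at this point in the tree):

	#include <linux/cpufreq.h>
	#include <linux/opp.h>

	static struct cpufreq_frequency_table *freq_table;

	static int foo_cpufreq_init(struct device *cpu_dev)
	{
		/* Build a cpufreq table from the OPPs registered for cpu_dev. */
		return opp_init_cpufreq_table(cpu_dev, &freq_table);
	}

	static void foo_cpufreq_exit(struct device *cpu_dev)
	{
		/* Frees the table and clears the pointer, so the call is idempotent. */
		opp_free_cpufreq_table(cpu_dev, &freq_table);
	}
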
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0d4587b15c55..8dc247c974af 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,5 +1,5 @@
 /*
- * drivers/base/power/runtime.c - Helper functions for device run-time PM
+ * drivers/base/power/runtime.c - Helper functions for device runtime PM
  *
  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
@@ -135,8 +135,9 @@ static int rpm_check_suspend_allowed(struct device *dev)
 
 	if (dev->power.runtime_error)
 		retval = -EINVAL;
-	else if (atomic_read(&dev->power.usage_count) > 0
-	    || dev->power.disable_depth > 0)
+	else if (dev->power.disable_depth > 0)
+		retval = -EACCES;
+	else if (atomic_read(&dev->power.usage_count) > 0)
 		retval = -EAGAIN;
 	else if (!pm_children_suspended(dev))
 		retval = -EBUSY;
@@ -158,7 +159,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
  * @dev: Device to notify the bus type about.
  * @rpmflags: Flag bits.
  *
- * Check if the device's run-time PM status allows it to be suspended.  If
+ * Check if the device's runtime PM status allows it to be suspended.  If
  * another idle notification has been started earlier, return immediately.  If
  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
  * run the ->runtime_idle() callback directly.
@@ -213,8 +214,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
 
 	dev->power.idle_notification = true;
 
-	if (dev->pwr_domain)
-		callback = dev->pwr_domain->ops.runtime_idle;
+	if (dev->pm_domain)
+		callback = dev->pm_domain->ops.runtime_idle;
 	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_idle;
 	else if (dev->class && dev->class->pm)
@@ -262,15 +263,15 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 		spin_lock_irq(&dev->power.lock);
 	}
 	dev->power.runtime_error = retval;
-	return retval;
+	return retval != -EACCES ? retval : -EIO;
 }
 
 /**
- * rpm_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out runtime suspend of given device.
  * @dev: Device to suspend.
  * @rpmflags: Flag bits.
  *
- * Check if the device's run-time PM status allows it to be suspended.  If
+ * Check if the device's runtime PM status allows it to be suspended.  If
  * another suspend has been started earlier, either return immediately or wait
  * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
  * pending idle notification.  If the RPM_ASYNC flag is set then queue a
@@ -374,8 +375,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
-	if (dev->pwr_domain)
-		callback = dev->pwr_domain->ops.runtime_suspend;
+	if (dev->pm_domain)
+		callback = dev->pm_domain->ops.runtime_suspend;
 	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_suspend;
 	else if (dev->class && dev->class->pm)
@@ -388,7 +389,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_ACTIVE);
-		dev->power.deferred_resume = 0;
+		dev->power.deferred_resume = false;
 		if (retval == -EAGAIN || retval == -EBUSY)
 			dev->power.runtime_error = 0;
 		else
@@ -429,11 +430,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 }
 
 /**
- * rpm_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out runtime resume of given device.
  * @dev: Device to resume.
  * @rpmflags: Flag bits.
  *
- * Check if the device's run-time PM status allows it to be resumed.  Cancel
+ * Check if the device's runtime PM status allows it to be resumed.  Cancel
  * any scheduled or pending requests.  If another resume has been started
  * earlier, either return immediately or wait for it to finish, depending on the
  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
@@ -458,7 +459,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	if (dev->power.runtime_error)
 		retval = -EINVAL;
 	else if (dev->power.disable_depth > 0)
-		retval = -EAGAIN;
+		retval = -EACCES;
 	if (retval)
 		goto out;
 
@@ -550,7 +551,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 
 		spin_lock(&parent->power.lock);
 		/*
-		 * We can resume if the parent's run-time PM is disabled or it
+		 * We can resume if the parent's runtime PM is disabled or it
 		 * is set to ignore children.
 		 */
 		if (!parent->power.disable_depth
@@ -573,8 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
 
 	__update_runtime_status(dev, RPM_RESUMING);
 
-	if (dev->pwr_domain)
-		callback = dev->pwr_domain->ops.runtime_resume;
+	if (dev->pm_domain)
+		callback = dev->pm_domain->ops.runtime_resume;
 	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_resume;
 	else if (dev->class && dev->class->pm)
@@ -614,11 +615,11 @@ static int rpm_resume(struct device *dev, int rpmflags)
 }
 
 /**
- * pm_runtime_work - Universal run-time PM work function.
+ * pm_runtime_work - Universal runtime PM work function.
  * @work: Work structure used for scheduling the execution of this function.
  *
  * Use @work to get the device object the work is to be done for, determine what
- * is to be done and execute the appropriate run-time PM function.
+ * is to be done and execute the appropriate runtime PM function.
  */
 static void pm_runtime_work(struct work_struct *work)
 {
@@ -717,7 +718,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 
 /**
- * __pm_runtime_idle - Entry point for run-time idle operations.
+ * __pm_runtime_idle - Entry point for runtime idle operations.
  * @dev: Device to send idle notification for.
  * @rpmflags: Flag bits.
  *
@@ -746,7 +747,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
 
 /**
- * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
  * @dev: Device to suspend.
  * @rpmflags: Flag bits.
  *
@@ -775,7 +776,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
 
 /**
- * __pm_runtime_resume - Entry point for run-time resume operations.
+ * __pm_runtime_resume - Entry point for runtime resume operations.
  * @dev: Device to resume.
  * @rpmflags: Flag bits.
  *
@@ -801,11 +802,11 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
 
 /**
- * __pm_runtime_set_status - Set run-time PM status of a device.
+ * __pm_runtime_set_status - Set runtime PM status of a device.
  * @dev: Device to handle.
- * @status: New run-time PM status of the device.
+ * @status: New runtime PM status of the device.
  *
- * If run-time PM of the device is disabled or its power.runtime_error field is
+ * If runtime PM of the device is disabled or its power.runtime_error field is
  * different from zero, the status may be changed either to RPM_ACTIVE, or to
  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
  * However, if the device has a parent and the parent is not active, and the
@@ -851,7 +852,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 
 		/*
 		 * It is invalid to put an active child under a parent that is
-		 * not active, has run-time PM enabled and the
+		 * not active, has runtime PM enabled and the
 		 * 'power.ignore_children' flag unset.
 		 */
 		if (!parent->power.disable_depth
@@ -885,7 +886,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
  * @dev: Device to handle.
  *
  * Flush all pending requests for the device from pm_wq and wait for all
- * run-time PM operations involving the device in progress to complete.
+ * runtime PM operations involving the device in progress to complete.
  *
  * Should be called under dev->power.lock with interrupts disabled.
  */
@@ -933,7 +934,7 @@ static void __pm_runtime_barrier(struct device *dev)
  * Prevent the device from being suspended by incrementing its usage counter and
  * if there's a pending resume request for the device, wake the device up.
  * Next, make sure that all pending requests for the device have been flushed
- * from pm_wq and wait for all run-time PM operations involving the device in
+ * from pm_wq and wait for all runtime PM operations involving the device in
  * progress to complete.
  *
  * Return value:
@@ -963,18 +964,18 @@ int pm_runtime_barrier(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
 
 /**
- * __pm_runtime_disable - Disable run-time PM of a device.
+ * __pm_runtime_disable - Disable runtime PM of a device.
  * @dev: Device to handle.
  * @check_resume: If set, check if there's a resume request for the device.
  *
  * Increment power.disable_depth for the device and if was zero previously,
- * cancel all pending run-time PM requests for the device and wait for all
+ * cancel all pending runtime PM requests for the device and wait for all
  * operations in progress to complete.  The device can be either active or
- * suspended after its run-time PM has been disabled.
+ * suspended after its runtime PM has been disabled.
  *
  * If @check_resume is set and there's a resume request pending when
  * __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its run-time PM.
+ * function will wake up the device before disabling its runtime PM.
  */
 void __pm_runtime_disable(struct device *dev, bool check_resume)
 {
@@ -987,7 +988,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
 
 	/*
 	 * Wake up the device if there's a resume request pending, because that
-	 * means there probably is some I/O to process and disabling run-time PM
+	 * means there probably is some I/O to process and disabling runtime PM
 	 * shouldn't prevent the device from processing the I/O.
 	 */
 	if (check_resume && dev->power.request_pending
@@ -1012,7 +1013,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
 
 /**
- * pm_runtime_enable - Enable run-time PM of a device.
+ * pm_runtime_enable - Enable runtime PM of a device.
  * @dev: Device to handle.
  */
 void pm_runtime_enable(struct device *dev)
@@ -1031,7 +1032,7 @@ void pm_runtime_enable(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_runtime_enable);
 
 /**
- * pm_runtime_forbid - Block run-time PM of a device.
+ * pm_runtime_forbid - Block runtime PM of a device.
  * @dev: Device to handle.
  *
  * Increase the device's usage count and clear its power.runtime_auto flag,
@@ -1054,7 +1055,7 @@ void pm_runtime_forbid(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
 
 /**
- * pm_runtime_allow - Unblock run-time PM of a device.
+ * pm_runtime_allow - Unblock runtime PM of a device.
  * @dev: Device to handle.
  *
  * Decrease the device's usage count and set its power.runtime_auto flag.
@@ -1075,12 +1076,12 @@ void pm_runtime_allow(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_runtime_allow);
 
 /**
- * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
+ * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
  * @dev: Device to handle.
  *
  * Set the power.no_callbacks flag, which tells the PM core that this
- * device is power-managed through its parent and has no run-time PM
- * callbacks of its own.  The run-time sysfs attributes will be removed.
+ * device is power-managed through its parent and has no runtime PM
+ * callbacks of its own.  The runtime sysfs attributes will be removed.
  */
 void pm_runtime_no_callbacks(struct device *dev)
 {
@@ -1156,8 +1157,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
  * @delay: Value of the new delay in milliseconds.
  *
  * Set the device's power.autosuspend_delay value.  If it changes to negative
- * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
- * changes the other way, allow run-time suspends.
+ * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
+ * changes the other way, allow runtime suspends.
  */
 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
 {
@@ -1177,7 +1178,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
  * @dev: Device to handle.
  * @use: New value for use_autosuspend.
  *
- * Set the device's power.use_autosuspend flag, and allow or prevent run-time
+ * Set the device's power.use_autosuspend flag, and allow or prevent runtime
  * suspends as needed.
  */
 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
@@ -1194,7 +1195,7 @@ void __pm_runtime_use_autosuspend(struct device *dev, bool use)
 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
 
 /**
- * pm_runtime_init - Initialize run-time PM fields in given device object.
+ * pm_runtime_init - Initialize runtime PM fields in given device object.
  * @dev: Device object to initialize.
  */
 void pm_runtime_init(struct device *dev)
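
With disable_depth now reported as -EACCES by both rpm_check_suspend_allowed() and rpm_resume(), callers can distinguish "runtime PM disabled" from transient failures such as -EAGAIN. A hedged sketch of that distinction in a driver (the foo_start_io name is illustrative):

	#include <linux/pm_runtime.h>

	static int foo_start_io(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES) {
			pm_runtime_put_noidle(dev);	/* balance the get */
			return ret;
		}
		/*
		 * -EACCES means runtime PM is disabled, so the device is
		 * assumed to be powered; keep the reference and carry on.
		 */
		return 0;
	}
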
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a9f5b8979611..942d6a7c9ae1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -116,12 +116,14 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
 	cp = memchr(buf, '\n', n);
 	if (cp)
 		len = cp - buf;
+	device_lock(dev);
 	if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
 		pm_runtime_allow(dev);
 	else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
 		pm_runtime_forbid(dev);
 	else
-		return -EINVAL;
+		n = -EINVAL;
+	device_unlock(dev);
 	return n;
 }
 
@@ -205,7 +207,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
 	if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
 		return -EINVAL;
 
+	device_lock(dev);
 	pm_runtime_set_autosuspend_delay(dev, delay);
+	device_unlock(dev);
 	return n;
 }
 
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index c80e138b62fe..af10abecb99b 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
 	unsigned int val;
 
 	get_rtc_time(&time);
-	pr_info("Time: %2d:%02d:%02d  Date: %02d/%02d/%02d\n",
+	pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
 		time.tm_hour, time.tm_min, time.tm_sec,
 		time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
 	val = time.tm_year;				/* 100 years */
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 548708c4b2b8..a7346ab97a3c 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -606,7 +606,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
 			return NOTIFY_OK;
 
 		/* interrupted by signal */
-		return NOTIFY_BAD;
+		return notifier_from_errno(err);
 
 	case PM_POST_SUSPEND:
 		/*
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 46767c53917a..12d1e81a8abe 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -18,6 +18,7 @@
 #include <linux/sched.h>
 #include <linux/cpu.h>
 #include <linux/pm_runtime.h>
+#include <linux/suspend.h>
 #include "pci.h"
 
 struct pci_dynid {
@@ -616,6 +617,21 @@ static int pci_pm_prepare(struct device *dev)
 	int error = 0;
 
 	/*
+	 * If a PCI device configured to wake up the system from sleep states
+	 * has been suspended at run time and there's a resume request pending
+	 * for it, this is equivalent to the device signaling wakeup, so the
+	 * system suspend operation should be aborted.
+	 */
+	pm_runtime_get_noresume(dev);
+	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+		pm_wakeup_event(dev, 0);
+
+	if (pm_wakeup_pending()) {
+		pm_runtime_put_sync(dev);
+		return -EBUSY;
+	}
+
+	/*
 	 * PCI devices suspended at run time need to be resumed at this
 	 * point, because in general it is necessary to reconfigure them for
 	 * system suspend.  Namely, if the device is supposed to wake up the
@@ -624,7 +640,7 @@ static int pci_pm_prepare(struct device *dev)
 	 * system from the sleep state, we'll have to prevent it from signaling
 	 * wake-up.
 	 */
-	pm_runtime_get_sync(dev);
+	pm_runtime_resume(dev);
 
 	if (drv && drv->pm && drv->pm->prepare)
 		error = drv->pm->prepare(dev);
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 12ef9121d4f0..11312f401c70 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -258,13 +258,13 @@ static int vmwdt_suspend(void)
 	if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
 		pr_err("The system cannot be suspended while the watchdog"
 			" is in use\n");
-		return NOTIFY_BAD;
+		return notifier_from_errno(-EBUSY);
 	}
 	if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
 		clear_bit(VMWDT_OPEN, &vmwdt_is_open);
 		pr_err("The system cannot be suspended while the watchdog"
 			" is running\n");
-		return NOTIFY_BAD;
+		return notifier_from_errno(-EBUSY);
 	}
 	return NOTIFY_DONE;
 }
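
Unlike a bare NOTIFY_BAD, notifier_from_errno() encodes the errno so that notifier_to_errno() can recover it on the caller's side. A hedged sketch of the pattern in a PM notifier (the foo_* names and the busy flag are illustrative):

	#include <linux/notifier.h>
	#include <linux/suspend.h>

	static bool foo_in_use;	/* hypothetical "device busy" state */

	static int foo_pm_notifier(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		switch (event) {
		case PM_SUSPEND_PREPARE:
			if (foo_in_use)
				/* Propagate a meaningful errno, not NOTIFY_BAD. */
				return notifier_from_errno(-EBUSY);
			break;
		}
		return NOTIFY_DONE;
	}
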
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index c47b25fd3f43..92d7324acb1c 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -814,8 +814,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
 				mutex_unlock(&css->mutex);
 				continue;
 			}
-			if (__chsc_do_secm(css, 0))
-				ret = NOTIFY_BAD;
+			ret = __chsc_do_secm(css, 0);
+			ret = notifier_from_errno(ret);
 			mutex_unlock(&css->mutex);
 		}
 		break;
@@ -831,8 +831,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
 				mutex_unlock(&css->mutex);
 				continue;
 			}
-			if (__chsc_do_secm(css, 1))
-				ret = NOTIFY_BAD;
+			ret = __chsc_do_secm(css, 1);
+			ret = notifier_from_errno(ret);
 			mutex_unlock(&css->mutex);
 		}
 		/* search for subchannels, which appeared during hibernation */
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d70e91ae60af..d82a023a9015 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -144,9 +144,9 @@ int scsi_autopm_get_device(struct scsi_device *sdev)
 	int	err;
 
 	err = pm_runtime_get_sync(&sdev->sdev_gendev);
-	if (err < 0)
+	if (err < 0 && err != -EACCES)
 		pm_runtime_put_sync(&sdev->sdev_gendev);
-	else if (err > 0)
+	else
 		err = 0;
 	return err;
 }
@@ -173,9 +173,9 @@ int scsi_autopm_get_host(struct Scsi_Host *shost)
 	int	err;
 
 	err = pm_runtime_get_sync(&shost->shost_gendev);
-	if (err < 0)
+	if (err < 0 && err != -EACCES)
 		pm_runtime_put_sync(&shost->shost_gendev);
-	else if (err > 0)
+	else
 		err = 0;
 	return err;
 }
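
Since -EACCES from pm_runtime_get_sync() now means "runtime PM disabled", in which case the device is effectively always powered, scsi_autopm_get_device() treats it as success and callers keep the plain get/put pairing. A sketch of typical use (foo_issue_command is illustrative; the declarations are assumed to live in drivers/scsi/scsi_priv.h at this point in the tree):

	#include <scsi/scsi_device.h>
	#include "scsi_priv.h"		/* scsi_autopm_{get,put}_device() */

	static int foo_issue_command(struct scsi_device *sdev)
	{
		int err;

		err = scsi_autopm_get_device(sdev);	/* 0 even if RPM is off */
		if (err)
			return err;

		/* ... issue the command; the device is resumed here ... */

		scsi_autopm_put_device(sdev);
		return 0;
	}
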