Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/clock_ops.c | 43
-rw-r--r--  drivers/base/power/domain.c    | 42
2 files changed, 67 insertions(+), 18 deletions(-)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 7fdd0172605a..acef9f9f759a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -15,6 +15,7 @@
 #include <linux/clkdev.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/pm_runtime.h>
 
 #ifdef CONFIG_PM
 
@@ -67,7 +68,8 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
 	} else {
 		clk_prepare(ce->clk);
 		ce->status = PCE_STATUS_ACQUIRED;
-		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+		dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+			ce->clk, ce->con_id);
 	}
 }
 
@@ -93,7 +95,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
 			return -ENOMEM;
 		}
 	} else {
-		if (IS_ERR(ce->clk) || !__clk_get(clk)) {
+		if (IS_ERR(clk) || !__clk_get(clk)) {
 			kfree(ce);
 			return -ENOENT;
 		}
@@ -367,6 +369,43 @@ static int pm_clk_notify(struct notifier_block *nb,
 	return 0;
 }
 
+int pm_clk_runtime_suspend(struct device *dev)
+{
+	int ret;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	ret = pm_generic_runtime_suspend(dev);
+	if (ret) {
+		dev_err(dev, "failed to suspend device\n");
+		return ret;
+	}
+
+	ret = pm_clk_suspend(dev);
+	if (ret) {
+		dev_err(dev, "failed to suspend clock\n");
+		pm_generic_runtime_resume(dev);
+		return ret;
+	}
+
+	return 0;
+}
+
+int pm_clk_runtime_resume(struct device *dev)
+{
+	int ret;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	ret = pm_clk_resume(dev);
+	if (ret) {
+		dev_err(dev, "failed to resume clock\n");
+		return ret;
+	}
+
+	return pm_generic_runtime_resume(dev);
+}
+
 #else /* !CONFIG_PM */
 
 /**
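
Usage note (not part of the patch): the two helpers added above give subsystems
and SoC glue code a ready-made pair of runtime PM callbacks that gate a
device's clocks around the generic suspend path. A minimal sketch of how a
platform might wire them up follows; every name prefixed "example_" is an
assumption for illustration, not something this patch defines.

	#include <linux/device.h>
	#include <linux/pm_clock.h>

	static struct dev_pm_domain example_pm_domain = {
		.ops = {
			/* helpers introduced by this patch */
			.runtime_suspend = pm_clk_runtime_suspend,
			.runtime_resume  = pm_clk_runtime_resume,
		},
	};

	static int example_attach_dev(struct device *dev)
	{
		int ret;

		/* per-device clock list consumed by pm_clk_suspend/resume */
		ret = pm_clk_create(dev);
		if (ret)
			return ret;

		/* NULL con_id: manage the device's primary clock */
		ret = pm_clk_add(dev, NULL);
		if (ret) {
			pm_clk_destroy(dev);
			return ret;
		}

		dev->pm_domain = &example_pm_domain;
		return 0;
	}
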
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 2327613d4539..cdd547bd67df 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -181,7 +181,7 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
 	genpd->cpuidle_data->idle_state->exit_latency = usecs64;
 }
 
-static int genpd_power_on(struct generic_pm_domain *genpd)
+static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 {
 	ktime_t time_start;
 	s64 elapsed_ns;
@@ -190,6 +190,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd)
 	if (!genpd->power_on)
 		return 0;
 
+	if (!timed)
+		return genpd->power_on(genpd);
+
 	time_start = ktime_get();
 	ret = genpd->power_on(genpd);
 	if (ret)
@@ -208,7 +211,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd)
 	return ret;
 }
 
-static int genpd_power_off(struct generic_pm_domain *genpd)
+static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 {
 	ktime_t time_start;
 	s64 elapsed_ns;
@@ -217,6 +220,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd)
 	if (!genpd->power_off)
 		return 0;
 
+	if (!timed)
+		return genpd->power_off(genpd);
+
 	time_start = ktime_get();
 	ret = genpd->power_off(genpd);
 	if (ret == -EBUSY)
@@ -305,7 +311,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		}
 	}
 
-	ret = genpd_power_on(genpd);
+	ret = genpd_power_on(genpd, true);
 	if (ret)
 		goto err;
 
@@ -615,7 +621,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 		 * the pm_genpd_poweron() restore power for us (this shouldn't
 		 * happen very often).
 		 */
-		ret = genpd_power_off(genpd);
+		ret = genpd_power_off(genpd, true);
 		if (ret == -EBUSY) {
 			genpd_set_active(genpd);
 			goto out;
@@ -827,6 +833,7 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
+ * @timed: True if latency measurements are allowed.
  *
  * Check if the given PM domain can be powered off (during system suspend or
  * hibernation) and do that if so.  Also, in that case propagate to its masters.
@@ -836,7 +843,8 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
  * executed sequentially, so it is guaranteed that it will never run twice in
  * parallel).
  */
-static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
+				   bool timed)
 {
 	struct gpd_link *link;
 
@@ -847,26 +855,28 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 	    || atomic_read(&genpd->sd_count) > 0)
 		return;
 
-	genpd_power_off(genpd);
+	genpd_power_off(genpd, timed);
 
 	genpd->status = GPD_STATE_POWER_OFF;
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 		genpd_sd_counter_dec(link->master);
-		pm_genpd_sync_poweroff(link->master);
+		pm_genpd_sync_poweroff(link->master, timed);
 	}
 }
 
 /**
  * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
  * @genpd: PM domain to power on.
+ * @timed: True if latency measurements are allowed.
  *
  * This function is only called in "noirq" and "syscore" stages of system power
  * transitions, so it need not acquire locks (all of the "noirq" callbacks are
  * executed sequentially, so it is guaranteed that it will never run twice in
  * parallel).
  */
-static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
+static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
+				  bool timed)
 {
 	struct gpd_link *link;
 
@@ -874,11 +884,11 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
 		return;
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
-		pm_genpd_sync_poweron(link->master);
+		pm_genpd_sync_poweron(link->master, timed);
 		genpd_sd_counter_inc(link->master);
 	}
 
-	genpd_power_on(genpd);
+	genpd_power_on(genpd, timed);
 
 	genpd->status = GPD_STATE_ACTIVE;
 }
@@ -1056,7 +1066,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	 * the same PM domain, so it is not necessary to use locking here.
 	 */
 	genpd->suspended_count++;
-	pm_genpd_sync_poweroff(genpd);
+	pm_genpd_sync_poweroff(genpd, true);
 
 	return 0;
 }
@@ -1086,7 +1096,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	 * guaranteed that this function will never run twice in parallel for
 	 * the same PM domain, so it is not necessary to use locking here.
 	 */
-	pm_genpd_sync_poweron(genpd);
+	pm_genpd_sync_poweron(genpd, true);
 	genpd->suspended_count--;
 
 	return genpd_start_dev(genpd, dev);
@@ -1300,7 +1310,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
 			 * If the domain was off before the hibernation, make
 			 * sure it will be off going forward.
 			 */
-			genpd_power_off(genpd);
+			genpd_power_off(genpd, true);
 
 			return 0;
 		}
@@ -1309,7 +1319,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
 	if (genpd->suspend_power_off)
 		return 0;
 
-	pm_genpd_sync_poweron(genpd);
+	pm_genpd_sync_poweron(genpd, true);
 
 	return genpd_start_dev(genpd, dev);
 }
@@ -1367,9 +1377,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
 
 	if (suspend) {
 		genpd->suspended_count++;
-		pm_genpd_sync_poweroff(genpd);
+		pm_genpd_sync_poweroff(genpd, false);
 	} else {
-		pm_genpd_sync_poweron(genpd);
+		pm_genpd_sync_poweron(genpd, false);
 		genpd->suspended_count--;
 	}
 }
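
Closing note (not part of the patch): the only callers passing timed == false
are the syscore hooks in genpd_syscore_switch() above, where timekeeping may
already be suspended, so bracketing the callbacks with ktime_get() would yield
meaningless latency values. A hypothetical provider showing what the flag
guards; the "example_" names are assumptions for illustration.

	#include <linux/init.h>
	#include <linux/pm_domain.h>

	static int example_domain_power_on(struct generic_pm_domain *genpd)
	{
		/*
		 * Real hardware access elided. When invoked with
		 * timed == true, genpd_power_on() times this callback and
		 * updates genpd->power_on_latency_ns; with timed == false
		 * (syscore path) it is called directly, untimed.
		 */
		return 0;
	}

	static int example_domain_power_off(struct generic_pm_domain *genpd)
	{
		return 0;
	}

	static struct generic_pm_domain example_genpd = {
		.name = "example",
		.power_on = example_domain_power_on,
		.power_off = example_domain_power_off,
	};

	static int __init example_genpd_setup(void)
	{
		/* register the domain, initially powered on */
		pm_genpd_init(&example_genpd, NULL, false);
		return 0;
	}
	core_initcall(example_genpd_setup);
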