author    Olof Johansson <olof@lixom.net>  2013-04-28 12:43:08 -0700
committer Olof Johansson <olof@lixom.net>  2013-04-28 15:01:12 -0700
commit    afcf7924ecab726dab0227188783c4a40d9f0eec (patch)
tree      606b0883c0ad3fb2ef04f0f036a55ed3875fdc9b /drivers/base
parent    dc9c220304c882f06aaadf427821c6388782aab8 (diff)
parent    d21be237ffa357e55005e2bf9ffef10b23c184d0 (diff)
Merge branch 'fixes' into next/cleanup
Merging in fixes since there's a conflict in the omap4 clock tables caused by
it.

* fixes: (245 commits)
  ARM: highbank: fix cache flush ordering for cpu hotplug
  ARM: OMAP4: hwmod data: make 'ocp2scp_usb_phy_phy_48m' as the main clock
  arm: mvebu: Fix the irq map function in SMP mode
  Fix GE0/GE1 init on ix2-200 as GE0 has no PHY
  ARM: S3C24XX: Fix interrupt pending register offset of the EINT controller
  ARM: S3C24XX: Correct NR_IRQS definition for s3c2440
  ARM i.MX6: Fix ldb_di clock selection
  ARM: imx: provide twd clock lookup from device tree
  ARM: imx35 Bugfix admux clock
  ARM: clk-imx35: Bugfix iomux clock
  + Linux 3.9-rc6

Signed-off-by: Olof Johansson <olof@lixom.net>

Conflicts:
	arch/arm/mach-omap2/cclock44xx_data.c
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/qos.c              | 60
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c |  2
-rw-r--r--  drivers/base/regmap/regmap.c          |  9
3 files changed, 53 insertions(+), 18 deletions(-)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 5f74587ef258..71671c42ef45 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -46,6 +46,7 @@
 #include "power.h"
 
 static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
 
 static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
 
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	struct pm_qos_constraints *c;
 	struct pm_qos_flags *f;
 
-	mutex_lock(&dev_pm_qos_mtx);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
 
 	/*
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
+	pm_qos_sysfs_remove_latency(dev);
+	pm_qos_sysfs_remove_flags(dev);
+
+	mutex_lock(&dev_pm_qos_mtx);
+
 	__dev_pm_qos_hide_latency_limit(dev);
 	__dev_pm_qos_hide_flags(dev);
 
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 
  out:
 	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
 /**
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
 	kfree(req);
 }
 
+static void dev_pm_qos_drop_user_request(struct device *dev,
+					 enum dev_pm_qos_req_type type)
+{
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_drop_user_request(dev, type);
+	mutex_unlock(&dev_pm_qos_mtx);
+}
+
 /**
  * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
  * @dev: Device whose PM QoS latency limit is to be exposed to user space.
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 		return ret;
 	}
 
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
 	mutex_lock(&dev_pm_qos_mtx);
 
 	if (IS_ERR_OR_NULL(dev->power.qos))
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (ret < 0) {
 		__dev_pm_qos_remove_request(req);
 		kfree(req);
+		mutex_unlock(&dev_pm_qos_mtx);
 		goto out;
 	}
-
 	dev->power.qos->latency_req = req;
+
+	mutex_unlock(&dev_pm_qos_mtx);
+
 	ret = pm_qos_sysfs_add_latency(dev);
 	if (ret)
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
  out:
-	mutex_unlock(&dev_pm_qos_mtx);
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
 static void __dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
-		pm_qos_sysfs_remove_latency(dev);
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-	}
 }
 
 /**
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev)
  */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	pm_qos_sysfs_remove_latency(dev);
+
 	mutex_lock(&dev_pm_qos_mtx);
 	__dev_pm_qos_hide_latency_limit(dev);
 	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	}
 
 	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
 	mutex_lock(&dev_pm_qos_mtx);
 
 	if (IS_ERR_OR_NULL(dev->power.qos))
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	if (ret < 0) {
 		__dev_pm_qos_remove_request(req);
 		kfree(req);
+		mutex_unlock(&dev_pm_qos_mtx);
 		goto out;
 	}
-
 	dev->power.qos->flags_req = req;
+
+	mutex_unlock(&dev_pm_qos_mtx);
+
 	ret = pm_qos_sysfs_add_flags(dev);
 	if (ret)
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
  out:
-	mutex_unlock(&dev_pm_qos_mtx);
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 	pm_runtime_put(dev);
 	return ret;
 }
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
 
 static void __dev_pm_qos_hide_flags(struct device *dev)
 {
-	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
-		pm_qos_sysfs_remove_flags(dev);
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-	}
 }
 
 /**
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev)
 void dev_pm_qos_hide_flags(struct device *dev)
 {
 	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+	pm_qos_sysfs_remove_flags(dev);
+
 	mutex_lock(&dev_pm_qos_mtx);
 	__dev_pm_qos_hide_flags(dev);
 	mutex_unlock(&dev_pm_qos_mtx);
+
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 	pm_runtime_put(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index e6732cf7c06e..79f4fca9877a 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
 			base = 0;
 
 		if (max < rbnode->base_reg + rbnode->blklen)
-			end = rbnode->base_reg + rbnode->blklen - max;
+			end = max - rbnode->base_reg + 1;
 		else
 			end = rbnode->blklen;
 
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 3d2367501fd0..d34adef1e63e 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -710,12 +710,12 @@ skip_format_initialization:
 		}
 	}
 
+	regmap_debugfs_init(map, config->name);
+
 	ret = regcache_init(map, config);
 	if (ret != 0)
 		goto err_range;
 
-	regmap_debugfs_init(map, config->name);
-
 	/* Add a devres resource for dev_get_regmap() */
 	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
 	if (!m) {
@@ -943,8 +943,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		unsigned int ival;
 		int val_bytes = map->format.val_bytes;
 		for (i = 0; i < val_len / val_bytes; i++) {
-			memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
-			ival = map->format.parse_val(map->work_buf);
+			ival = map->format.parse_val(val + (i * val_bytes));
 			ret = regcache_write(map, reg + (i * map->reg_stride),
 					     ival);
 			if (ret) {
@@ -1036,6 +1035,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 			kfree(async->work_buf);
 			kfree(async);
 		}
+
+		return ret;
 	}
 
 	trace_regmap_hw_write_start(map->dev, reg,