author    Masahiro Yamada <yamada.masahiro@socionext.com>  2017-10-05 10:44:54 +0900
committer Marc Zyngier <marc.zyngier@arm.com>              2017-10-19 10:30:03 +0100
commit    f1d783585486c7c612f277c2a6f0c9bb5a67e463 (patch)
tree      87895cecab4db87b5db128cd91826f8421e440d1 /kernel/irq
parent    c94fb639d5462027004ed8f5f71288955688a4ae (diff)
download  linux-f1d783585486c7c612f277c2a6f0c9bb5a67e463.tar.gz
irqdomain: Move revmap_trees_mutex to struct irq_domain
The revmap_trees_mutex protects domain->revmap_tree.  There is no
need for it to be global: the revmap_trees of two different domains
may safely be modified concurrently.  That said, the global mutex is
not an actual bottleneck, since interrupt map/unmap operations are
infrequent.  Rather, the motivation is to tidy up the code from a
data-structure point of view.

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
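
The struct side of this change lands in include/linux/irqdomain.h,
which is outside this diffstat.  A minimal sketch of the result is
shown below; the field selection is illustrative (most members are
elided), but the new member name matches the mutex_init() call in
the patch:

	struct irq_domain {
		/* ... name, ops, host_data, flags, etc. ... */

		/* reverse map data */
		irq_hw_number_t hwirq_max;
		unsigned int revmap_size;
		struct radix_tree_root revmap_tree;
		struct mutex revmap_tree_mutex;	/* protects revmap_tree */
		unsigned int linear_revmap[];
	};

With the mutex embedded per domain, updates to the radix trees of two
different domains no longer contend on a single global lock, which is
the structural tidy-up the commit message describes.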
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/irqdomain.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index b50f737574ae..31d0f9ff4f00 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -21,7 +21,6 @@
 static LIST_HEAD(irq_domain_list);
 static DEFINE_MUTEX(irq_domain_mutex);
 
-static DEFINE_MUTEX(revmap_trees_mutex);
 static struct irq_domain *irq_default_domain;
 
 static void irq_domain_check_hierarchy(struct irq_domain *domain);
@@ -211,6 +210,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
 
 	/* Fill structure */
 	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
+	mutex_init(&domain->revmap_tree_mutex);
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->hwirq_max = hwirq_max;
@@ -462,9 +462,9 @@ static void irq_domain_clear_mapping(struct irq_domain *domain,
 	if (hwirq < domain->revmap_size) {
 		domain->linear_revmap[hwirq] = 0;
 	} else {
-		mutex_lock(&revmap_trees_mutex);
+		mutex_lock(&domain->revmap_tree_mutex);
 		radix_tree_delete(&domain->revmap_tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
+		mutex_unlock(&domain->revmap_tree_mutex);
 	}
 }
 
@@ -475,9 +475,9 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
 	if (hwirq < domain->revmap_size) {
 		domain->linear_revmap[hwirq] = irq_data->irq;
 	} else {
-		mutex_lock(&revmap_trees_mutex);
+		mutex_lock(&domain->revmap_tree_mutex);
 		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-		mutex_unlock(&revmap_trees_mutex);
+		mutex_unlock(&domain->revmap_tree_mutex);
 	}
 }
 
@@ -1459,11 +1459,11 @@ static void irq_domain_fix_revmap(struct irq_data *d)
 		return; /* Not using radix tree. */
 
 	/* Fix up the revmap. */
-	mutex_lock(&revmap_trees_mutex);
+	mutex_lock(&d->domain->revmap_tree_mutex);
 	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
 	if (slot)
 		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
-	mutex_unlock(&revmap_trees_mutex);
+	mutex_unlock(&d->domain->revmap_tree_mutex);
 }
 
 /**