author		Thomas Gleixner <tglx@linutronix.de>	2011-02-08 17:11:03 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2011-02-19 12:58:20 +0100
commit		a005677b3dd05decdd8880cf3044ae709856f58f
tree		529d1454940fe8c6723bd54f01e009d9be2ab840 /kernel
parent		1ce6068dac1924f7095be5850481e790cbf1b3c1
genirq: Mirror IRQ_PER_CPU and IRQ_NO_BALANCING in irq_data.state
That's the right data structure for arch code to look at.

Accessor functions are provided.

	 irqd_is_per_cpu(irqdata);
	 irqd_can_balance(irqdata);

Coders who access them directly will be tracked down and slapped with
stinking trouts.
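
For illustration only (not part of this patch), arch code that previously
checked desc->status for IRQ_PER_CPU or IRQ_NO_BALANCING would now go
through the irq_data accessors, roughly like this (the helper name is
made up for the sketch):

	/*
	 * Hypothetical arch-side sketch: use the irq_data accessors
	 * instead of peeking at desc->status for these bits.
	 */
	static bool my_arch_irq_can_move(struct irq_data *d)
	{
		/* Per-CPU interrupts must never be migrated */
		if (irqd_is_per_cpu(d))
			return false;
		/* Balancing may have been disabled via IRQF_NOBALANCING */
		return irqd_can_balance(d);
	}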

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel'):
 kernel/irq/chip.c      | 15
 kernel/irq/internals.h | 11
 kernel/irq/manage.c    | 16
 kernel/irq/migration.c |  2
 kernel/irq/settings.h  | 36
 kernel/irq/spurious.c  |  3
 6 files changed, 69 insertions(+), 14 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 73b2e7e00934..b8aa3dfe8301 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -706,12 +706,15 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 	if (!desc)
 		return;
 
-	/* Sanitize flags */
-	set &= IRQF_MODIFY_MASK;
-	clr &= IRQF_MODIFY_MASK;
-
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->status &= ~clr;
-	desc->status |= set;
+
+	irq_settings_clr_and_set(desc, clr, set);
+
+	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU);
+	if (irq_settings_has_no_balance_set(desc))
+		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+	if (irq_settings_is_per_cpu(desc))
+		irqd_set(&desc->irq_data, IRQD_PER_CPU);
+
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b2ba59e73f21..a80b44d2735e 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -139,3 +139,14 @@ static inline void irqd_clr_move_pending(struct irq_data *d)
 	d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
 	irq_compat_clr_move_pending(irq_data_to_desc(d));
 }
+
+static inline void irqd_clear(struct irq_data *d, unsigned int mask)
+{
+	d->state_use_accessors &= ~mask;
+}
+
+static inline void irqd_set(struct irq_data *d, unsigned int mask)
+{
+	d->state_use_accessors |= mask;
+}
+
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f1cfa271ba70..84a0a9c22226 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if ((desc->status & (IRQ_PER_CPU | IRQ_NO_BALANCING)) ||
-	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
+	if (!irqd_can_balance(&desc->irq_data) || !desc->irq_data.chip ||
+	    !desc->irq_data.chip->irq_set_affinity)
 		return 0;
 
 	return 1;
@@ -897,8 +897,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 				  IRQS_INPROGRESS | IRQS_ONESHOT | \
 				  IRQS_WAITING);
 
-		if (new->flags & IRQF_PERCPU)
-			desc->status |= IRQ_PER_CPU;
+		if (new->flags & IRQF_PERCPU) {
+			irqd_set(&desc->irq_data, IRQD_PER_CPU);
+			irq_settings_set_per_cpu(desc);
+		}
 
 		if (new->flags & IRQF_ONESHOT)
 			desc->istate |= IRQS_ONESHOT;
@@ -910,8 +912,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			desc->depth = 1;
 
 		/* Exclude IRQ from balancing if requested */
-		if (new->flags & IRQF_NOBALANCING)
-			desc->status |= IRQ_NO_BALANCING;
+		if (new->flags & IRQF_NOBALANCING) {
+			irq_settings_set_no_balancing(desc);
+			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+		}
 
 		/* Set default affinity mask once everything is setup */
 		setup_affinity(irq, desc, mask);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 24f53caddf47..7a93c6b88b25 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -15,7 +15,7 @@ void move_masked_irq(int irq)
 	/*
 	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
 	 */
-	if (desc->status & (IRQ_PER_CPU | IRQ_NO_BALANCING)) {
+	if (!irqd_can_balance(&desc->irq_data)) {
 		WARN_ON(1);
 		return;
 	}
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index bb104a2dce73..ba0fffe410ad 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -4,6 +4,9 @@
  */
 enum {
 	_IRQ_DEFAULT_INIT_FLAGS	= IRQ_DEFAULT_INIT_FLAGS,
+	_IRQ_PER_CPU		= IRQ_PER_CPU,
+	_IRQ_NO_BALANCING	= IRQ_NO_BALANCING,
+	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
 #undef IRQ_INPROGRESS
@@ -22,3 +25,36 @@ enum {
 #define IRQ_WAKEUP		GOT_YOU_MORON
 #undef IRQ_MOVE_PENDING
 #define IRQ_MOVE_PENDING	GOT_YOU_MORON
+#undef IRQ_PER_CPU
+#define IRQ_PER_CPU		GOT_YOU_MORON
+#undef IRQ_NO_BALANCING
+#define IRQ_NO_BALANCING	GOT_YOU_MORON
+#undef IRQF_MODIFY_MASK
+#define IRQF_MODIFY_MASK	GOT_YOU_MORON
+
+static inline void
+irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
+{
+	desc->status &= ~(clr & _IRQF_MODIFY_MASK);
+	desc->status |= (set & _IRQF_MODIFY_MASK);
+}
+
+static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+{
+	return desc->status & _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
+{
+	desc->status |= _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
+{
+	desc->status |= _IRQ_NO_BALANCING;
+}
+
+static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
+{
+	return desc->status & _IRQ_NO_BALANCING;
+}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 692ce2bae302..226ed7d26a84 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -68,7 +68,8 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 	raw_spin_lock(&desc->lock);
 
 	/* PER_CPU and nested thread interrupts are never polled */
-	if (desc->status & (IRQ_PER_CPU | IRQ_NESTED_THREAD))
+	if (irq_settings_is_per_cpu(desc) ||
+	    (desc->status & IRQ_NESTED_THREAD))
 		goto out;
 
 	/*