author     Paul E. McKenney <paulmck@linux.ibm.com>  2019-05-28 07:18:08 -0700
committer  Paul E. McKenney <paulmck@linux.ibm.com>  2019-08-13 14:35:49 -0700
commit     81c0b3d724f419c0524f432c1ac22b9f518c2899 (patch)
tree       06456d42837dee4892ddf6b01700f36ae749bbe7 /kernel/rcu/tree.h
parent     7f36ef82e5cf0b401c2676fb3e56ad0633ed6ad5 (diff)
rcu/nocb: Avoid ->nocb_lock capture by corresponding CPU
A given rcu_data structure's ->nocb_lock can be acquired very frequently
by the corresponding CPU, but only occasionally by the corresponding
no-CBs grace-period and callbacks kthreads.  In particular, these two
kthreads typically leave gaps of roughly a grace period between
consecutive ->nocb_lock acquisitions.  Any excessive ->nocb_lock
contention is therefore due to the CPU's acquisitions, which in turn
allows a very naive contention-avoidance strategy to be quite effective.

This commit therefore modifies rcu_nocb_lock() to first attempt a
raw_spin_trylock() and, if that fails, to atomically increment a
separate ->nocb_lock_contended counter around the blocking
raw_spin_lock().  This new ->nocb_lock_contended field is checked in
__call_rcu_nocb_wake() when interrupts are enabled, spin-waiting for
contending acquisitions to complete and thus giving the kthreads a
chance to acquire the lock.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
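
For context, here is a sketch of what the modified rcu_nocb_lock()
looks like under this scheme.  It mirrors the rcu_nocb_lock_irqsave()
macro added by the diff below; the function itself lives in
kernel/rcu/tree_plugin.h, which is outside this file's diff, so treat
the exact body as illustrative rather than authoritative:

	static void rcu_nocb_lock(struct rcu_data *rdp)
	{
		/* Nothing to do if this CPU's callbacks are not offloaded. */
		if (!rcu_segcblist_is_offloaded(&rdp->cblist))
			return;
		lockdep_assert_irqs_disabled();
		/* Fast path: take the lock without contending. */
		if (raw_spin_trylock(&rdp->nocb_lock))
			return;
		/* Slow path: advertise contention across the blocking acquisition. */
		atomic_inc(&rdp->nocb_lock_contended);
		smp_mb__after_atomic(); /* atomic_inc() before lock. */
		raw_spin_lock(&rdp->nocb_lock);
		smp_mb__before_atomic(); /* atomic_dec() after lock. */
		atomic_dec(&rdp->nocb_lock_contended);
	}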
Diffstat (limited to 'kernel/rcu/tree.h')
-rw-r--r--  kernel/rcu/tree.h  18
1 file changed, 17 insertions, 1 deletion
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index c12e85c12310..7062f9d9c053 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -197,6 +197,7 @@ struct rcu_data {
 	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
 	struct task_struct *nocb_gp_kthread;
 	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
+	atomic_t nocb_lock_contended;	/* Contention experienced. */
 	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
 	struct timer_list nocb_timer;	/* Enforce finite deferral. */
 
@@ -430,7 +431,22 @@ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
 				       unsigned long flags);
 #ifdef CONFIG_RCU_NOCB_CPU
 static void __init rcu_organize_nocb_kthreads(void);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
+#define rcu_nocb_lock_irqsave(rdp, flags)				\
+do {									\
+	if (!rcu_segcblist_is_offloaded(&(rdp)->cblist)) {		\
+		local_irq_save(flags);					\
+	} else if (!raw_spin_trylock_irqsave(&(rdp)->nocb_lock, (flags))) {\
+		atomic_inc(&(rdp)->nocb_lock_contended);		\
+		smp_mb__after_atomic(); /* atomic_inc() before lock. */	\
+		raw_spin_lock_irqsave(&(rdp)->nocb_lock, (flags));	\
+		smp_mb__before_atomic(); /* atomic_dec() after lock. */	\
+		atomic_dec(&(rdp)->nocb_lock_contended);		\
+	}								\
+} while (0)
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
 static void rcu_bind_gp_kthread(void);
 static bool rcu_nohz_full_cpu(void);
 static void rcu_dynticks_task_enter(void);
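
The spin-wait that the commit message describes for
__call_rcu_nocb_wake() is likewise implemented in
kernel/rcu/tree_plugin.h and so does not appear in this diff.  A
minimal sketch of that check, with the helper name chosen here purely
for illustration, might look like:

	/* Spin until contending acquisitions of ->nocb_lock complete. */
	static void rcu_nocb_wait_contended(struct rcu_data *rdp)
	{
		/* Called only with interrupts enabled on rdp's CPU. */
		while (atomic_read(&rdp->nocb_lock_contended))
			cpu_relax(); /* Give the kthreads a chance to win. */
	}

Callers pair the new macro with the previously declared
rcu_nocb_unlock_irqrestore(), for example:

	unsigned long flags;

	rcu_nocb_lock_irqsave(rdp, flags);
	/* ... manipulate rdp->cblist ... */
	rcu_nocb_unlock_irqrestore(rdp, flags);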