From be01b4cab118e7a2d9747c71b3bef8f28fdda193 Mon Sep 17 00:00:00 2001
From: Byungchul Park
Date: Mon, 26 Feb 2018 14:11:36 +0900
Subject: rcu: Inline rcu_preempt_do_callbacks() into its sole caller

The rcu_preempt_do_callbacks() function was introduced in commit
09223371dea ("rcu: Use softirq to address performance regression"),
where it was necessary to handle kernel builds both containing and not
containing RCU-preempt. Since then, various changes (most notably
f8b7fc6b51 ("rcu: use softirq instead of kthreads except when
RCU_BOOST=y")) have resulted in this function being invoked only from
rcu_kthread_do_work(), which is present only in kernels containing
RCU-preempt, which in turn means that the rcu_preempt_do_callbacks()
function is no longer needed.

This commit therefore inlines rcu_preempt_do_callbacks() into its sole
remaining caller and also removes the rcu_state_p and rcu_data_p
indirection for added clarity.

Signed-off-by: Byungchul Park
Reviewed-by: Steven Rostedt (VMware)
[ paulmck: Remove the rcu_state_p and rcu_data_p indirection. ]
Signed-off-by: Paul E. McKenney
Tested-by: Nicholas Piggin
---
 kernel/rcu/tree.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'kernel/rcu/tree.h')

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index f491ab4f2e8e..3a0dc30100e8 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -438,7 +438,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
 static bool rcu_is_callbacks_kthread(void);
 #ifdef CONFIG_RCU_BOOST
-static void rcu_preempt_do_callbacks(void);
 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
--
cgit 1.4.1

From 6fba2b3767ea6e3e1204855031492415cc4dce4f Mon Sep 17 00:00:00 2001
From: Byungchul Park
Date: Fri, 2 Mar 2018 16:39:12 +0900
Subject: rcu: Remove deprecated RCU debugfs tracing code

Commit ae91aa0adb14 ("rcu: Remove debugfs tracing") removed the RCU
debugfs tracing code, but did not remove the no-longer used
->exp_workdone{0,1,2,3} fields in the rcu_data structure. This commit
therefore removes these fields along with the code that uselessly
updates them.

Signed-off-by: Byungchul Park
Signed-off-by: Paul E. McKenney
Tested-by: Nicholas Piggin
---
 kernel/rcu/tree.h     |  4 ----
 kernel/rcu/tree_exp.h | 13 +++++--------
 2 files changed, 5 insertions(+), 12 deletions(-)

(limited to 'kernel/rcu/tree.h')

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 3a0dc30100e8..5fd374c71404 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -224,10 +224,6 @@ struct rcu_data {
 #ifdef CONFIG_RCU_FAST_NO_HZ
 	struct rcu_head oom_head;
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-	atomic_long_t exp_workdone0;	/* # done by workqueue. */
-	atomic_long_t exp_workdone1;	/* # done by others #1. */
-	atomic_long_t exp_workdone2;	/* # done by others #2. */
-	atomic_long_t exp_workdone3;	/* # done by others #3. */
 	int exp_dynticks_snap;		/* Double-check need for IPI. */
 
 /* 6) Callback offloading. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index f72eefab8543..f512dd4e57a8 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -248,14 +248,12 @@ static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
 }
 
 /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
-static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
-			       unsigned long s)
+static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
 {
 	if (rcu_exp_gp_seq_done(rsp, s)) {
 		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
 		/* Ensure test happens before caller kfree(). */
 		smp_mb__before_atomic(); /* ^^^ */
-		atomic_long_inc(stat);
 		return true;
 	}
 	return false;
@@ -289,7 +287,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 	 * promoting locality and is not strictly needed for correctness.
 	 */
 	for (; rnp != NULL; rnp = rnp->parent) {
-		if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
+		if (sync_exp_work_done(rsp, s))
 			return true;
 
 		/* Work not done, either wait here or go up. */
@@ -302,8 +300,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 						  rnp->grplo, rnp->grphi,
 						  TPS("wait"));
 			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
-				   sync_exp_work_done(rsp,
-						      &rdp->exp_workdone2, s));
+				   sync_exp_work_done(rsp, s));
 			return true;
 		}
 		rnp->exp_seq_rq = s; /* Followers can wait on us. */
@@ -313,7 +310,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 	}
 	mutex_lock(&rsp->exp_mutex);
 fastpath:
-	if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
+	if (sync_exp_work_done(rsp, s)) {
 		mutex_unlock(&rsp->exp_mutex);
 		return true;
 	}
@@ -633,7 +630,7 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
 	rnp = rcu_get_root(rsp);
 	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
-		   sync_exp_work_done(rsp, &rdp->exp_workdone0, s));
+		   sync_exp_work_done(rsp, s));
 	smp_mb(); /* Workqueue actions happen before return. */
 
 	/* Let the next expedited grace period start. */
--
cgit 1.4.1

From 17672480fb1e953f999623b598a98130f8aacfbc Mon Sep 17 00:00:00 2001
From: Yury Norov
Date: Sun, 25 Mar 2018 20:50:03 +0300
Subject: rcu: Declare rcu_eqs_special_set() in public header

Because rcu_eqs_special_set() is declared only in the internal header
kernel/rcu/tree.h and stubbed in include/linux/rcutiny.h, it is
inaccessible outside of the RCU implementation. This patch therefore
moves the rcu_eqs_special_set() declaration to include/linux/rcutree.h,
which allows it to be used in non-rcu kernel code.

Signed-off-by: Yury Norov
Signed-off-by: Paul E. McKenney
Tested-by: Nicholas Piggin
---
 include/linux/rcutree.h | 1 +
 kernel/rcu/tree.h       | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/rcu/tree.h')

diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index fd996cdf1833..448f20f27396 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -74,6 +74,7 @@ static inline void synchronize_rcu_bh_expedited(void)
 void rcu_barrier(void);
 void rcu_barrier_bh(void);
 void rcu_barrier_sched(void);
+bool rcu_eqs_special_set(int cpu);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
 unsigned long get_state_synchronize_sched(void);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 5fd374c71404..0b3a90ebe225 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -404,7 +404,6 @@ extern struct rcu_state rcu_preempt_state;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 
 int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
-bool rcu_eqs_special_set(int cpu);
 
 #ifdef CONFIG_RCU_BOOST
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
--
cgit 1.4.1
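To illustrate what the newly public declaration permits (this sketch is not
part of any patch in this series, and the poke_cpu()/do_poke() names are
hypothetical), a subsystem outside the RCU core could now avoid sending an
IPI to an idle CPU along these lines:

#include <linux/rcutree.h>
#include <linux/smp.h>

/* Runs on the target CPU only when an IPI turns out to be necessary. */
static void do_poke(void *info)
{
}

static void poke_cpu(int cpu)
{
	/*
	 * rcu_eqs_special_set() returns true if the CPU is in an RCU
	 * extended quiescent state (idle or nohz_full userspace) and the
	 * special-action bit was set; the CPU will then notice the request
	 * when it next exits that state, so no IPI is needed. Otherwise,
	 * fall back to interrupting the CPU directly.
	 */
	if (!rcu_eqs_special_set(cpu))
		smp_call_function_single(cpu, do_poke, NULL, 0);
}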
From c91a8675b9cc697c725b6d97fcc7f157f4a989d0 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 18 Apr 2018 11:11:39 -0700
Subject: rcu: Add accessor macros for the ->need_future_gp[] array

Accessors for the ->need_future_gp[] array are currently open-coded,
which makes them difficult to change. To improve maintainability, this
commit adds need_future_gp_mask() to compute the indexing mask from the
array size, need_future_gp_element() to access the element corresponding
to the specified grace-period number, and need_any_future_gp() to
determine if any future grace period is needed.

This commit also applies need_future_gp_element() to existing open-coded
single-element accesses.

Signed-off-by: Paul E. McKenney
Tested-by: Nicholas Piggin
---
 kernel/rcu/tree.c        | 16 +++++++---------
 kernel/rcu/tree.h        | 15 +++++++++++++++
 kernel/rcu/tree_plugin.h |  2 +-
 3 files changed, 23 insertions(+), 10 deletions(-)

(limited to 'kernel/rcu/tree.h')

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4bbba17422cd..79fb99951a0c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -718,11 +718,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static int rcu_future_needs_gp(struct rcu_state *rsp)
 {
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
-	int *fp = &rnp->need_future_gp[idx];
 
 	lockdep_assert_irqs_disabled();
-	return READ_ONCE(*fp);
+	return READ_ONCE(need_future_gp_element(rnp, rnp->completed));
 }
 
 /*
@@ -1699,7 +1697,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 */
 	c = rcu_cbs_completed(rdp->rsp, rnp);
 	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
-	if (rnp->need_future_gp[c & 0x1]) {
+	if (need_future_gp_element(rnp, c)) {
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
 		goto out;
 	}
@@ -1711,7 +1709,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * current grace period, we don't need to explicitly start one.
 	 */
 	if (rnp->gpnum != rnp->completed) {
-		rnp->need_future_gp[c & 0x1]++;
+		need_future_gp_element(rnp, c)++;
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
 		goto out;
 	}
@@ -1737,13 +1735,13 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * If the needed for the required grace period is already
 	 * recorded, trace and leave.
 	 */
-	if (rnp_root->need_future_gp[c & 0x1]) {
+	if (need_future_gp_element(rnp_root, c)) {
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
 		goto unlock_out;
 	}
 
 	/* Record the need for the future grace period. */
-	rnp_root->need_future_gp[c & 0x1]++;
+	need_future_gp_element(rnp_root, c)++;
 
 	/* If a grace period is not already in progress, start one. */
 	if (rnp_root->gpnum != rnp_root->completed) {
@@ -1771,8 +1769,8 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 	int needmore;
 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
-	rnp->need_future_gp[c & 0x1] = 0;
-	needmore = rnp->need_future_gp[(c + 1) & 0x1];
+	need_future_gp_element(rnp, c) = 0;
+	needmore = need_future_gp_element(rnp, c + 1);
 	trace_rcu_future_gp(rnp, rdp, c,
 			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
 	return needmore;
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index f491ab4f2e8e..18b091474ffa 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -159,6 +159,21 @@ struct rcu_node {
 	wait_queue_head_t exp_wq[4];
 } ____cacheline_internodealigned_in_smp;
 
+/* Accessors for ->need_future_gp[] array. */
+#define need_future_gp_mask() \
+	(ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
+#define need_future_gp_element(rnp, c) \
+	((rnp)->need_future_gp[(c) & need_future_gp_mask()])
+#define need_any_future_gp(rnp) \
+({ \
+	int __i; \
+	bool __nonzero = false; \
+	\
+	for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++) \
+		__nonzero = __nonzero || (rnp)->need_future_gp[__i]; \
+	__nonzero; \
+})
+
 /*
  * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
  * are indexed relative to this interval rather than the global CPU ID space.
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 84fbee4686d3..640ea927d8a4 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1790,7 +1790,7 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
  */
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
 {
-	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
+	need_future_gp_element(rnp, rnp->completed + 1) += nrq;
 }
 
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
--
cgit 1.4.1
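To see concretely how the need_future_gp_element() indexing behaves (a
standalone user-space illustration, not kernel code; four slots are used here
to match the size the series settles on later), masking the grace-period
number with the array size minus one treats the array as a small
power-of-two ring:

#include <stdio.h>

#define NR_SLOTS  4			/* stand-in for ARRAY_SIZE(->need_future_gp) */
#define SLOT_MASK (NR_SLOTS - 1)	/* what need_future_gp_mask() computes */

static int need_gp[NR_SLOTS];

int main(void)
{
	unsigned long c;
	int i;

	/* Requests for grace periods 100, 101, and 102 land in distinct slots. */
	for (c = 100; c <= 102; c++)
		need_gp[c & SLOT_MASK] = 1;	/* like need_future_gp_element(rnp, c) */

	for (i = 0; i < NR_SLOTS; i++)
		printf("slot %d: %d\n", i, need_gp[i]);
	return 0;
}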
From fb31340f8a43a6f2e871164822ef4979b36232ae Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 12 Apr 2018 07:20:30 -0700
Subject: rcu: Make rcu_gp_cleanup() more accurately predict need for new GP

Currently, rcu_gp_cleanup() scans the rcu_node tree in order to reset
state to reflect the end of the grace period. It also checks to see
whether a new grace period is needed, but in a number of cases, rather
than directly cause the new grace period to be immediately started, it
instead leaves the grace-period-needed state where various fail-safes
can find it. This works fine, but results in higher contention on the
root rcu_node structure's ->lock, which is undesirable, and contention
on that lock has recently become noticeable.

This commit therefore makes rcu_gp_cleanup() immediately start a new
grace period if there is any need for one. It is quite possible that it
will later be necessary to throttle the grace-period rate, but that can
be dealt with when and if.

Reported-by: Nicholas Piggin
Signed-off-by: Paul E. McKenney
Tested-by: Nicholas Piggin
---
 kernel/rcu/tree.c        | 16 ++++++++++------
 kernel/rcu/tree.h        |  1 -
 kernel/rcu/tree_plugin.h | 17 -----------------
 3 files changed, 10 insertions(+), 24 deletions(-)

(limited to 'kernel/rcu/tree.h')

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 497f139056c7..afc5e32f0da4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1763,14 +1763,14 @@ out:
  * Clean up any old requests for the just-ended grace period. Also return
  * whether any additional grace periods have been requested.
  */
-static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 	int c = rnp->completed;
-	int needmore;
+	bool needmore;
 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
 	need_future_gp_element(rnp, c) = 0;
-	needmore = need_future_gp_element(rnp, c + 1);
+	needmore = need_any_future_gp(rnp);
 	trace_rcu_future_gp(rnp, rdp, c,
 			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
 	return needmore;
@@ -2113,7 +2113,6 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 {
 	unsigned long gp_duration;
 	bool needgp = false;
-	int nocb = 0;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	struct swait_queue_head *sq;
@@ -2152,7 +2151,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		if (rnp == rdp->mynode)
 			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
 		/* smp_mb() provided by prior unlock-lock pair. */
-		nocb += rcu_future_gp_cleanup(rsp, rnp);
+		needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp;
 		sq = rcu_nocb_gp_get(rnp);
 		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
@@ -2162,13 +2161,18 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
-	rcu_nocb_gp_set(rnp, nocb);
 
 	/* Declare grace period done. */
 	WRITE_ONCE(rsp->completed, rsp->gpnum);
 	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
 	rsp->gp_state = RCU_GP_IDLE;
+	/* Check for GP requests since above loop. */
 	rdp = this_cpu_ptr(rsp->rda);
+	if (need_any_future_gp(rnp)) {
+		trace_rcu_future_gp(rnp, rdp, rsp->completed - 1,
+				    TPS("CleanupMore"));
+		needgp = true;
+	}
 	/* Advance CBs to reduce false positives below. */
 	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
 	if (needgp || cpu_needs_another_gp(rsp, rdp)) {
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 18b091474ffa..bd1103763551 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -469,7 +469,6 @@ static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
-static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 640ea927d8a4..313b77d9cf06 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1780,19 +1780,6 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
 	swake_up_all(sq);
 }
-/*
- * Set the root rcu_node structure's ->need_future_gp field
- * based on the sum of those of all rcu_node structures. This does
- * double-count the root rcu_node structure's requests, but this
- * is necessary to handle the possibility of a rcu_nocb_kthread()
- * having awakened during the time that the rcu_node structures
- * were being updated for the end of the previous grace period.
- */
-static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
-{
-	need_future_gp_element(rnp, rnp->completed + 1) += nrq;
-}
-
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
 {
 	return &rnp->nocb_gp_wq[rnp->completed & 0x1];
 }
@@ -2495,10 +2482,6 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
 {
 }
 
-static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
-{
-}
-
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
 {
 	return NULL;
--
cgit 1.4.1

From 51af970d19f395fc57b82514022126de6c5420cd Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Sat, 14 Apr 2018 10:40:57 -0700
Subject: rcu: Avoid losing ->need_future_gp[] values due to GP start/end races

The rcu_cbs_completed() function provides the value of ->completed at
which new callbacks can safely be invoked. This is recorded in
two-element ->need_future_gp[] arrays in the rcu_node structure, and the
elements of these arrays corresponding to the just-completed grace
period are zeroed at the end of that grace period.

However, the rcu_cbs_completed() function can return the current
->completed value plus either one or two, so it is possible for the
corresponding ->need_future_gp[] entry to be cleared just after it was
set, thus losing a request for a future grace period. This commit avoids
this race by expanding ->need_future_gp[] to four elements.

Signed-off-by: Paul E. McKenney
Tested-by: Nicholas Piggin
---
 kernel/rcu/tree.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'kernel/rcu/tree.h')

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bd1103763551..952cd0c223fe 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -150,8 +150,7 @@ struct rcu_node {
 	struct swait_queue_head nocb_gp_wq[2];
 				/* Place for rcu_nocb_kthread() to wait GP. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-	int need_future_gp[2];
-				/* Counts of upcoming no-CB GP requests. */
+	int need_future_gp[4];	/* Counts of upcoming no-CB GP requests. */
 	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
 
 	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
--
cgit 1.4.1
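The arithmetic behind that race is easy to check in isolation (a standalone
illustration, not kernel code): with only two slots, a request for
->completed + 2 hashes to the very slot that rcu_future_gp_cleanup() is about
to zero, whereas four slots keep the two indices apart:

#include <stdio.h>

int main(void)
{
	unsigned long completed = 42;			/* just-ended grace period */
	unsigned long requested = completed + 2;	/* rcu_cbs_completed() may return this */

	/* Two-element array: the new request aliases the slot being cleared. */
	printf("2 slots: cleanup uses %lu, request uses %lu\n",
	       completed & 0x1, requested & 0x1);

	/* Four-element array: the two operations use different slots. */
	printf("4 slots: cleanup uses %lu, request uses %lu\n",
	       completed & 0x3, requested & 0x3);
	return 0;
}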
From 0ae94e00ce40e4447080ab7675220f725c690330 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 18 Apr 2018 14:14:42 -0700
Subject: rcu: Make rcu_future_needs_gp() check all ->need_future_gp[] elements

Currently, the rcu_future_needs_gp() function checks only the current
element of the ->need_future_gp[] array, which might miss elements that
were offset from the expected element, for example, due to races with
the start or the end of a grace period. This commit therefore makes
rcu_future_needs_gp() use the need_any_future_gp() macro to check all
of the elements of this array.

Signed-off-by: Paul E. McKenney
Tested-by: Nicholas Piggin
---
 kernel/rcu/tree.c | 2 +-
 kernel/rcu/tree.h | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'kernel/rcu/tree.h')

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index afc5e32f0da4..b05ab6379562 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -720,7 +720,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	lockdep_assert_irqs_disabled();
-	return READ_ONCE(need_future_gp_element(rnp, rnp->completed));
+	return need_any_future_gp(rnp);
 }
 
 /*
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 952cd0c223fe..123c30eac8b5 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -169,7 +169,8 @@ struct rcu_node {
 	bool __nonzero = false; \
 	\
 	for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++) \
-		__nonzero = __nonzero || (rnp)->need_future_gp[__i]; \
+		__nonzero = __nonzero || \
+			    READ_ONCE((rnp)->need_future_gp[__i]); \
 	__nonzero; \
 })
 
--
cgit 1.4.1

From 6f576e281690316270275bbef17c79ea304ad511 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 18 Apr 2018 16:50:31 -0700
Subject: rcu: Convert ->need_future_gp[] array to boolean

There is no longer any need for ->need_future_gp[] to count the number
of requests for future grace periods, so this commit converts the
additions into assignments of "true" and reduces the size of each
element to one byte. While we are in the area, fix an obsolete comment.

Signed-off-by: Paul E. McKenney
Tested-by: Nicholas Piggin
---
 kernel/rcu/tree.c | 6 +++---
 kernel/rcu/tree.h | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'kernel/rcu/tree.h')

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b05ab6379562..6ef1f2b4a6d3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1709,7 +1709,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * current grace period, we don't need to explicitly start one.
 	 */
 	if (rnp->gpnum != rnp->completed) {
-		need_future_gp_element(rnp, c)++;
+		need_future_gp_element(rnp, c) = true;
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
 		goto out;
 	}
@@ -1741,7 +1741,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	}
 
 	/* Record the need for the future grace period. */
-	need_future_gp_element(rnp_root, c)++;
+	need_future_gp_element(rnp_root, c) = true;
 
 	/* If a grace period is not already in progress, start one. */
 	if (rnp_root->gpnum != rnp_root->completed) {
@@ -1769,7 +1769,7 @@ static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 	bool needmore;
 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
-	need_future_gp_element(rnp, c) = 0;
+	need_future_gp_element(rnp, c) = false;
 	needmore = need_any_future_gp(rnp);
 	trace_rcu_future_gp(rnp, rdp, c,
 			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 123c30eac8b5..9f97fd7f648c 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -150,7 +150,7 @@ struct rcu_node {
 	struct swait_queue_head nocb_gp_wq[2];
 				/* Place for rcu_nocb_kthread() to wait GP. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-	int need_future_gp[4];	/* Counts of upcoming no-CB GP requests. */
+	u8 need_future_gp[4];	/* Counts of upcoming GP requests. */
 	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
 
 	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
--
cgit 1.4.1
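Taken together, the series leaves ->need_future_gp[] as a small ring of
booleans: a request marks its slot true, and grace-period cleanup clears the
just-ended slot and then checks whether any slot is still set. A standalone
sketch of that protocol (illustration only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 4

static bool need_gp[NR_SLOTS];

/* Roughly what rcu_start_future_gp() now records. */
static void record_request(unsigned long c)
{
	need_gp[c & (NR_SLOTS - 1)] = true;
}

/* Roughly what rcu_future_gp_cleanup() now returns. */
static bool cleanup(unsigned long completed)
{
	bool needmore = false;
	int i;

	need_gp[completed & (NR_SLOTS - 1)] = false;
	for (i = 0; i < NR_SLOTS; i++)
		needmore = needmore || need_gp[i];	/* like need_any_future_gp() */
	return needmore;
}

int main(void)
{
	record_request(101);	/* callbacks waiting on grace period 101 ... */
	record_request(102);	/* ... and on grace period 102 */
	printf("after GP 100 ends: needmore=%d\n", cleanup(100));
	printf("after GP 101 ends: needmore=%d\n", cleanup(101));
	printf("after GP 102 ends: needmore=%d\n", cleanup(102));
	return 0;
}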