1 // SPDX-License-Identifier: GPL-2.0+
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
5 * Copyright IBM Corporation, 2008
7 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8 * Manfred Spraul <manfred@colorfullife.com>
9 * Paul E. McKenney <paulmck@linux.ibm.com>
11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
14 * For detailed explanation of Read-Copy Update mechanism see -
18 #define pr_fmt(fmt) "rcu: " fmt
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/moduleparam.h>
35 #include <linux/percpu.h>
36 #include <linux/notifier.h>
37 #include <linux/cpu.h>
38 #include <linux/mutex.h>
39 #include <linux/time.h>
40 #include <linux/kernel_stat.h>
41 #include <linux/wait.h>
42 #include <linux/kthread.h>
43 #include <uapi/linux/sched/types.h>
44 #include <linux/prefetch.h>
45 #include <linux/delay.h>
46 #include <linux/random.h>
47 #include <linux/trace_events.h>
48 #include <linux/suspend.h>
49 #include <linux/ftrace.h>
50 #include <linux/tick.h>
51 #include <linux/sysrq.h>
52 #include <linux/kprobes.h>
53 #include <linux/gfp.h>
54 #include <linux/oom.h>
55 #include <linux/smpboot.h>
56 #include <linux/jiffies.h>
57 #include <linux/slab.h>
58 #include <linux/sched/isolation.h>
59 #include <linux/sched/clock.h>
60 #include <linux/vmalloc.h>
62 #include <linux/kasan.h>
63 #include "../time/tick-internal.h"
68 #ifdef MODULE_PARAM_PREFIX
69 #undef MODULE_PARAM_PREFIX
71 #define MODULE_PARAM_PREFIX "rcutree."
73 /* Data structures. */
76 * Steal a bit from the bottom of ->dynticks for idle entry/exit
77 * control. Initially this is for TLB flushing.
79 #define RCU_DYNTICK_CTRL_MASK 0x1
80 #define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1)
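/*
 * Illustrative sketch (not part of the original file): with the layout
 * above, bit 0 of ->dynticks is the special-action flag and the remaining
 * bits form the counter that rcu_dynticks_eqs_enter() and
 * rcu_dynticks_eqs_exit() advance by RCU_DYNTICK_CTRL_CTR.  The
 * hypothetical helpers below merely restate that split.
 */
static inline bool rcu_dynticks_special_pending_example(int dynticks_val)
{
	return !!(dynticks_val & RCU_DYNTICK_CTRL_MASK);	/* e.g. TLB flush */
}

static inline int rcu_dynticks_ctr_bits_example(int dynticks_val)
{
	return dynticks_val & ~RCU_DYNTICK_CTRL_MASK;		/* counter portion */
}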
82 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
83 .dynticks_nesting = 1,
84 .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
85 .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
87 static struct rcu_state rcu_state = {
88 .level = { &rcu_state.node[0] },
89 .gp_state = RCU_GP_IDLE,
90 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
91 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
94 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
95 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
96 .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
99 /* Dump rcu_node combining tree at boot to verify correct setup. */
100 static bool dump_tree;
101 module_param(dump_tree, bool, 0444);
102 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
103 static bool use_softirq = true;
104 module_param(use_softirq, bool, 0444);
105 /* Control rcu_node-tree auto-balancing at boot time. */
106 static bool rcu_fanout_exact;
107 module_param(rcu_fanout_exact, bool, 0444);
108 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
109 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
110 module_param(rcu_fanout_leaf, int, 0444);
111 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
112 /* Number of rcu_nodes at specified level. */
113 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
114 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
117 * The rcu_scheduler_active variable is initialized to the value
118 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
119 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
120 * RCU can assume that there is but one task, allowing RCU to (for example)
121 * optimize synchronize_rcu() to a simple barrier(). When this variable
122 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
123 * to detect real grace periods. This variable is also used to suppress
124 * boot-time false positives from lockdep-RCU error checking. Finally, it
125 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
126 * is fully initialized, including all of its kthreads having been spawned.
128 int rcu_scheduler_active __read_mostly;
129 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
132 * The rcu_scheduler_fully_active variable transitions from zero to one
133 * during the early_initcall() processing, which is after the scheduler
134 * is capable of creating new tasks. So RCU processing (for example,
135 * creating tasks for RCU priority boosting) must be delayed until after
136 * rcu_scheduler_fully_active transitions from zero to one. We also
137 * currently delay invocation of any RCU callbacks until after this point.
139 * It might later prove better for people registering RCU callbacks during
140 * early boot to take responsibility for these callbacks, but one step at a time.
143 static int rcu_scheduler_fully_active __read_mostly;
145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
146 unsigned long gps, unsigned long flags);
147 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
148 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
149 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
150 static void invoke_rcu_core(void);
151 static void rcu_report_exp_rdp(struct rcu_data *rdp);
152 static void sync_sched_exp_online_cleanup(int cpu);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
155 /* rcuc/rcub kthread realtime priority */
156 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
157 module_param(kthread_prio, int, 0444);
159 /* Delay in jiffies for grace-period initialization delays, debug only. */
161 static int gp_preinit_delay;
162 module_param(gp_preinit_delay, int, 0444);
163 static int gp_init_delay;
164 module_param(gp_init_delay, int, 0444);
165 static int gp_cleanup_delay;
166 module_param(gp_cleanup_delay, int, 0444);
168 // Add delay to rcu_read_unlock() for strict grace periods.
169 static int rcu_unlock_delay;
170 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
171 module_param(rcu_unlock_delay, int, 0444);
175 * This rcu parameter is runtime-read-only. It reflects
176 * a minimum allowed number of objects which can be cached
177 * per-CPU. Object size is equal to one page. This value
178 * can be changed at boot time.
180 static int rcu_min_cached_objs = 2;
181 module_param(rcu_min_cached_objs, int, 0444);
183 /* Retrieve RCU kthreads priority for rcutorture */
184 int rcu_get_gp_kthreads_prio(void)
188 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
191 * Number of grace periods between delays, normalized by the duration of
192 * the delay. The longer the delay, the more the grace periods between
193 * each delay. The reason for this normalization is that it means that,
194 * for non-zero delays, the overall slowdown of grace periods is constant
195 * regardless of the duration of the delay. This arrangement balances
196 * the need for long delays to increase some race probabilities with the
197 * need for fast grace periods to increase other race probabilities.
199 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */
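/*
 * Worked example (illustrative numbers): rcu_gp_slow() below sleeps for
 * "delay" jiffies once every rcu_num_nodes * PER_RCU_NODE_PERIOD * delay
 * grace periods.  With rcu_num_nodes == 5 and delay == 4 that is one
 * 4-jiffy sleep per 60 grace periods; with delay == 8 it is one 8-jiffy
 * sleep per 120 grace periods.  Either way the average added latency is
 * 1/15 jiffy per grace period, which is the constant slowdown described
 * above.
 */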
202 * Compute the mask of online CPUs for the specified rcu_node structure.
203 * This will not be stable unless the rcu_node structure's ->lock is
204 * held, but the bit corresponding to the current CPU will be stable in most contexts.
207 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
209 return READ_ONCE(rnp->qsmaskinitnext);
213 * Return true if an RCU grace period is in progress. The READ_ONCE()s
214 * permit this function to be invoked without holding the root rcu_node
215 * structure's ->lock, but of course results can be subject to change.
217 static int rcu_gp_in_progress(void)
219 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
223 * Return the number of callbacks queued on the specified CPU.
224 * Handles both the nocbs and normal cases.
226 static long rcu_get_n_cbs_cpu(int cpu)
228 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
230 if (rcu_segcblist_is_enabled(&rdp->cblist))
231 return rcu_segcblist_n_cbs(&rdp->cblist);
235 void rcu_softirq_qs(void)
238 rcu_preempt_deferred_qs(current);
242 * Record entry into an extended quiescent state. This is only to be
243 * called when not already in an extended quiescent state, that is,
244 * RCU is watching prior to the call to this function and is no longer
245 * watching upon return.
247 static noinstr void rcu_dynticks_eqs_enter(void)
249 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
253 * CPUs seeing atomic_add_return() must see prior RCU read-side
254 * critical sections, and we also must force ordering with the next idle sojourn.
257 rcu_dynticks_task_trace_enter(); // Before ->dynticks update!
258 seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
259 // RCU is no longer watching. Better be in extended quiescent state!
260 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
261 (seq & RCU_DYNTICK_CTRL_CTR));
262 /* Better not have special action (TLB flush) pending! */
263 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
264 (seq & RCU_DYNTICK_CTRL_MASK));
268 * Record exit from an extended quiescent state. This is only to be
269 * called from an extended quiescent state, that is, RCU is not watching
270 * prior to the call to this function and is watching upon return.
272 static noinstr void rcu_dynticks_eqs_exit(void)
274 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
278 * CPUs seeing atomic_add_return() must see prior idle sojourns,
279 * and we also must force ordering with the next RCU read-side critical section.
282 seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
283 // RCU is now watching. Better not be in an extended quiescent state!
284 rcu_dynticks_task_trace_exit(); // After ->dynticks update!
285 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
286 !(seq & RCU_DYNTICK_CTRL_CTR));
287 if (seq & RCU_DYNTICK_CTRL_MASK) {
288 arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
289 smp_mb__after_atomic(); /* _exit after clearing mask. */
294 * Reset the current CPU's ->dynticks counter to indicate that the
295 * newly onlined CPU is no longer in an extended quiescent state.
296 * This will either leave the counter unchanged, or increment it
297 * to the next non-quiescent value.
299 * The non-atomic test/increment sequence works because the upper bits
300 * of the ->dynticks counter are manipulated only by the corresponding CPU,
301 * or when the corresponding CPU is offline.
303 static void rcu_dynticks_eqs_online(void)
305 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
307 if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
309 atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
313 * Is the current CPU in an extended quiescent state?
315 * No ordering, as we are sampling CPU-local information.
317 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
319 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
321 return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
325 * Snapshot the ->dynticks counter with full ordering so as to allow
326 * stable comparison of this counter with past and future snapshots.
328 static int rcu_dynticks_snap(struct rcu_data *rdp)
330 int snap = atomic_add_return(0, &rdp->dynticks);
332 return snap & ~RCU_DYNTICK_CTRL_MASK;
336 * Return true if the snapshot returned from rcu_dynticks_snap()
337 * indicates that RCU is in an extended quiescent state.
339 static bool rcu_dynticks_in_eqs(int snap)
341 return !(snap & RCU_DYNTICK_CTRL_CTR);
345 * Return true if the CPU corresponding to the specified rcu_data
346 * structure has spent some time in an extended quiescent state since
347 * rcu_dynticks_snap() returned the specified snapshot.
349 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
351 return snap != rcu_dynticks_snap(rdp);
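/*
 * Illustrative usage (an assumption, mirroring the force-quiescent-state
 * scan later in this file): take a snapshot, let time pass, then decide
 * whether the CPU either was in an extended quiescent state or has passed
 * through one since:
 *
 *	int snap = rcu_dynticks_snap(rdp);
 *	// ... some time later ...
 *	if (rcu_dynticks_in_eqs(snap) || rcu_dynticks_in_eqs_since(rdp, snap))
 *		;	// Counts as a quiescent state for this CPU.
 */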
355 * Return true if the referenced integer is zero while the specified
356 * CPU remains within a single extended quiescent state.
358 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
360 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
363 // If not quiescent, force back to earlier extended quiescent state.
364 snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
365 RCU_DYNTICK_CTRL_CTR);
367 smp_rmb(); // Order ->dynticks and *vp reads.
369 return false; // Non-zero, so report failure;
370 smp_rmb(); // Order *vp read and ->dynticks re-read.
372 // If still in the same extended quiescent state, we are good!
373 return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
377 * Set the special (bottom) bit of the specified CPU so that it
378 * will take special action (such as flushing its TLB) on the
379 * next exit from an extended quiescent state. Returns true if
380 * the bit was successfully set, or false if the CPU was not in
381 * an extended quiescent state.
383 bool rcu_eqs_special_set(int cpu)
388 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
390 new_old = atomic_read(&rdp->dynticks);
393 if (old & RCU_DYNTICK_CTRL_CTR)
395 new = old | RCU_DYNTICK_CTRL_MASK;
396 new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
397 } while (new_old != old);
402 * Let the RCU core know that this CPU has gone through the scheduler,
403 * which is a quiescent state. This is called when the need for a
404 * quiescent state is urgent, so we burn an atomic operation and full
405 * memory barriers to let the RCU core know about it, regardless of what
406 * this CPU might (or might not) do in the near future.
408 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
410 * The caller must have disabled interrupts and must not be idle.
412 void rcu_momentary_dyntick_idle(void)
416 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
417 special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
418 &this_cpu_ptr(&rcu_data)->dynticks);
419 /* It is illegal to call this from idle state. */
420 WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
421 rcu_preempt_deferred_qs(current);
423 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
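/*
 * Worked example for the 2 * RCU_DYNTICK_CTRL_CTR addition above
 * (illustrative values): if a remote CPU had snapshotted ->dynticks as 8
 * via rcu_dynticks_snap(), this call advances it to 12, so a later
 * rcu_dynticks_in_eqs_since() comparison sees a changed counter and
 * credits this CPU with a quiescent state, just as if it had entered and
 * then exited dyntick-idle for zero time.
 */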
426 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
428 * If the current CPU is idle and running at a first-level (not nested)
429 * interrupt, or directly from idle, return true.
431 * The caller must have at least disabled IRQs.
433 static int rcu_is_cpu_rrupt_from_idle(void)
438 * Usually called from the tick; but also used from smp_call_function_single()
439 * for expedited grace periods. This latter can result in running from
440 * the idle task, instead of an actual IPI.
442 lockdep_assert_irqs_disabled();
444 /* Check for counter underflows */
445 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
446 "RCU dynticks_nesting counter underflow!");
447 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
448 "RCU dynticks_nmi_nesting counter underflow/zero!");
450 /* Are we at first interrupt nesting level? */
451 nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
456 * If we're not in an interrupt, we must be in the idle task!
458 WARN_ON_ONCE(!nesting && !is_idle_task(current));
460 /* Does CPU appear to be idle from an RCU standpoint? */
461 return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
464 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
465 // Maximum callbacks per rcu_do_batch ...
466 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
467 static long blimit = DEFAULT_RCU_BLIMIT;
468 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
469 static long qhimark = DEFAULT_RCU_QHIMARK;
470 #define DEFAULT_RCU_QLOMARK 100 // Once only this many pending, use blimit.
471 static long qlowmark = DEFAULT_RCU_QLOMARK;
472 #define DEFAULT_RCU_QOVLD_MULT 2
473 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
474 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
475 static long qovld_calc = -1; // No pre-initialization lock acquisitions!
477 module_param(blimit, long, 0444);
478 module_param(qhimark, long, 0444);
479 module_param(qlowmark, long, 0444);
480 module_param(qovld, long, 0444);
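/*
 * Worked example of how the knobs above interact (illustrative, assuming
 * the non-strict default blimit of 10): a CPU with 50 pending callbacks
 * (below qlowmark == 100) invokes at most 10 callbacks per rcu_do_batch()
 * pass; once more than qhimark == 10000 callbacks are pending, blimit is
 * ignored; and beyond qovld == 2 * qhimark == 20000 pending callbacks,
 * RCU starts aggressively forcing quiescent states on that CPU.
 */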
482 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
483 static ulong jiffies_till_next_fqs = ULONG_MAX;
484 static bool rcu_kick_kthreads;
485 static int rcu_divisor = 7;
486 module_param(rcu_divisor, int, 0644);
488 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
489 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
490 module_param(rcu_resched_ns, long, 0644);
493 * How long the grace period must be before we start recruiting
494 * quiescent-state help from rcu_note_context_switch().
496 static ulong jiffies_till_sched_qs = ULONG_MAX;
497 module_param(jiffies_till_sched_qs, ulong, 0444);
498 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
499 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
502 * Make sure that we give the grace-period kthread time to detect any
503 * idle CPUs before taking active measures to force quiescent states.
504 * However, don't go below 100 milliseconds, adjusted upwards for really large systems.
507 static void adjust_jiffies_till_sched_qs(void)
511 /* If jiffies_till_sched_qs was specified, respect the request. */
512 if (jiffies_till_sched_qs != ULONG_MAX) {
513 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
516 /* Otherwise, set to third fqs scan, but bound below on large systems. */
517 j = READ_ONCE(jiffies_till_first_fqs) +
518 2 * READ_ONCE(jiffies_till_next_fqs);
519 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
520 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
521 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
522 WRITE_ONCE(jiffies_to_sched_qs, j);
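/*
 * Worked example (illustrative numbers): with jiffies_till_first_fqs == 1
 * and jiffies_till_next_fqs == 3, the candidate value is 1 + 2 * 3 == 7
 * jiffies; if HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV works out to,
 * say, 102 jiffies on a large HZ=1000 system, the larger value 102 is
 * what ends up in jiffies_to_sched_qs.
 */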
525 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
528 int ret = kstrtoul(val, 0, &j);
531 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
532 adjust_jiffies_till_sched_qs();
537 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
540 int ret = kstrtoul(val, 0, &j);
543 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
544 adjust_jiffies_till_sched_qs();
549 static const struct kernel_param_ops first_fqs_jiffies_ops = {
550 .set = param_set_first_fqs_jiffies,
551 .get = param_get_ulong,
554 static const struct kernel_param_ops next_fqs_jiffies_ops = {
555 .set = param_set_next_fqs_jiffies,
556 .get = param_get_ulong,
559 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
560 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
561 module_param(rcu_kick_kthreads, bool, 0644);
563 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
564 static int rcu_pending(int user);
567 * Return the number of RCU GPs completed thus far for debug & stats.
569 unsigned long rcu_get_gp_seq(void)
571 return READ_ONCE(rcu_state.gp_seq);
573 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
576 * Return the number of RCU expedited batches completed thus far for
577 * debug & stats. Odd numbers mean that a batch is in progress, even
578 * numbers mean idle. The value returned will thus be roughly double
579 * the cumulative batches since boot.
581 unsigned long rcu_exp_batches_completed(void)
583 return rcu_state.expedited_sequence;
585 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
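/*
 * Worked example of the odd/even encoding above (illustrative values):
 * a return value of 6 means that three expedited batches have completed
 * and none is in flight, while 7 means that a fourth batch is currently
 * in progress.
 */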
588 * Return the root node of the rcu_state structure.
590 static struct rcu_node *rcu_get_root(void)
592 return &rcu_state.node[0];
596 * Send along grace-period-related data for rcutorture diagnostics.
598 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
599 unsigned long *gp_seq)
603 *flags = READ_ONCE(rcu_state.gp_flags);
604 *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
610 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
613 * Enter an RCU extended quiescent state, which can be either the
614 * idle loop or adaptive-tickless usermode execution.
616 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
617 * the possibility of usermode upcalls having messed up our count
618 * of interrupt nesting level during the prior busy period.
620 static noinstr void rcu_eqs_enter(bool user)
622 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
624 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
625 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
626 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
627 rdp->dynticks_nesting == 0);
628 if (rdp->dynticks_nesting != 1) {
629 // RCU will still be watching, so just do accounting and leave.
630 rdp->dynticks_nesting--;
634 lockdep_assert_irqs_disabled();
635 instrumentation_begin();
636 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
637 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
638 rdp = this_cpu_ptr(&rcu_data);
639 do_nocb_deferred_wakeup(rdp);
640 rcu_prepare_for_idle();
641 rcu_preempt_deferred_qs(current);
643 // instrumentation for the noinstr rcu_dynticks_eqs_enter()
644 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
646 instrumentation_end();
647 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
648 // RCU is watching here ...
649 rcu_dynticks_eqs_enter();
650 // ... but is no longer watching here.
651 rcu_dynticks_task_enter();
655 * rcu_idle_enter - inform RCU that current CPU is entering idle
657 * Enter idle mode, in other words, -leave- the mode in which RCU
658 * read-side critical sections can occur. (Though RCU read-side
659 * critical sections can occur in irq handlers in idle, a possibility
660 * handled by irq_enter() and irq_exit().)
662 * If you add or remove a call to rcu_idle_enter(), be sure to test with
663 * CONFIG_RCU_EQS_DEBUG=y.
665 void rcu_idle_enter(void)
667 lockdep_assert_irqs_disabled();
668 rcu_eqs_enter(false);
670 EXPORT_SYMBOL_GPL(rcu_idle_enter);
672 #ifdef CONFIG_NO_HZ_FULL
674 * rcu_user_enter - inform RCU that we are resuming userspace.
676 * Enter RCU idle mode right before resuming userspace. No use of RCU
677 * is permitted between this call and rcu_user_exit(). This way the
678 * CPU doesn't need to maintain the tick for RCU maintenance purposes
679 * when the CPU runs in userspace.
681 * If you add or remove a call to rcu_user_enter(), be sure to test with
682 * CONFIG_RCU_EQS_DEBUG=y.
684 noinstr void rcu_user_enter(void)
686 lockdep_assert_irqs_disabled();
689 #endif /* CONFIG_NO_HZ_FULL */
692 * rcu_nmi_exit - inform RCU of exit from NMI context
694 * If we are returning from the outermost NMI handler that interrupted an
695 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
696 * to let the RCU grace-period handling know that the CPU is back to being RCU-idle.
699 * If you add or remove a call to rcu_nmi_exit(), be sure to test
700 * with CONFIG_RCU_EQS_DEBUG=y.
702 noinstr void rcu_nmi_exit(void)
704 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
706 instrumentation_begin();
708 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
709 * (We are exiting an NMI handler, so RCU better be paying attention to us!)
712 WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
713 WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
716 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
717 * leave it in non-RCU-idle state.
719 if (rdp->dynticks_nmi_nesting != 1) {
720 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
721 atomic_read(&rdp->dynticks));
722 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
723 rdp->dynticks_nmi_nesting - 2);
724 instrumentation_end();
728 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
729 trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
730 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
733 rcu_prepare_for_idle();
735 // instrumentation for the noinstr rcu_dynticks_eqs_enter()
736 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
737 instrumentation_end();
739 // RCU is watching here ...
740 rcu_dynticks_eqs_enter();
741 // ... but is no longer watching here.
744 rcu_dynticks_task_enter();
748 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
750 * Exit from an interrupt handler, which might possibly result in entering
751 * idle mode, in other words, leaving the mode in which read-side critical
752 * sections can occur. The caller must have disabled interrupts.
754 * This code assumes that the idle loop never does anything that might
755 * result in unbalanced calls to irq_enter() and irq_exit(). If your
756 * architecture's idle loop violates this assumption, RCU will give you what
757 * you deserve, good and hard. But very infrequently and irreproducibly.
759 * Use things like work queues to work around this limitation.
761 * You have been warned.
763 * If you add or remove a call to rcu_irq_exit(), be sure to test with
764 * CONFIG_RCU_EQS_DEBUG=y.
766 void noinstr rcu_irq_exit(void)
768 lockdep_assert_irqs_disabled();
773 * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
774 * towards in-kernel preemption
776 * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
777 * from RCU's point of view. Invoked from return from interrupt before kernel preemption.
780 void rcu_irq_exit_preempt(void)
782 lockdep_assert_irqs_disabled();
785 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
786 "RCU dynticks_nesting counter underflow/zero!");
787 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
789 "Bad RCU dynticks_nmi_nesting counter\n");
790 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
791 "RCU in extended quiescent state!");
794 #ifdef CONFIG_PROVE_RCU
796 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
798 void rcu_irq_exit_check_preempt(void)
800 lockdep_assert_irqs_disabled();
802 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
803 "RCU dynticks_nesting counter underflow/zero!");
804 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
806 "Bad RCU dynticks_nmi_nesting counter\n");
807 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
808 "RCU in extended quiescent state!");
810 #endif /* #ifdef CONFIG_PROVE_RCU */
813 * Wrapper for rcu_irq_exit() where interrupts are enabled.
815 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
816 * with CONFIG_RCU_EQS_DEBUG=y.
818 void rcu_irq_exit_irqson(void)
822 local_irq_save(flags);
824 local_irq_restore(flags);
828 * Exit an RCU extended quiescent state, which can be either the
829 * idle loop or adaptive-tickless usermode execution.
831 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
832 * allow for the possibility of usermode upcalls messing up our count of
833 * interrupt nesting level during the busy period that is just now starting.
835 static void noinstr rcu_eqs_exit(bool user)
837 struct rcu_data *rdp;
840 lockdep_assert_irqs_disabled();
841 rdp = this_cpu_ptr(&rcu_data);
842 oldval = rdp->dynticks_nesting;
843 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
845 // RCU was already watching, so just do accounting and leave.
846 rdp->dynticks_nesting++;
849 rcu_dynticks_task_exit();
850 // RCU is not watching here ...
851 rcu_dynticks_eqs_exit();
852 // ... but is watching here.
853 instrumentation_begin();
855 // instrumentation for the noinstr rcu_dynticks_eqs_exit()
856 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
858 rcu_cleanup_after_idle();
859 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
860 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
861 WRITE_ONCE(rdp->dynticks_nesting, 1);
862 WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
863 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
864 instrumentation_end();
868 * rcu_idle_exit - inform RCU that current CPU is leaving idle
870 * Exit idle mode, in other words, -enter- the mode in which RCU
871 * read-side critical sections can occur.
873 * If you add or remove a call to rcu_idle_exit(), be sure to test with
874 * CONFIG_RCU_EQS_DEBUG=y.
876 void rcu_idle_exit(void)
880 local_irq_save(flags);
882 local_irq_restore(flags);
884 EXPORT_SYMBOL_GPL(rcu_idle_exit);
886 #ifdef CONFIG_NO_HZ_FULL
888 * rcu_user_exit - inform RCU that we are exiting userspace.
890 * Exit RCU idle mode while entering the kernel because it can
891 * run an RCU read-side critical section anytime.
893 * If you add or remove a call to rcu_user_exit(), be sure to test with
894 * CONFIG_RCU_EQS_DEBUG=y.
896 void noinstr rcu_user_exit(void)
902 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
904 * The scheduler tick is not normally enabled when CPUs enter the kernel
905 * from nohz_full userspace execution. After all, nohz_full userspace
906 * execution is an RCU quiescent state and the time executing in the kernel
907 * is quite short. Except of course when it isn't. And it is not hard to
908 * cause a large system to spend tens of seconds or even minutes looping
909 * in the kernel, which can cause a number of problems, including RCU CPU stall warnings.
912 * Therefore, if a nohz_full CPU fails to report a quiescent state
913 * in a timely manner, the RCU grace-period kthread sets that CPU's
914 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
915 * exception will invoke this function, which will turn on the scheduler
916 * tick, which will enable RCU to detect that CPU's quiescent states,
917 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
918 * The tick will be disabled once a quiescent state is reported for this CPU.
921 * Of course, in carefully tuned systems, there might never be an
922 * interrupt or exception. In that case, the RCU grace-period kthread
923 * will eventually cause one to happen. However, in less carefully
924 * controlled environments, this function allows RCU to get what it
925 * needs without creating otherwise useless interruptions.
927 void __rcu_irq_enter_check_tick(void)
929 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
931 // If we're here from NMI there's nothing to do.
935 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
936 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
938 if (!tick_nohz_full_cpu(rdp->cpu) ||
939 !READ_ONCE(rdp->rcu_urgent_qs) ||
940 READ_ONCE(rdp->rcu_forced_tick)) {
941 // RCU doesn't need nohz_full help from this CPU, or it is
942 // already getting that help.
946 // We get here only when not in an extended quiescent state and
947 // from interrupts (as opposed to NMIs). Therefore, (1) RCU is
948 // already watching and (2) The fact that we are in an interrupt
949 // handler and that the rcu_node lock is an irq-disabled lock
950 // prevents self-deadlock. So we can safely recheck under the lock.
951 // Note that the nohz_full state currently cannot change.
952 raw_spin_lock_rcu_node(rdp->mynode);
953 if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
954 // A nohz_full CPU is in the kernel and RCU needs a
955 // quiescent state. Turn on the tick!
956 WRITE_ONCE(rdp->rcu_forced_tick, true);
957 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
959 raw_spin_unlock_rcu_node(rdp->mynode);
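/*
 * Illustrative note (not in the original file): the TICK_DEP_BIT_RCU
 * dependency set above remains in force until this CPU reports a
 * quiescent state, at which point rcu_disable_urgency_upon_qs() below
 * clears both ->rcu_forced_tick and the tick dependency.
 */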
961 #endif /* CONFIG_NO_HZ_FULL */
964 * rcu_nmi_enter - inform RCU of entry to NMI context
966 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
967 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
968 * that the CPU is active. This implementation permits nested NMIs, as
969 * long as the nesting level does not overflow an int. (You will probably
970 * run out of stack space first.)
972 * If you add or remove a call to rcu_nmi_enter(), be sure to test
973 * with CONFIG_RCU_EQS_DEBUG=y.
975 noinstr void rcu_nmi_enter(void)
978 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
980 /* Complain about underflow. */
981 WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
984 * If idle from RCU viewpoint, atomically increment ->dynticks
985 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
986 * Otherwise, increment ->dynticks_nmi_nesting by two. This means
987 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
988 * to be in the outermost NMI handler that interrupted an RCU-idle
989 * period (observation due to Andy Lutomirski).
991 if (rcu_dynticks_curr_cpu_in_eqs()) {
994 rcu_dynticks_task_exit();
996 // RCU is not watching here ...
997 rcu_dynticks_eqs_exit();
998 // ... but is watching here.
1001 instrumentation_begin();
1002 rcu_cleanup_after_idle();
1003 instrumentation_end();
1006 instrumentation_begin();
1007 // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
1008 instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
1009 // instrumentation for the noinstr rcu_dynticks_eqs_exit()
1010 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
1013 } else if (!in_nmi()) {
1014 instrumentation_begin();
1015 rcu_irq_enter_check_tick();
1016 instrumentation_end();
1018 instrumentation_begin();
1021 trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
1022 rdp->dynticks_nmi_nesting,
1023 rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
1024 instrumentation_end();
1025 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
1026 rdp->dynticks_nmi_nesting + incby);
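/*
 * Worked example of the ->dynticks_nmi_nesting arithmetic above
 * (illustrative): an NMI arriving on an RCU-idle CPU takes the counter
 * from 0 to 1 (incby == 1 after the EQS exit), a nested NMI takes it from
 * 1 to 3 (incby == 2), each rcu_nmi_exit() then subtracts 2, and only the
 * outermost handler sees the value 1 and crowbars the counter back to 0
 * before re-entering the extended quiescent state.
 */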
1031 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
1033 * Enter an interrupt handler, which might possibly result in exiting
1034 * idle mode, in other words, entering the mode in which read-side critical
1035 * sections can occur. The caller must have disabled interrupts.
1037 * Note that the Linux kernel is fully capable of entering an interrupt
1038 * handler that it never exits, for example when doing upcalls to user mode!
1039 * This code assumes that the idle loop never does upcalls to user mode.
1040 * If your architecture's idle loop does do upcalls to user mode (or does
1041 * anything else that results in unbalanced calls to the irq_enter() and
1042 * irq_exit() functions), RCU will give you what you deserve, good and hard.
1043 * But very infrequently and irreproducibly.
1045 * Use things like work queues to work around this limitation.
1047 * You have been warned.
1049 * If you add or remove a call to rcu_irq_enter(), be sure to test with
1050 * CONFIG_RCU_EQS_DEBUG=y.
1052 noinstr void rcu_irq_enter(void)
1054 lockdep_assert_irqs_disabled();
1059 * Wrapper for rcu_irq_enter() where interrupts are enabled.
1061 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
1062 * with CONFIG_RCU_EQS_DEBUG=y.
1064 void rcu_irq_enter_irqson(void)
1066 unsigned long flags;
1068 local_irq_save(flags);
1070 local_irq_restore(flags);
1074 * If any sort of urgency was applied to the current CPU (for example,
1075 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
1076 * to get to a quiescent state, disable it.
1078 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
1080 raw_lockdep_assert_held_rcu_node(rdp->mynode);
1081 WRITE_ONCE(rdp->rcu_urgent_qs, false);
1082 WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
1083 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
1084 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
1085 WRITE_ONCE(rdp->rcu_forced_tick, false);
1090 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
1092 * Return true if RCU is watching the running CPU, which means that this
1093 * CPU can safely enter RCU read-side critical sections. In other words,
1094 * if the current CPU is not in its idle loop or is in an interrupt or
1095 * NMI handler, return true.
1097 * Make notrace because it can be called by the internal functions of
1098 * ftrace, and making this notrace removes unnecessary recursion calls.
1100 notrace bool rcu_is_watching(void)
1104 preempt_disable_notrace();
1105 ret = !rcu_dynticks_curr_cpu_in_eqs();
1106 preempt_enable_notrace();
1109 EXPORT_SYMBOL_GPL(rcu_is_watching);
1112 * If a holdout task is actually running, request an urgent quiescent
1113 * state from its CPU. This is unsynchronized, so migrations can cause
1114 * the request to go to the wrong CPU. Which is OK, all that will happen
1115 * is that the CPU's next context switch will be a bit slower and next
1116 * time around this task will generate another request.
1118 void rcu_request_urgent_qs_task(struct task_struct *t)
1125 return; /* This task is not running on that CPU. */
1126 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
1129 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1132 * Is the current CPU online as far as RCU is concerned?
1134 * Disable preemption to avoid false positives that could otherwise
1135 * happen due to the current CPU number being sampled, this task being
1136 * preempted, its old CPU being taken offline, resuming on some other CPU,
1137 * then determining that its old CPU is now offline.
1139 * Disable checking if in an NMI handler because we cannot safely
1140 * report errors from NMI handlers anyway. In addition, it is OK to use
1141 * RCU on an offline processor during initial boot, hence the check for
1142 * rcu_scheduler_fully_active.
1144 bool rcu_lockdep_current_cpu_online(void)
1146 struct rcu_data *rdp;
1147 struct rcu_node *rnp;
1150 if (in_nmi() || !rcu_scheduler_fully_active)
1152 preempt_disable_notrace();
1153 rdp = this_cpu_ptr(&rcu_data);
1155 if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
1157 preempt_enable_notrace();
1160 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1162 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
1165 * We are reporting a quiescent state on behalf of some other CPU, so
1166 * it is our responsibility to check for and handle potential overflow
1167 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
1168 * After all, the CPU might be in a deep idle state, and thus executing no code whatsoever.
1171 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
1173 raw_lockdep_assert_held_rcu_node(rnp);
1174 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
1176 WRITE_ONCE(rdp->gpwrap, true);
1177 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
1178 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
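/*
 * Worked example for the wrap checks above (illustrative): ULONG_CMP_LT()
 * is a modular comparison, so the first condition fires only once
 * rnp->gp_seq has advanced more than ULONG_MAX / 4 sequence values beyond
 * the CPU's cached rdp->gp_seq, that is, once the CPU has slept through
 * roughly a quarter of the counter space and could otherwise be confused
 * by a wrapped ->gp_seq; setting ->gpwrap forces it to resynchronize.
 */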
1182 * Snapshot the specified CPU's dynticks counter so that we can later
1183 * credit them with an implicit quiescent state. Return 1 if this CPU
1184 * is in dynticks idle mode, which is an extended quiescent state.
1186 static int dyntick_save_progress_counter(struct rcu_data *rdp)
1188 rdp->dynticks_snap = rcu_dynticks_snap(rdp);
1189 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1190 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1191 rcu_gpnum_ovf(rdp->mynode, rdp);
1198 * Return true if the specified CPU has passed through a quiescent
1199 * state by virtue of being in or having passed through a dynticks
1200 * idle state since the last call to dyntick_save_progress_counter()
1201 * for this same CPU, or by virtue of having been offline.
1203 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1208 struct rcu_node *rnp = rdp->mynode;
1211 * If the CPU passed through or entered a dynticks idle phase with
1212 * no active irq/NMI handlers, then we can safely pretend that the CPU
1213 * already acknowledged the request to pass through a quiescent
1214 * state. Either way, that CPU cannot possibly be in an RCU
1215 * read-side critical section that started before the beginning
1216 * of the current RCU grace period.
1218 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1219 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1220 rcu_gpnum_ovf(rnp, rdp);
1225 * Complain if a CPU that is considered to be offline from RCU's
1226 * perspective has not yet reported a quiescent state. After all,
1227 * the offline CPU should have reported a quiescent state during
1228 * the CPU-offline process, or, failing that, by rcu_gp_init()
1229 * if it ran concurrently with either the CPU going offline or the
1230 * last task on a leaf rcu_node structure exiting its RCU read-side
1231 * critical section while all CPUs corresponding to that structure
1232 * are offline. This added warning detects bugs in any of these
1235 * The rcu_node structure's ->lock is held here, which excludes
1236 * the relevant portions of the CPU-hotplug code, the grace-period
1237 * initialization code, and the rcu_read_unlock() code paths.
1239 * For more detail, please refer to the "Hotplug CPU" section
1240 * of RCU's Requirements documentation.
1242 if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
1244 struct rcu_node *rnp1;
1246 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1247 __func__, rnp->grplo, rnp->grphi, rnp->level,
1248 (long)rnp->gp_seq, (long)rnp->completedqs);
1249 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1250 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1251 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1252 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1253 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1254 __func__, rdp->cpu, ".o"[onl],
1255 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1256 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1257 return 1; /* Break things loose after complaining. */
1261 * A CPU running for an extended time within the kernel can
1262 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1263 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1264 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
1265 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1266 * variable are safe because the assignments are repeated if this
1267 * CPU failed to pass through a quiescent state. This code
1268 * also checks .jiffies_resched in case jiffies_to_sched_qs is set way too short.
1271 jtsq = READ_ONCE(jiffies_to_sched_qs);
1272 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1273 rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
1274 if (!READ_ONCE(*rnhqp) &&
1275 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1276 time_after(jiffies, rcu_state.jiffies_resched) ||
1277 rcu_state.cbovld)) {
1278 WRITE_ONCE(*rnhqp, true);
1279 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1280 smp_store_release(ruqp, true);
1281 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1282 WRITE_ONCE(*ruqp, true);
1286 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1287 * The above code handles this, but only for straight cond_resched().
1288 * And some in-kernel loops check need_resched() before calling
1289 * cond_resched(), which defeats the above code for CPUs that are
1290 * running in-kernel with scheduling-clock interrupts disabled.
1291 * So hit them over the head with the resched_cpu() hammer!
1293 if (tick_nohz_full_cpu(rdp->cpu) &&
1294 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1295 rcu_state.cbovld)) {
1296 WRITE_ONCE(*ruqp, true);
1297 resched_cpu(rdp->cpu);
1298 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1302 * If more than halfway to RCU CPU stall-warning time, invoke
1303 * resched_cpu() more frequently to try to loosen things up a bit.
1304 * Also check to see if the CPU is getting hammered with interrupts,
1305 * but only once per grace period, just to keep the IPIs down to a dull roar.
1308 if (time_after(jiffies, rcu_state.jiffies_resched)) {
1309 if (time_after(jiffies,
1310 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1311 resched_cpu(rdp->cpu);
1312 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1314 if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1315 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1316 (rnp->ffmask & rdp->grpmask)) {
1317 init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
1318 atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
1319 rdp->rcu_iw_pending = true;
1320 rdp->rcu_iw_gp_seq = rnp->gp_seq;
1321 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1328 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
1329 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1330 unsigned long gp_seq_req, const char *s)
1332 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
1333 gp_seq_req, rnp->level,
1334 rnp->grplo, rnp->grphi, s);
1338 * rcu_start_this_gp - Request the start of a particular grace period
1339 * @rnp_start: The leaf node of the CPU from which to start.
1340 * @rdp: The rcu_data corresponding to the CPU from which to start.
1341 * @gp_seq_req: The gp_seq of the grace period to start.
1343 * Start the specified grace period, as needed to handle newly arrived
1344 * callbacks. The required future grace periods are recorded in each
1345 * rcu_node structure's ->gp_seq_needed field. Returns true if there
1346 * is reason to awaken the grace-period kthread.
1348 * The caller must hold the specified rcu_node structure's ->lock, which
1349 * is why the caller is responsible for waking the grace-period kthread.
1351 * Returns true if the GP thread needs to be awakened else false.
1353 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1354 unsigned long gp_seq_req)
1357 struct rcu_node *rnp;
1360 * Use funnel locking to either acquire the root rcu_node
1361 * structure's lock or bail out if the need for this grace period
1362 * has already been recorded -- or if that grace period has in
1363 * fact already started. If there is already a grace period in
1364 * progress in a non-leaf node, no recording is needed because the
1365 * end of the grace period will scan the leaf rcu_node structures.
1366 * Note that rnp_start->lock must not be released.
1368 raw_lockdep_assert_held_rcu_node(rnp_start);
1369 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1370 for (rnp = rnp_start; 1; rnp = rnp->parent) {
1371 if (rnp != rnp_start)
1372 raw_spin_lock_rcu_node(rnp);
1373 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1374 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1375 (rnp != rnp_start &&
1376 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1377 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1381 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1382 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1384 * We just marked the leaf or internal node, and a
1385 * grace period is in progress, which means that
1386 * rcu_gp_cleanup() will see the marking. Bail to
1387 * reduce contention.
1389 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1390 TPS("Startedleaf"));
1393 if (rnp != rnp_start && rnp->parent != NULL)
1394 raw_spin_unlock_rcu_node(rnp);
1396 break; /* At root, and perhaps also leaf. */
1399 /* If GP already in progress, just leave, otherwise start one. */
1400 if (rcu_gp_in_progress()) {
1401 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1404 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1405 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1406 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1407 if (!READ_ONCE(rcu_state.gp_kthread)) {
1408 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1411 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1412 ret = true; /* Caller must wake GP kthread. */
1414 /* Push furthest requested GP to leaf node and rcu_data structure. */
1415 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1416 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1417 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1419 if (rnp != rnp_start)
1420 raw_spin_unlock_rcu_node(rnp);
1425 * Clean up any old requests for the just-ended grace period. Also return
1426 * whether any additional grace periods have been requested.
1428 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1431 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1433 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1435 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1436 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1437 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1442 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1443 * interrupt or softirq handler, in which case we just might immediately
1444 * sleep upon return, resulting in a grace-period hang), and don't bother
1445 * awakening when there is nothing for the grace-period kthread to do
1446 * (as in several CPUs raced to awaken, we lost), and finally don't try
1447 * to awaken a kthread that has not yet been created. If all those checks
1448 * are passed, track some debug information and awaken.
1450 * So why do the self-wakeup when in an interrupt or softirq handler
1451 * in the grace-period kthread's context? Because the kthread might have
1452 * been interrupted just as it was going to sleep, and just after the final
1453 * pre-sleep check of the awaken condition. In this case, a wakeup really
1454 * is required, and is therefore supplied.
1456 static void rcu_gp_kthread_wake(void)
1458 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1460 if ((current == t && !in_irq() && !in_serving_softirq()) ||
1461 !READ_ONCE(rcu_state.gp_flags) || !t)
1463 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1464 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1465 swake_up_one(&rcu_state.gp_wq);
1469 * If there is room, assign a ->gp_seq number to any callbacks on this
1470 * CPU that have not already been assigned. Also accelerate any callbacks
1471 * that were previously assigned a ->gp_seq number that has since proven
1472 * to be too conservative, which can happen if callbacks get assigned a
1473 * ->gp_seq number while RCU is idle, but with reference to a non-root
1474 * rcu_node structure. This function is idempotent, so it does not hurt
1475 * to call it repeatedly. Returns a flag saying that we should awaken
1476 * the RCU grace-period kthread.
1478 * The caller must hold rnp->lock with interrupts disabled.
1480 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1482 unsigned long gp_seq_req;
1485 rcu_lockdep_assert_cblist_protected(rdp);
1486 raw_lockdep_assert_held_rcu_node(rnp);
1488 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1489 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1493 * Callbacks are often registered with incomplete grace-period
1494 * information. Something about the fact that getting exact
1495 * information requires acquiring a global lock... RCU therefore
1496 * makes a conservative estimate of the grace period number at which
1497 * a given callback will become ready to invoke. The following
1498 * code checks this estimate and improves it when possible, thus
1499 * accelerating callback invocation to an earlier grace-period
1502 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1503 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1504 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1506 /* Trace depending on how much we were able to accelerate. */
1507 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1508 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1510 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1516 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1517 * rcu_node structure's ->lock be held. It consults the cached value
1518 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1519 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1520 * while holding the leaf rcu_node structure's ->lock.
1522 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1523 struct rcu_data *rdp)
1528 rcu_lockdep_assert_cblist_protected(rdp);
1529 c = rcu_seq_snap(&rcu_state.gp_seq);
1530 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1531 /* Old request still live, so mark recent callbacks. */
1532 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1535 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1536 needwake = rcu_accelerate_cbs(rnp, rdp);
1537 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1539 rcu_gp_kthread_wake();
1543 * Move any callbacks whose grace period has completed to the
1544 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1545 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1546 * sublist. This function is idempotent, so it does not hurt to
1547 * invoke it repeatedly. As long as it is not invoked -too- often...
1548 * Returns true if the RCU grace-period kthread needs to be awakened.
1550 * The caller must hold rnp->lock with interrupts disabled.
1552 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1554 rcu_lockdep_assert_cblist_protected(rdp);
1555 raw_lockdep_assert_held_rcu_node(rnp);
1557 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1558 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1562 * Find all callbacks whose ->gp_seq numbers indicate that they
1563 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1565 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1567 /* Classify any remaining callbacks. */
1568 return rcu_accelerate_cbs(rnp, rdp);
1572 * Move and classify callbacks, but only if doing so won't require
1573 * that the RCU grace-period kthread be awakened.
1575 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1576 struct rcu_data *rdp)
1578 rcu_lockdep_assert_cblist_protected(rdp);
1579 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
1580 !raw_spin_trylock_rcu_node(rnp))
1582 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1583 raw_spin_unlock_rcu_node(rnp);
1587 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1588 * quiescent state. This is intended to be invoked when the CPU notices
1589 * a new grace period.
1591 static void rcu_strict_gp_check_qs(void)
1593 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1600 * Update CPU-local rcu_data state to record the beginnings and ends of
1601 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1602 * structure corresponding to the current CPU, and must have irqs disabled.
1603 * Returns true if the grace-period kthread needs to be awakened.
1605 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1609 const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
1611 raw_lockdep_assert_held_rcu_node(rnp);
1613 if (rdp->gp_seq == rnp->gp_seq)
1614 return false; /* Nothing to do. */
1616 /* Handle the ends of any preceding grace periods first. */
1617 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1618 unlikely(READ_ONCE(rdp->gpwrap))) {
1620 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1621 rdp->core_needs_qs = false;
1622 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1625 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1626 if (rdp->core_needs_qs)
1627 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1630 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1631 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1632 unlikely(READ_ONCE(rdp->gpwrap))) {
1634 * If the current grace period is waiting for this CPU,
1635 * set up to detect a quiescent state, otherwise don't
1636 * go looking for one.
1638 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1639 need_qs = !!(rnp->qsmask & rdp->grpmask);
1640 rdp->cpu_no_qs.b.norm = need_qs;
1641 rdp->core_needs_qs = need_qs;
1642 zero_cpu_stall_ticks(rdp);
1644 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1645 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1646 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1647 WRITE_ONCE(rdp->gpwrap, false);
1648 rcu_gpnum_ovf(rnp, rdp);
1652 static void note_gp_changes(struct rcu_data *rdp)
1654 unsigned long flags;
1656 struct rcu_node *rnp;
1658 local_irq_save(flags);
1660 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1661 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1662 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1663 local_irq_restore(flags);
1666 needwake = __note_gp_changes(rnp, rdp);
1667 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1668 rcu_strict_gp_check_qs();
1670 rcu_gp_kthread_wake();
1673 static void rcu_gp_slow(int delay)
1676 !(rcu_seq_ctr(rcu_state.gp_seq) %
1677 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1678 schedule_timeout_idle(delay);
1681 static unsigned long sleep_duration;
1683 /* Allow rcutorture to stall the grace-period kthread. */
1684 void rcu_gp_set_torture_wait(int duration)
1686 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1687 WRITE_ONCE(sleep_duration, duration);
1689 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1691 /* Actually implement the aforementioned wait. */
1692 static void rcu_gp_torture_wait(void)
1694 unsigned long duration;
1696 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1698 duration = xchg(&sleep_duration, 0UL);
1700 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1701 schedule_timeout_idle(duration);
1702 pr_alert("%s: Wait complete\n", __func__);
1707 * Handler for on_each_cpu() to invoke the target CPU's RCU core processing.
1710 static void rcu_strict_gp_boundary(void *unused)
1716 * Initialize a new grace period. Return false if no grace period required.
1718 static bool rcu_gp_init(void)
1720 unsigned long flags;
1721 unsigned long oldmask;
1723 struct rcu_data *rdp;
1724 struct rcu_node *rnp = rcu_get_root();
1726 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1727 raw_spin_lock_irq_rcu_node(rnp);
1728 if (!READ_ONCE(rcu_state.gp_flags)) {
1729 /* Spurious wakeup, tell caller to go back to sleep. */
1730 raw_spin_unlock_irq_rcu_node(rnp);
1733 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1735 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1737 * Grace period already in progress, don't start another.
1738 * Not supposed to be able to happen.
1740 raw_spin_unlock_irq_rcu_node(rnp);
1744 /* Advance to a new grace period and initialize state. */
1745 record_gp_stall_check_time();
1746 /* Record GP times before starting GP, hence rcu_seq_start(). */
1747 rcu_seq_start(&rcu_state.gp_seq);
1748 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1749 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1750 raw_spin_unlock_irq_rcu_node(rnp);
1753 * Apply per-leaf buffered online and offline operations to
1754 * the rcu_node tree. Note that this new grace period need not
1755 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1756 * offlining path, when combined with checks in this function,
1757 * will handle CPUs that are currently going offline or that will
1758 * go offline later. Please also refer to "Hotplug CPU" section
1759 * of RCU's Requirements documentation.
1761 rcu_state.gp_state = RCU_GP_ONOFF;
1762 rcu_for_each_leaf_node(rnp) {
1763 raw_spin_lock(&rcu_state.ofl_lock);
1764 raw_spin_lock_irq_rcu_node(rnp);
1765 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1766 !rnp->wait_blkd_tasks) {
1767 /* Nothing to do on this leaf rcu_node structure. */
1768 raw_spin_unlock_irq_rcu_node(rnp);
1769 raw_spin_unlock(&rcu_state.ofl_lock);
1773 /* Record old state, apply changes to ->qsmaskinit field. */
1774 oldmask = rnp->qsmaskinit;
1775 rnp->qsmaskinit = rnp->qsmaskinitnext;
1777 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1778 if (!oldmask != !rnp->qsmaskinit) {
1779 if (!oldmask) { /* First online CPU for rcu_node. */
1780 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1781 rcu_init_new_rnp(rnp);
1782 } else if (rcu_preempt_has_tasks(rnp)) {
1783 rnp->wait_blkd_tasks = true; /* blocked tasks */
1784 } else { /* Last offline CPU and can propagate. */
1785 rcu_cleanup_dead_rnp(rnp);
1790 * If all waited-on tasks from prior grace period are
1791 * done, and if all this rcu_node structure's CPUs are
1792 * still offline, propagate up the rcu_node tree and
1793 * clear ->wait_blkd_tasks. Otherwise, if one of this
1794 * rcu_node structure's CPUs has since come back online,
1795 * simply clear ->wait_blkd_tasks.
1797 if (rnp->wait_blkd_tasks &&
1798 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1799 rnp->wait_blkd_tasks = false;
1800 if (!rnp->qsmaskinit)
1801 rcu_cleanup_dead_rnp(rnp);
1804 raw_spin_unlock_irq_rcu_node(rnp);
1805 raw_spin_unlock(&rcu_state.ofl_lock);
1807 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1810 * Set the quiescent-state-needed bits in all the rcu_node
1811 * structures for all currently online CPUs in breadth-first
1812 * order, starting from the root rcu_node structure, relying on the
1813 * layout of the tree within the rcu_state.node[] array. Note that
1814 * other CPUs will access only the leaves of the hierarchy, thus
1815 * seeing that no grace period is in progress, at least until the
1816 * corresponding leaf node has been initialized.
1818 * The grace period cannot complete until the initialization
1819 * process finishes, because this kthread handles both.
1821 rcu_state.gp_state = RCU_GP_INIT;
1822 rcu_for_each_node_breadth_first(rnp) {
1823 rcu_gp_slow(gp_init_delay);
1824 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1825 rdp = this_cpu_ptr(&rcu_data);
1826 rcu_preempt_check_blocked_tasks(rnp);
1827 rnp->qsmask = rnp->qsmaskinit;
1828 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1829 if (rnp == rdp->mynode)
1830 (void)__note_gp_changes(rnp, rdp);
1831 rcu_preempt_boost_start_gp(rnp);
1832 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1833 rnp->level, rnp->grplo,
1834 rnp->grphi, rnp->qsmask);
1835 /* Quiescent states for tasks on any now-offline CPUs. */
1836 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1837 rnp->rcu_gp_init_mask = mask;
1838 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1839 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1841 raw_spin_unlock_irq_rcu_node(rnp);
1842 cond_resched_tasks_rcu_qs();
1843 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1846 // If strict, make all CPUs aware of new grace period.
1847 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1848 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
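/*
 * The breadth-first walk in rcu_gp_init() relies on the rcu_node tree
 * being laid out level by level in the rcu_state.node[] array, so that a
 * simple linear scan visits parents before their children.  A hedged
 * sketch of what rcu_for_each_node_breadth_first() roughly expands to
 * (the real macro is defined in kernel/rcu/rcu.h):
 *
 *	#define rcu_for_each_node_breadth_first(rnp) \
 *		for ((rnp) = &rcu_state.node[0]; \
 *		     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)
 *
 * For example, with 64 CPUs and an RCU_FANOUT_LEAF of 16, node[0] is the
 * root and node[1..4] are the four leaves, each covering 16 CPUs.
 */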
1854 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1857 static bool rcu_gp_fqs_check_wake(int *gfp)
1859 struct rcu_node *rnp = rcu_get_root();
1861 // If under overload conditions, force an immediate FQS scan.
1862 if (*gfp & RCU_GP_FLAG_OVLD)
1865 // Someone like call_rcu() requested a force-quiescent-state scan.
1866 *gfp = READ_ONCE(rcu_state.gp_flags);
1867 if (*gfp & RCU_GP_FLAG_FQS)
1870 // The current grace period has completed.
1871 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1878 * Do one round of quiescent-state forcing.
1880 static void rcu_gp_fqs(bool first_time)
1882 struct rcu_node *rnp = rcu_get_root();
1884 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1885 rcu_state.n_force_qs++;
1887 /* Collect dyntick-idle snapshots. */
1888 force_qs_rnp(dyntick_save_progress_counter);
1890 /* Handle dyntick-idle and offline CPUs. */
1891 force_qs_rnp(rcu_implicit_dynticks_qs);
1893 /* Clear flag to prevent immediate re-entry. */
1894 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1895 raw_spin_lock_irq_rcu_node(rnp);
1896 WRITE_ONCE(rcu_state.gp_flags,
1897 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1898 raw_spin_unlock_irq_rcu_node(rnp);
1903 * Loop doing repeated quiescent-state forcing until the grace period ends.
1905 static void rcu_gp_fqs_loop(void)
1911 struct rcu_node *rnp = rcu_get_root();
1913 first_gp_fqs = true;
1914 j = READ_ONCE(jiffies_till_first_fqs);
1915 if (rcu_state.cbovld)
1916 gf = RCU_GP_FLAG_OVLD;
1920 rcu_state.jiffies_force_qs = jiffies + j;
1921 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1922 jiffies + (j ? 3 * j : 2));
1924 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1926 rcu_state.gp_state = RCU_GP_WAIT_FQS;
1927 ret = swait_event_idle_timeout_exclusive(
1928 rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
1929 rcu_gp_torture_wait();
1930 rcu_state.gp_state = RCU_GP_DOING_FQS;
1931 /* Locking provides needed memory barriers. */
1932 /* If grace period done, leave loop. */
1933 if (!READ_ONCE(rnp->qsmask) &&
1934 !rcu_preempt_blocked_readers_cgp(rnp))
1936 /* If time for quiescent-state forcing, do it. */
1937 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1938 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1939 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1941 rcu_gp_fqs(first_gp_fqs);
1944 first_gp_fqs = false;
1945 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1947 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1949 cond_resched_tasks_rcu_qs();
1950 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1951 ret = 0; /* Force full wait till next FQS. */
1952 j = READ_ONCE(jiffies_till_next_fqs);
1954 /* Deal with stray signal. */
1955 cond_resched_tasks_rcu_qs();
1956 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1957 WARN_ON(signal_pending(current));
1958 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1960 ret = 1; /* Keep old FQS timing. */
1962 if (time_after(jiffies, rcu_state.jiffies_force_qs))
1965 j = rcu_state.jiffies_force_qs - j;
1972 * Clean up after the old grace period.
1974 static void rcu_gp_cleanup(void)
1977 bool needgp = false;
1978 unsigned long gp_duration;
1979 unsigned long new_gp_seq;
1981 struct rcu_data *rdp;
1982 struct rcu_node *rnp = rcu_get_root();
1983 struct swait_queue_head *sq;
1985 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1986 raw_spin_lock_irq_rcu_node(rnp);
1987 rcu_state.gp_end = jiffies;
1988 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1989 if (gp_duration > rcu_state.gp_max)
1990 rcu_state.gp_max = gp_duration;
1993 * We know the grace period is complete, but to everyone else
1994 * it appears to still be ongoing. But it is also the case
1995 * that to everyone else it looks like there is nothing that
1996 * they can do to advance the grace period. It is therefore
1997 * safe for us to drop the lock in order to mark the grace
1998 * period as completed in all of the rcu_node structures.
2000 raw_spin_unlock_irq_rcu_node(rnp);
2003 * Propagate new ->gp_seq value to rcu_node structures so that
2004 * other CPUs don't have to wait until the start of the next grace
2005 * period to process their callbacks. This also avoids some nasty
2006 * RCU grace-period initialization races by forcing the end of
2007 * the current grace period to be completely recorded in all of
2008 * the rcu_node structures before the beginning of the next grace
2009 * period is recorded in any of the rcu_node structures.
2011 new_gp_seq = rcu_state.gp_seq;
2012 rcu_seq_end(&new_gp_seq);
2013 rcu_for_each_node_breadth_first(rnp) {
2014 raw_spin_lock_irq_rcu_node(rnp);
2015 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2016 dump_blkd_tasks(rnp, 10);
2017 WARN_ON_ONCE(rnp->qsmask);
2018 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2019 rdp = this_cpu_ptr(&rcu_data);
2020 if (rnp == rdp->mynode)
2021 needgp = __note_gp_changes(rnp, rdp) || needgp;
2022 /* smp_mb() provided by prior unlock-lock pair. */
2023 needgp = rcu_future_gp_cleanup(rnp) || needgp;
2024 // Reset overload indication for CPUs no longer overloaded
2025 if (rcu_is_leaf_node(rnp))
2026 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2027 rdp = per_cpu_ptr(&rcu_data, cpu);
2028 check_cb_ovld_locked(rdp, rnp);
2030 sq = rcu_nocb_gp_get(rnp);
2031 raw_spin_unlock_irq_rcu_node(rnp);
2032 rcu_nocb_gp_cleanup(sq);
2033 cond_resched_tasks_rcu_qs();
2034 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2035 rcu_gp_slow(gp_cleanup_delay);
2037 rnp = rcu_get_root();
2038 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2040 /* Declare grace period done, trace first to use old GP number. */
2041 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2042 rcu_seq_end(&rcu_state.gp_seq);
2043 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2044 rcu_state.gp_state = RCU_GP_IDLE;
2045 /* Check for GP requests since above loop. */
2046 rdp = this_cpu_ptr(&rcu_data);
2047 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2048 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2049 TPS("CleanupMore"));
2052 /* Advance CBs to reduce false positives below. */
2053 offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
2054 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2055 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2056 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2057 trace_rcu_grace_period(rcu_state.name,
2061 WRITE_ONCE(rcu_state.gp_flags,
2062 rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2064 raw_spin_unlock_irq_rcu_node(rnp);
2066 // If strict, make all CPUs aware of the end of the old grace period.
2067 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2068 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2072 * Body of kthread that handles grace periods.
2074 static int __noreturn rcu_gp_kthread(void *unused)
2076 rcu_bind_gp_kthread();
2079 /* Handle grace-period start. */
2081 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2083 rcu_state.gp_state = RCU_GP_WAIT_GPS;
2084 swait_event_idle_exclusive(rcu_state.gp_wq,
2085 READ_ONCE(rcu_state.gp_flags) &
2087 rcu_gp_torture_wait();
2088 rcu_state.gp_state = RCU_GP_DONE_GPS;
2089 /* Locking provides needed memory barrier. */
2092 cond_resched_tasks_rcu_qs();
2093 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2094 WARN_ON(signal_pending(current));
2095 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2099 /* Handle quiescent-state forcing. */
2102 /* Handle grace-period end. */
2103 rcu_state.gp_state = RCU_GP_CLEANUP;
2105 rcu_state.gp_state = RCU_GP_CLEANED;
2110 * Report a full set of quiescent states to the rcu_state data structure.
2111 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2112 * another grace period is required. Whether we wake the grace-period
2113 * kthread or it awakens itself for the next round of quiescent-state
2114 * forcing, that kthread will clean up after the just-completed grace
2115 * period. Note that the caller must hold rnp->lock, which is released
2118 static void rcu_report_qs_rsp(unsigned long flags)
2119 __releases(rcu_get_root()->lock)
2121 raw_lockdep_assert_held_rcu_node(rcu_get_root());
2122 WARN_ON_ONCE(!rcu_gp_in_progress());
2123 WRITE_ONCE(rcu_state.gp_flags,
2124 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2125 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2126 rcu_gp_kthread_wake();
2130 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2131 * Allows quiescent states for a group of CPUs to be reported at one go
2132 * to the specified rcu_node structure, though all the CPUs in the group
2133 * must be represented by the same rcu_node structure (which need not be a
2134 * leaf rcu_node structure, though it often will be). The gps parameter
2135 * is the grace-period snapshot, which means that the quiescent states
2136 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2137 * must be held upon entry, and it is released before return.
2139 * As a special case, if mask is zero, the bit-already-cleared check is
2140 * disabled. This allows propagating quiescent state due to resumed tasks
2141 * during grace-period initialization.
2143 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2144 unsigned long gps, unsigned long flags)
2145 __releases(rnp->lock)
2147 unsigned long oldmask = 0;
2148 struct rcu_node *rnp_c;
2150 raw_lockdep_assert_held_rcu_node(rnp);
2152 /* Walk up the rcu_node hierarchy. */
2154 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2157 * Our bit has already been cleared, or the
2158 * relevant grace period is already over, so done.
2160 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2163 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2164 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2165 rcu_preempt_blocked_readers_cgp(rnp));
2166 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2167 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2168 mask, rnp->qsmask, rnp->level,
2169 rnp->grplo, rnp->grphi,
2171 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2173 /* Other bits still set at this level, so done. */
2174 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2177 rnp->completedqs = rnp->gp_seq;
2178 mask = rnp->grpmask;
2179 if (rnp->parent == NULL) {
2181 /* No more levels. Exit loop holding root lock. */
2185 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2188 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2189 oldmask = READ_ONCE(rnp_c->qsmask);
2193 * Get here if we are the last CPU to pass through a quiescent
2194 * state for this grace period. Invoke rcu_report_qs_rsp()
2195 * to clean up and start the next grace period if one is needed.
2197 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
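/*
 * Worked example of the walk above (illustrative; the exact masks depend
 * on the configured fanout): suppose a leaf rcu_node covers CPUs 0-15 and
 * CPU 3 is the last of them to report.  The first pass clears bit
 * 1UL << 3 in the leaf's ->qsmask; because that ->qsmask is now zero and
 * no readers are blocked on the leaf, mask becomes the leaf's ->grpmask
 * and the loop moves to the parent, clearing the leaf's bit there.  Only
 * when the root's ->qsmask reaches zero does the walk end in
 * rcu_report_qs_rsp(), which wakes the grace-period kthread to finish
 * the grace period.
 */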
2201 * Record a quiescent state for all tasks that were previously queued
2202 * on the specified rcu_node structure and that were blocking the current
2203 * RCU grace period. The caller must hold the corresponding rnp->lock with
2204 * irqs disabled, and this lock is released upon return, but irqs remain
2207 static void __maybe_unused
2208 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2209 __releases(rnp->lock)
2213 struct rcu_node *rnp_p;
2215 raw_lockdep_assert_held_rcu_node(rnp);
2216 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2217 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2219 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2220 return; /* Still need more quiescent states! */
2223 rnp->completedqs = rnp->gp_seq;
2224 rnp_p = rnp->parent;
2225 if (rnp_p == NULL) {
2227 * Only one rcu_node structure in the tree, so don't
2228 * try to report up to its nonexistent parent!
2230 rcu_report_qs_rsp(flags);
2234 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2236 mask = rnp->grpmask;
2237 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2238 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
2239 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2243 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2244 * structure. This must be called from the specified CPU.
2247 rcu_report_qs_rdp(struct rcu_data *rdp)
2249 unsigned long flags;
2251 bool needwake = false;
2252 const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
2253 struct rcu_node *rnp;
2255 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2257 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2258 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2262 * The grace period in which this quiescent state was
2263 * recorded has ended, so don't report it upwards.
2264 * We will instead need a new quiescent state that lies
2265 * within the current grace period.
2267 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2268 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2271 mask = rdp->grpmask;
2272 rdp->core_needs_qs = false;
2273 if ((rnp->qsmask & mask) == 0) {
2274 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2277 * This GP can't end until this CPU checks in, so all of our
2278 * callbacks can be processed during the next GP.
2281 needwake = rcu_accelerate_cbs(rnp, rdp);
2283 rcu_disable_urgency_upon_qs(rdp);
2284 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2285 /* ^^^ Released rnp->lock */
2287 rcu_gp_kthread_wake();
2292 * Check to see if there is a new grace period of which this CPU
2293 * is not yet aware, and if so, set up local rcu_data state for it.
2294 * Otherwise, see if this CPU has just passed through its first
2295 * quiescent state for this grace period, and record that fact if so.
2298 rcu_check_quiescent_state(struct rcu_data *rdp)
2300 /* Check for grace-period ends and beginnings. */
2301 note_gp_changes(rdp);
2304 * Does this CPU still need to do its part for current grace period?
2305 * If no, return and let the other CPUs do their part as well.
2307 if (!rdp->core_needs_qs)
2311 * Was there a quiescent state since the beginning of the grace
2312 * period? If no, then exit and wait for the next call.
2314 if (rdp->cpu_no_qs.b.norm)
2318 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2321 rcu_report_qs_rdp(rdp);
2325 * Near the end of the offline process. Trace the fact that this CPU
2328 int rcutree_dying_cpu(unsigned int cpu)
2331 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2332 struct rcu_node *rnp = rdp->mynode;
2334 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2337 blkd = !!(rnp->qsmask & rdp->grpmask);
2338 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2339 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2344 * All CPUs for the specified rcu_node structure have gone offline,
2345 * and all tasks that were preempted within an RCU read-side critical
2346 * section while running on one of those CPUs have since exited their RCU
2347 * read-side critical section. Some other CPU is reporting this fact with
2348 * the specified rcu_node structure's ->lock held and interrupts disabled.
2349 * This function therefore goes up the tree of rcu_node structures,
2350 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2351 * the leaf rcu_node structure's ->qsmaskinit field has already been
2354 * This function does check that the specified rcu_node structure has
2355 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2356 * prematurely. That said, invoking it after the fact will cost you
2357 * a needless lock acquisition. So once it has done its work, don't
2360 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2363 struct rcu_node *rnp = rnp_leaf;
2365 raw_lockdep_assert_held_rcu_node(rnp_leaf);
2366 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2367 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2368 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2371 mask = rnp->grpmask;
2375 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2376 rnp->qsmaskinit &= ~mask;
2377 /* Between grace periods, so better already be zero! */
2378 WARN_ON_ONCE(rnp->qsmask);
2379 if (rnp->qsmaskinit) {
2380 raw_spin_unlock_rcu_node(rnp);
2381 /* irqs remain disabled. */
2384 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2389 * The CPU has been completely removed, and some other CPU is reporting
2390 * this fact from process context. Do the remainder of the cleanup.
2391 * There can only be one CPU hotplug operation at a time, so no need for
2394 int rcutree_dead_cpu(unsigned int cpu)
2396 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2397 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2399 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2402 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2403 /* Adjust any no-longer-needed kthreads. */
2404 rcu_boost_kthread_setaffinity(rnp, -1);
2405 /* Do any needed no-CB deferred wakeups from this CPU. */
2406 do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2408 // Stop-machine done, so allow nohz_full to disable tick.
2409 tick_dep_clear(TICK_DEP_BIT_RCU);
2414 * Invoke any RCU callbacks that have made it to the end of their grace
2415 * period. Throttle as specified by rdp->blimit.
2417 static void rcu_do_batch(struct rcu_data *rdp)
2420 unsigned long flags;
2421 const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
2422 struct rcu_head *rhp;
2423 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2425 long pending, tlimit = 0;
2427 /* If no callbacks are ready, just return. */
2428 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2429 trace_rcu_batch_start(rcu_state.name,
2430 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2431 trace_rcu_batch_end(rcu_state.name, 0,
2432 !rcu_segcblist_empty(&rdp->cblist),
2433 need_resched(), is_idle_task(current),
2434 rcu_is_callbacks_kthread());
2439 * Extract the list of ready callbacks, disabling to prevent
2440 * races with call_rcu() from interrupt handlers. Leave the
2441 * callback counts, as rcu_barrier() needs to be conservative.
2443 local_irq_save(flags);
2445 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2446 pending = rcu_segcblist_n_cbs(&rdp->cblist);
2447 div = READ_ONCE(rcu_divisor);
2448 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2449 bl = max(rdp->blimit, pending >> div);
2450 if (unlikely(bl > 100)) {
2451 long rrn = READ_ONCE(rcu_resched_ns);
2453 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2454 tlimit = local_clock() + rrn;
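/*
 * Worked example of the limits just computed (illustrative, assuming the
 * default rcu_divisor of 7 and rdp->blimit still at its default of 10):
 * with 10,000 pending callbacks, pending >> div is 78, so bl becomes 78
 * and, being at most 100, no time limit is armed.  With 100,000 pending
 * callbacks, pending >> div is 781 > 100, so tlimit is set roughly one
 * rcu_resched_ns (clamped to the range [1 ms, 1 s]) into the future.
 */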
2456 trace_rcu_batch_start(rcu_state.name,
2457 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2458 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2460 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2461 rcu_nocb_unlock_irqrestore(rdp, flags);
2463 /* Invoke callbacks. */
2464 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2465 rhp = rcu_cblist_dequeue(&rcl);
2466 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2469 debug_rcu_head_unqueue(rhp);
2471 rcu_lock_acquire(&rcu_callback_map);
2472 trace_rcu_invoke_callback(rcu_state.name, rhp);
2475 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2478 rcu_lock_release(&rcu_callback_map);
2481 * Stop only if limit reached and CPU has something to do.
2482 * Note: rcl.len counts down from zero as callbacks are dequeued,
* so -rcl.len is the number of callbacks invoked so far.
2484 if (-rcl.len >= bl && !offloaded &&
2486 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2488 if (unlikely(tlimit)) {
2489 /* only call local_clock() every 32 callbacks */
2490 if (likely((-rcl.len & 31) || local_clock() < tlimit))
2492 /* Exceeded the time limit, so leave. */
2496 WARN_ON_ONCE(in_serving_softirq());
2498 lockdep_assert_irqs_enabled();
2499 cond_resched_tasks_rcu_qs();
2500 lockdep_assert_irqs_enabled();
2505 local_irq_save(flags);
2508 rdp->n_cbs_invoked += count;
2509 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2510 is_idle_task(current), rcu_is_callbacks_kthread());
2512 /* Update counts and requeue any remaining callbacks. */
2513 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2514 smp_mb(); /* List handling before counting for rcu_barrier(). */
2515 rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2517 /* Reinstate batch limit if we have worked down the excess. */
2518 count = rcu_segcblist_n_cbs(&rdp->cblist);
2519 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2520 rdp->blimit = blimit;
2522 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2523 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2524 rdp->qlen_last_fqs_check = 0;
2525 rdp->n_force_qs_snap = rcu_state.n_force_qs;
2526 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2527 rdp->qlen_last_fqs_check = count;
2530 * The following usually indicates a double call_rcu(). To track
2531 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2533 WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
2534 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2535 count != 0 && rcu_segcblist_empty(&rdp->cblist));
2537 rcu_nocb_unlock_irqrestore(rdp, flags);
2539 /* Re-invoke RCU core processing if there are callbacks remaining. */
2540 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2542 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2546 * This function is invoked from each scheduling-clock interrupt,
2547 * and checks to see if this CPU is in a non-context-switch quiescent
2548 * state, for example, user mode or idle loop. It also schedules RCU
2549 * core processing. If the current grace period has gone on too long,
2550 * it will ask the scheduler to manufacture a context switch for the sole
2551 * purpose of providing the needed quiescent state.
2553 void rcu_sched_clock_irq(int user)
2555 trace_rcu_utilization(TPS("Start scheduler-tick"));
2556 raw_cpu_inc(rcu_data.ticks_this_gp);
2557 /* The load-acquire pairs with the store-release setting to true. */
2558 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2559 /* Idle and userspace execution already are quiescent states. */
2560 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2561 set_tsk_need_resched(current);
2562 set_preempt_need_resched();
2564 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2566 rcu_flavor_sched_clock_irq(user);
2567 if (rcu_pending(user))
2570 trace_rcu_utilization(TPS("End scheduler-tick"));
2574 * Scan the leaf rcu_node structures. For each structure on which all
2575 * CPUs have reported a quiescent state and on which there are tasks
2576 * blocking the current grace period, initiate RCU priority boosting.
2577 * Otherwise, invoke the specified function to check dyntick state for
2578 * each CPU that has not yet reported a quiescent state.
2580 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2583 unsigned long flags;
2585 struct rcu_data *rdp;
2586 struct rcu_node *rnp;
2588 rcu_state.cbovld = rcu_state.cbovldnext;
2589 rcu_state.cbovldnext = false;
2590 rcu_for_each_leaf_node(rnp) {
2591 cond_resched_tasks_rcu_qs();
2593 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2594 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2595 if (rnp->qsmask == 0) {
2596 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2598 * No point in scanning bits because they
2599 * are all zero. But we might need to
2600 * priority-boost blocked readers.
2602 rcu_initiate_boost(rnp, flags);
2603 /* rcu_initiate_boost() releases rnp->lock */
2606 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2609 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2610 rdp = per_cpu_ptr(&rcu_data, cpu);
2612 mask |= rdp->grpmask;
2613 rcu_disable_urgency_upon_qs(rdp);
2617 /* Idle/offline CPUs, report (releases rnp->lock). */
2618 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2620 /* Nothing to do here, so just drop the lock. */
2621 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2627 * Force quiescent states on reluctant CPUs, and also detect which
2628 * CPUs are in dyntick-idle mode.
2630 void rcu_force_quiescent_state(void)
2632 unsigned long flags;
2634 struct rcu_node *rnp;
2635 struct rcu_node *rnp_old = NULL;
2637 /* Funnel through hierarchy to reduce memory contention. */
2638 rnp = __this_cpu_read(rcu_data.mynode);
2639 for (; rnp != NULL; rnp = rnp->parent) {
2640 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2641 !raw_spin_trylock(&rnp->fqslock);
2642 if (rnp_old != NULL)
2643 raw_spin_unlock(&rnp_old->fqslock);
2648 /* rnp_old == rcu_get_root(), rnp == NULL. */
2650 /* Reached the root of the rcu_node tree, acquire lock. */
2651 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2652 raw_spin_unlock(&rnp_old->fqslock);
2653 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2654 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2655 return; /* Someone beat us to it. */
2657 WRITE_ONCE(rcu_state.gp_flags,
2658 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2659 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2660 rcu_gp_kthread_wake();
2662 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2664 // Workqueue handler for an RCU reader for kernels enforcing struct RCU
2666 static void strict_work_handler(struct work_struct *work)
2672 /* Perform RCU core processing work for the current CPU. */
2673 static __latent_entropy void rcu_core(void)
2675 unsigned long flags;
2676 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2677 struct rcu_node *rnp = rdp->mynode;
2678 const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
2680 if (cpu_is_offline(smp_processor_id()))
2682 trace_rcu_utilization(TPS("Start RCU core"));
2683 WARN_ON_ONCE(!rdp->beenonline);
2685 /* Report any deferred quiescent states if preemption enabled. */
2686 if (!(preempt_count() & PREEMPT_MASK)) {
2687 rcu_preempt_deferred_qs(current);
2688 } else if (rcu_preempt_need_deferred_qs(current)) {
2689 set_tsk_need_resched(current);
2690 set_preempt_need_resched();
2693 /* Update RCU state based on any recent quiescent states. */
2694 rcu_check_quiescent_state(rdp);
2696 /* No grace period and unregistered callbacks? */
2697 if (!rcu_gp_in_progress() &&
2698 rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) {
2699 local_irq_save(flags);
2700 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2701 rcu_accelerate_cbs_unlocked(rnp, rdp);
2702 local_irq_restore(flags);
2705 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2707 /* If there are callbacks ready, invoke them. */
2708 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2709 likely(READ_ONCE(rcu_scheduler_fully_active)))
2712 /* Do any needed deferred wakeups of rcuo kthreads. */
2713 do_nocb_deferred_wakeup(rdp);
2714 trace_rcu_utilization(TPS("End RCU core"));
2716 // If strict GPs, schedule an RCU reader in a clean environment.
2717 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2718 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2721 static void rcu_core_si(struct softirq_action *h)
2726 static void rcu_wake_cond(struct task_struct *t, int status)
2729 * If the thread is yielding, only wake it when this
2730 * is invoked from idle
2732 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2736 static void invoke_rcu_core_kthread(void)
2738 struct task_struct *t;
2739 unsigned long flags;
2741 local_irq_save(flags);
2742 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2743 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2744 if (t != NULL && t != current)
2745 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2746 local_irq_restore(flags);
2750 * Wake up this CPU's rcuc kthread to do RCU core processing.
2752 static void invoke_rcu_core(void)
2754 if (!cpu_online(smp_processor_id()))
2757 raise_softirq(RCU_SOFTIRQ);
2759 invoke_rcu_core_kthread();
2762 static void rcu_cpu_kthread_park(unsigned int cpu)
2764 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2767 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2769 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2773 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2774 * the RCU softirq used in configurations of RCU that do not support RCU
2775 * priority boosting.
2777 static void rcu_cpu_kthread(unsigned int cpu)
2779 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2780 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2783 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2784 for (spincnt = 0; spincnt < 10; spincnt++) {
2786 *statusp = RCU_KTHREAD_RUNNING;
2787 local_irq_disable();
2795 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2796 *statusp = RCU_KTHREAD_WAITING;
2800 *statusp = RCU_KTHREAD_YIELDING;
2801 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2802 schedule_timeout_idle(2);
2803 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2804 *statusp = RCU_KTHREAD_WAITING;
2807 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2808 .store = &rcu_data.rcu_cpu_kthread_task,
2809 .thread_should_run = rcu_cpu_kthread_should_run,
2810 .thread_fn = rcu_cpu_kthread,
2811 .thread_comm = "rcuc/%u",
2812 .setup = rcu_cpu_kthread_setup,
2813 .park = rcu_cpu_kthread_park,
2817 * Spawn per-CPU RCU core processing kthreads.
2819 static int __init rcu_spawn_core_kthreads(void)
2823 for_each_possible_cpu(cpu)
2824 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2825 if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2827 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2828 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2831 early_initcall(rcu_spawn_core_kthreads);
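/*
 * Usage note (hedged): whether RCU core processing runs from RCU_SOFTIRQ
 * or from the per-CPU rcuc kthreads spawned above is governed by the
 * use_softirq module parameter, which with the rcutree. prefix is
 * normally set on the kernel command line, for example:
 *
 *	rcutree.use_softirq=0	# force per-CPU rcuc kthreads
 *
 * CONFIG_RCU_BOOST=y likewise forces the kthread path regardless of the
 * parameter, as checked in rcu_spawn_core_kthreads() above.
 */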
2834 * Handle any core-RCU processing required by a call_rcu() invocation.
2836 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2837 unsigned long flags)
2840 * If called from an extended quiescent state, invoke the RCU
2841 * core in order to force a re-evaluation of RCU's idleness.
2843 if (!rcu_is_watching())
2846 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2847 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2851 * Force the grace period if too many callbacks or too long waiting.
2852 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2853 * if some other CPU has recently done so. Also, don't bother
2854 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2855 * is the only one waiting for a grace period to complete.
2857 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2858 rdp->qlen_last_fqs_check + qhimark)) {
2860 /* Are we ignoring a completed grace period? */
2861 note_gp_changes(rdp);
2863 /* Start a new grace period if one not already started. */
2864 if (!rcu_gp_in_progress()) {
2865 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2867 /* Give the grace period a kick. */
2868 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2869 if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
2870 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2871 rcu_force_quiescent_state();
2872 rdp->n_force_qs_snap = rcu_state.n_force_qs;
2873 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2879 * RCU callback function to leak a callback.
2881 static void rcu_leak_callback(struct rcu_head *rhp)
2886 * Check and if necessary update the leaf rcu_node structure's
2887 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2888 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
2889 * structure's ->lock.
2891 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2893 raw_lockdep_assert_held_rcu_node(rnp);
2894 if (qovld_calc <= 0)
2895 return; // Early boot and wildcard value set.
2896 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2897 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2899 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2903 * Check and if necessary update the leaf rcu_node structure's
2904 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2905 * number of queued RCU callbacks. No locks need be held, but the
2906 * caller must have disabled interrupts.
2908 * Note that this function ignores the possibility that there are a lot
2909 * of callbacks all of which have already seen the end of their respective
2910 * grace periods. This omission is due to the need for no-CBs CPUs to
2911 * be holding ->nocb_lock to do this check, which is too heavy for a
2912 * common-case operation.
2914 static void check_cb_ovld(struct rcu_data *rdp)
2916 struct rcu_node *const rnp = rdp->mynode;
2918 if (qovld_calc <= 0 ||
2919 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2920 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2921 return; // Early boot wildcard value or already set correctly.
2922 raw_spin_lock_rcu_node(rnp);
2923 check_cb_ovld_locked(rdp, rnp);
2924 raw_spin_unlock_rcu_node(rnp);
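/*
 * Worked example (illustrative, assuming the default qhimark of 10000 and
 * qovld left at its default of -1, in which case qovld_calc is computed
 * as 2 * qhimark = 20000): a CPU whose ->cblist grows to 20000 callbacks
 * gets its ->grpmask bit set in its leaf's ->cbovldmask, biasing the
 * grace-period kthread toward more aggressive quiescent-state forcing;
 * the bit is cleared again once the backlog drains back below 20000.
 */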
2927 /* Helper function for call_rcu() and friends. */
2929 __call_rcu(struct rcu_head *head, rcu_callback_t func)
2931 unsigned long flags;
2932 struct rcu_data *rdp;
2935 /* Misaligned rcu_head! */
2936 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2938 if (debug_rcu_head_queue(head)) {
2940 * Probable double call_rcu(), so leak the callback.
2941 * Use rcu:rcu_callback trace event to find the previous
2942 * time callback was passed to __call_rcu().
2944 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
2946 WRITE_ONCE(head->func, rcu_leak_callback);
2951 local_irq_save(flags);
2952 kasan_record_aux_stack(head);
2953 rdp = this_cpu_ptr(&rcu_data);
2955 /* Add the callback to our list. */
2956 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2957 // This can trigger due to call_rcu() from offline CPU:
2958 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2959 WARN_ON_ONCE(!rcu_is_watching());
2960 // Very early boot, before rcu_init(). Initialize if needed
2961 // and then drop through to queue the callback.
2962 if (rcu_segcblist_empty(&rdp->cblist))
2963 rcu_segcblist_init(&rdp->cblist);
2967 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
2968 return; // Enqueued onto ->nocb_bypass, so just leave.
2969 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2970 rcu_segcblist_enqueue(&rdp->cblist, head);
2971 if (__is_kvfree_rcu_offset((unsigned long)func))
2972 trace_rcu_kvfree_callback(rcu_state.name, head,
2973 (unsigned long)func,
2974 rcu_segcblist_n_cbs(&rdp->cblist));
2976 trace_rcu_callback(rcu_state.name, head,
2977 rcu_segcblist_n_cbs(&rdp->cblist));
2979 /* Go handle any RCU core processing required. */
2980 if (unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
2981 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2983 __call_rcu_core(rdp, head, flags);
2984 local_irq_restore(flags);
2989 * call_rcu() - Queue an RCU callback for invocation after a grace period.
2990 * @head: structure to be used for queueing the RCU updates.
2991 * @func: actual callback function to be invoked after the grace period
2993 * The callback function will be invoked some time after a full grace
2994 * period elapses, in other words after all pre-existing RCU read-side
2995 * critical sections have completed. However, the callback function
2996 * might well execute concurrently with RCU read-side critical sections
2997 * that started after call_rcu() was invoked. RCU read-side critical
2998 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
2999 * may be nested. In addition, regions of code across which interrupts,
3000 * preemption, or softirqs have been disabled also serve as RCU read-side
3001 * critical sections. This includes hardware interrupt handlers, softirq
3002 * handlers, and NMI handlers.
3004 * Note that all CPUs must agree that the grace period extended beyond
3005 * all pre-existing RCU read-side critical section. On systems with more
3006 * than one CPU, this means that when "func()" is invoked, each CPU is
3007 * guaranteed to have executed a full memory barrier since the end of its
3008 * last RCU read-side critical section whose beginning preceded the call
3009 * to call_rcu(). It also means that each CPU executing an RCU read-side
3010 * critical section that continues beyond the start of "func()" must have
3011 * executed a memory barrier after the call_rcu() but before the beginning
3012 * of that RCU read-side critical section. Note that these guarantees
3013 * include CPUs that are offline, idle, or executing in user mode, as
3014 * well as CPUs that are executing in the kernel.
3016 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3017 * resulting RCU callback function "func()", then both CPU A and CPU B are
3018 * guaranteed to execute a full memory barrier during the time interval
3019 * between the call to call_rcu() and the invocation of "func()" -- even
3020 * if CPU A and CPU B are the same CPU (but again only if the system has
3021 * more than one CPU).
3023 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3025 __call_rcu(head, func);
3027 EXPORT_SYMBOL_GPL(call_rcu);
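/*
 * A minimal usage sketch for call_rcu() (illustrative only; "struct foo",
 * foo_reclaim(), and foo_release() are hypothetical): embed an rcu_head
 * in the RCU-protected structure and free it from the callback, which
 * runs only after a full grace period has elapsed.
 */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rcu);

	kfree(fp);	/* All pre-existing readers have finished. */
}

static void foo_release(struct foo *fp)
{
	call_rcu(&fp->rcu, foo_reclaim);	/* Deferred reclamation. */
}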
3030 /* Maximum number of jiffies to wait before draining a batch. */
3031 #define KFREE_DRAIN_JIFFIES (HZ / 50)
3032 #define KFREE_N_BATCHES 2
3033 #define FREE_N_CHANNELS 2
3036 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3037 * @nr_records: Number of active pointers in the array
3038 * @next: Next bulk object in the block chain
3039 * @records: Array of the kvfree_rcu() pointers
3041 struct kvfree_rcu_bulk_data {
3042 unsigned long nr_records;
3043 struct kvfree_rcu_bulk_data *next;
3048 * This macro defines how many entries the "records" array
3049 * will contain. It is based on the fact that a full
3050 * kvfree_rcu_bulk_data block, header plus records array, is exactly one page.
3052 #define KVFREE_BULK_MAX_ENTR \
3053 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
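/*
 * Worked example (illustrative, assuming 4 KB pages and a 64-bit kernel):
 * sizeof(struct kvfree_rcu_bulk_data) is 16 bytes (nr_records plus next),
 * so KVFREE_BULK_MAX_ENTR is (4096 - 16) / 8 = 510 pointers per block.
 */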
3056 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3057 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3058 * @head_free: List of kfree_rcu() objects waiting for a grace period
3059 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3060 * @krcp: Pointer to @kfree_rcu_cpu structure
3063 struct kfree_rcu_cpu_work {
3064 struct rcu_work rcu_work;
3065 struct rcu_head *head_free;
3066 struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3067 struct kfree_rcu_cpu *krcp;
3071 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3072 * @head: List of kfree_rcu() objects not yet waiting for a grace period
3073 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3074 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3075 * @lock: Synchronize access to this structure
3076 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3077 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3078 * @initialized: The @rcu_work fields have been initialized
3079 * @count: Number of objects for which GP not started
3081 * A simple cache list that contains objects for reuse.
3082 * In order to save some per-cpu space the list is singly linked.
3083 * Even though it is lockless, an access has to be protected by the per-cpu lock.
3085 * @nr_bkv_objs: number of allocated objects at @bkvcache.
3087 * This is a per-CPU structure. The reason that it is not included in
3088 * the rcu_data structure is to permit this code to be extracted from
3089 * the RCU files. Such extraction could allow further optimization of
3090 * the interactions with the slab allocators.
3092 struct kfree_rcu_cpu {
3093 struct rcu_head *head;
3094 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3095 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3096 raw_spinlock_t lock;
3097 struct delayed_work monitor_work;
3101 struct llist_head bkvcache;
3105 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3106 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3109 static __always_inline void
3110 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3112 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3115 for (i = 0; i < bhead->nr_records; i++)
3116 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3120 static inline struct kfree_rcu_cpu *
3121 krc_this_cpu_lock(unsigned long *flags)
3123 struct kfree_rcu_cpu *krcp;
3125 local_irq_save(*flags); // For safely calling this_cpu_ptr().
3126 krcp = this_cpu_ptr(&krc);
3127 raw_spin_lock(&krcp->lock);
3133 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3135 raw_spin_unlock(&krcp->lock);
3136 local_irq_restore(flags);
3139 static inline struct kvfree_rcu_bulk_data *
3140 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3142 if (!krcp->nr_bkv_objs)
3145 krcp->nr_bkv_objs--;
3146 return (struct kvfree_rcu_bulk_data *)
3147 llist_del_first(&krcp->bkvcache);
3151 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3152 struct kvfree_rcu_bulk_data *bnode)
3155 if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3158 llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3159 krcp->nr_bkv_objs++;
3165 * This function is invoked in workqueue context after a grace period.
3166 * It frees all the objects queued on ->bkvhead_free or ->head_free.
3168 static void kfree_rcu_work(struct work_struct *work)
3170 unsigned long flags;
3171 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3172 struct rcu_head *head, *next;
3173 struct kfree_rcu_cpu *krcp;
3174 struct kfree_rcu_cpu_work *krwp;
3177 krwp = container_of(to_rcu_work(work),
3178 struct kfree_rcu_cpu_work, rcu_work);
3181 raw_spin_lock_irqsave(&krcp->lock, flags);
3182 // Channels 1 and 2.
3183 for (i = 0; i < FREE_N_CHANNELS; i++) {
3184 bkvhead[i] = krwp->bkvhead_free[i];
3185 krwp->bkvhead_free[i] = NULL;
3189 head = krwp->head_free;
3190 krwp->head_free = NULL;
3191 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3193 // Handle the first two channels.
3194 for (i = 0; i < FREE_N_CHANNELS; i++) {
3195 for (; bkvhead[i]; bkvhead[i] = bnext) {
3196 bnext = bkvhead[i]->next;
3197 debug_rcu_bhead_unqueue(bkvhead[i]);
3199 rcu_lock_acquire(&rcu_callback_map);
3200 if (i == 0) { // kmalloc() / kfree().
3201 trace_rcu_invoke_kfree_bulk_callback(
3202 rcu_state.name, bkvhead[i]->nr_records,
3203 bkvhead[i]->records);
3205 kfree_bulk(bkvhead[i]->nr_records,
3206 bkvhead[i]->records);
3207 } else { // vmalloc() / vfree().
3208 for (j = 0; j < bkvhead[i]->nr_records; j++) {
3209 trace_rcu_invoke_kvfree_callback(
3211 bkvhead[i]->records[j], 0);
3213 vfree(bkvhead[i]->records[j]);
3216 rcu_lock_release(&rcu_callback_map);
3218 krcp = krc_this_cpu_lock(&flags);
3219 if (put_cached_bnode(krcp, bkvhead[i]))
3221 krc_this_cpu_unlock(krcp, flags);
3224 free_page((unsigned long) bkvhead[i]);
3226 cond_resched_tasks_rcu_qs();
3231 * Emergency case only. It can happen under low memory
3232 * condition when an allocation fails, so the "bulk"
3233 * path temporarily cannot be maintained.
3235 for (; head; head = next) {
3236 unsigned long offset = (unsigned long)head->func;
3237 void *ptr = (void *)head - offset;
3240 debug_rcu_head_unqueue((struct rcu_head *)ptr);
3241 rcu_lock_acquire(&rcu_callback_map);
3242 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3244 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3247 rcu_lock_release(&rcu_callback_map);
3248 cond_resched_tasks_rcu_qs();
3253 * Schedule the kfree batch RCU work to run in workqueue context after a GP.
3255 * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
3256 * timeout has been reached.
3258 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
3260 struct kfree_rcu_cpu_work *krwp;
3261 bool repeat = false;
3264 lockdep_assert_held(&krcp->lock);
3266 for (i = 0; i < KFREE_N_BATCHES; i++) {
3267 krwp = &(krcp->krw_arr[i]);
3270 * Try to detach bkvhead or head and attach it over any
3271 * available corresponding free channel. It can be that
3272 * a previous RCU batch is in progress, which means that
3273 * it is not possible to queue another one immediately, so
3274 * return false to tell the caller to retry.
3276 if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3277 (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3278 (krcp->head && !krwp->head_free)) {
3279 // Channel 1 corresponds to SLAB ptrs.
3280 // Channel 2 corresponds to vmalloc ptrs.
3281 for (j = 0; j < FREE_N_CHANNELS; j++) {
3282 if (!krwp->bkvhead_free[j]) {
3283 krwp->bkvhead_free[j] = krcp->bkvhead[j];
3284 krcp->bkvhead[j] = NULL;
3288 // Channel 3 corresponds to emergency path.
3289 if (!krwp->head_free) {
3290 krwp->head_free = krcp->head;
3294 WRITE_ONCE(krcp->count, 0);
3297 * There is one work item per batch, so each batch
3298 * can handle three "free channels". It can be that
3299 * the work item is still in the pending state when
3300 * the channels have been detached one after another.
3303 queue_rcu_work(system_wq, &krwp->rcu_work);
3306 // Repeat if any "free" corresponding channel is still busy.
3307 if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
3314 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
3315 unsigned long flags)
3317 // Attempt to start a new batch.
3318 krcp->monitor_todo = false;
3319 if (queue_kfree_rcu_work(krcp)) {
3320 // Success! Our job is done here.
3321 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3325 // Previous RCU batch still in progress, try again later.
3326 krcp->monitor_todo = true;
3327 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3328 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3332 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3333 * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3335 static void kfree_rcu_monitor(struct work_struct *work)
3337 unsigned long flags;
3338 struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3341 raw_spin_lock_irqsave(&krcp->lock, flags);
3342 if (krcp->monitor_todo)
3343 kfree_rcu_drain_unlock(krcp, flags);
3345 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3349 kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
3351 struct kvfree_rcu_bulk_data *bnode;
3354 if (unlikely(!krcp->initialized))
3357 lockdep_assert_held(&krcp->lock);
3358 idx = !!is_vmalloc_addr(ptr);
3360 /* Check if a new block is required. */
3361 if (!krcp->bkvhead[idx] ||
3362 krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3363 bnode = get_cached_bnode(krcp);
3366 * To keep this path working on raw non-preemptible
3367 * sections, prevent the optional entry into the
3368 * allocator as it uses sleeping locks. In fact, even
3369 * if the caller of kfree_rcu() is preemptible, this
3370 * path still is not, as krcp->lock is a raw spinlock.
3371 * With additional page pre-allocation in the works,
3372 * hitting this return is going to be much less likely.
3374 if (IS_ENABLED(CONFIG_PREEMPT_RT))
3378 * NOTE: For the single-argument kvfree_rcu() we can
3379 * drop the lock and get the page in sleepable
3380 * context. That would allow maintaining an array
3381 * for CONFIG_PREEMPT_RT as well if no cached
3382 * pages are available.
3384 bnode = (struct kvfree_rcu_bulk_data *)
3385 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
3388 /* Switch to emergency path. */
3389 if (unlikely(!bnode))
3392 /* Initialize the new block. */
3393 bnode->nr_records = 0;
3394 bnode->next = krcp->bkvhead[idx];
3396 /* Attach it to the head. */
3397 krcp->bkvhead[idx] = bnode;
3400 /* Finally insert. */
3401 krcp->bkvhead[idx]->records
3402 [krcp->bkvhead[idx]->nr_records++] = ptr;
3408 * Queue a request for lazy invocation of the appropriate free routine after a
3409 * grace period. Please note that three paths are maintained: two are the
3410 * main ones that use the array-of-pointers interface, and the third one is an
3411 * emergency path that is used only when the main paths temporarily cannot be
3412 * maintained due to memory pressure.
3414 * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
3415 * every KFREE_DRAIN_JIFFIES jiffies. All the objects in the batch will
3416 * be freed in workqueue context. This allows batching requests together to
3417 * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3419 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3421 unsigned long flags;
3422 struct kfree_rcu_cpu *krcp;
3427 ptr = (void *) head - (unsigned long) func;
3430 * Please note there is a limitation for the head-less
3431 * variant, which is why there is a clear rule for such
3432 * objects: they can be used from might_sleep() context
3433 * only. For other places please embed an rcu_head in
3437 ptr = (unsigned long *) func;
3440 krcp = krc_this_cpu_lock(&flags);
3442 // Queue the object but don't yet schedule the batch.
3443 if (debug_rcu_head_queue(ptr)) {
3444 // Probable double kfree_rcu(), just leak.
3445 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3448 // Mark as success and leave.
3454 * Under high memory pressure GFP_NOWAIT can fail,
3455 * in that case the emergency path is maintained.
3457 success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
3460 // Inline if kvfree_rcu(one_arg) call.
3464 head->next = krcp->head;
3469 WRITE_ONCE(krcp->count, krcp->count + 1);
3471 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3472 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3473 !krcp->monitor_todo) {
3474 krcp->monitor_todo = true;
3475 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3479 krc_this_cpu_unlock(krcp, flags);
3482 * Inline kvfree() after synchronize_rcu(). We can do
3483 * it from might_sleep() context only, so the current
3484 * CPU can pass the QS state.
3487 debug_rcu_head_unqueue((struct rcu_head *) ptr);
3492 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
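/*
 * A minimal usage sketch for the kvfree_rcu() wrappers built on top of
 * kvfree_call_rcu() (illustrative only; "struct bar" and bar_release()
 * are hypothetical).  The two-argument form names the embedded rcu_head
 * and may be used wherever call_rcu() is legal; the single-argument
 * (head-less) form may fall back to synchronize_rcu() and is therefore
 * restricted to sleepable context, so bar_release() must be called from
 * a context in which it is legal to sleep.
 */
struct bar {
	int data;
	struct rcu_head rh;
};

static void bar_release(struct bar *bp, void *blob)
{
	kvfree_rcu(bp, rh);	/* Two-argument form: uses bp->rh. */
	kvfree_rcu(blob);	/* Head-less form: might_sleep() context only. */
}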
3494 static unsigned long
3495 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3498 unsigned long count = 0;
3500 /* Snapshot count of all CPUs */
3501 for_each_possible_cpu(cpu) {
3502 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3504 count += READ_ONCE(krcp->count);
3510 static unsigned long
3511 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3514 unsigned long flags;
3516 for_each_possible_cpu(cpu) {
3518 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3520 count = krcp->count;
3521 raw_spin_lock_irqsave(&krcp->lock, flags);
3522 if (krcp->monitor_todo)
3523 kfree_rcu_drain_unlock(krcp, flags);
3525 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3527 sc->nr_to_scan -= count;
3530 if (sc->nr_to_scan <= 0)
3534 return freed == 0 ? SHRINK_STOP : freed;
3537 static struct shrinker kfree_rcu_shrinker = {
3538 .count_objects = kfree_rcu_shrink_count,
3539 .scan_objects = kfree_rcu_shrink_scan,
3541 .seeks = DEFAULT_SEEKS,
3544 void __init kfree_rcu_scheduler_running(void)
3547 unsigned long flags;
3549 for_each_possible_cpu(cpu) {
3550 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3552 raw_spin_lock_irqsave(&krcp->lock, flags);
3553 if (!krcp->head || krcp->monitor_todo) {
3554 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3557 krcp->monitor_todo = true;
3558 schedule_delayed_work_on(cpu, &krcp->monitor_work,
3559 KFREE_DRAIN_JIFFIES);
3560 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3565 * During early boot, any blocking grace-period wait automatically
3566 * implies a grace period. Later on, this is never the case for PREEMPTION.
3568 * However, because a context switch is a grace period for !PREEMPTION, any
3569 * blocking grace-period wait automatically implies a grace period if
3570 * there is only one CPU online at any point in time during execution of
3571 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
3572 * occasionally incorrectly indicate that there are multiple CPUs online
3573 * when there was in fact only one the whole time, as this just adds some
3574 * overhead: RCU still operates correctly.
3576 static int rcu_blocking_is_gp(void)
3580 if (IS_ENABLED(CONFIG_PREEMPTION))
3581 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3582 might_sleep(); /* Check for RCU read-side critical section. */
3585 * If the rcu_state.n_online_cpus counter is equal to one,
3586 * there is only one CPU, and that CPU sees all prior accesses
3587 * made by any CPU that was online at the time of its access.
3588 * Furthermore, if this counter is equal to one, its value cannot
3589 * change until after the preempt_enable() below.
3591 * Furthermore, if rcu_state.n_online_cpus is equal to one here,
3592 * all later CPUs (both this one and any that come online later
3593 * on) are guaranteed to see all accesses prior to this point
3594 * in the code, without the need for additional memory barriers.
3595 * Those memory barriers are provided by CPU-hotplug code.
3597 ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
3603 * synchronize_rcu - wait until a grace period has elapsed.
3605 * Control will return to the caller some time after a full grace
3606 * period has elapsed, in other words after all currently executing RCU
3607 * read-side critical sections have completed. Note, however, that
3608 * upon return from synchronize_rcu(), the caller might well be executing
3609 * concurrently with new RCU read-side critical sections that began while
3610 * synchronize_rcu() was waiting. RCU read-side critical sections are
3611 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3612 * In addition, regions of code across which interrupts, preemption, or
3613 * softirqs have been disabled also serve as RCU read-side critical
3614 * sections. This includes hardware interrupt handlers, softirq handlers,
3617 * Note that this guarantee implies further memory-ordering guarantees.
3618 * On systems with more than one CPU, when synchronize_rcu() returns,
3619 * each CPU is guaranteed to have executed a full memory barrier since
3620 * the end of its last RCU read-side critical section whose beginning
3621 * preceded the call to synchronize_rcu(). In addition, each CPU having
3622 * an RCU read-side critical section that extends beyond the return from
3623 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3624 * after the beginning of synchronize_rcu() and before the beginning of
3625 * that RCU read-side critical section. Note that these guarantees include
3626 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3627 * that are executing in the kernel.
3629 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3630 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3631 * to have executed a full memory barrier during the execution of
3632 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3633 * again only if the system has more than one CPU).
3635 void synchronize_rcu(void)
3637 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3638 lock_is_held(&rcu_lock_map) ||
3639 lock_is_held(&rcu_sched_lock_map),
3640 "Illegal synchronize_rcu() in RCU read-side critical section");
3641 if (rcu_blocking_is_gp())
3642 return; // Context allows vacuous grace periods.
3643 if (rcu_gp_is_expedited())
3644 synchronize_rcu_expedited();
3646 wait_rcu_gp(call_rcu);
3648 EXPORT_SYMBOL_GPL(synchronize_rcu);
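/*
 * Illustrative usage sketch (not part of this file): the classic updater
 * pattern that relies on the guarantees documented above.  The names
 * gbl_foo, gbl_lock, struct foo and update_foo are hypothetical.
 *
 *	struct foo __rcu *gbl_foo;
 *	DEFINE_SPINLOCK(gbl_lock);
 *
 *	void update_foo(struct foo *newp)
 *	{
 *		struct foo *oldp;
 *
 *		spin_lock(&gbl_lock);
 *		oldp = rcu_dereference_protected(gbl_foo,
 *						 lockdep_is_held(&gbl_lock));
 *		rcu_assign_pointer(gbl_foo, newp);
 *		spin_unlock(&gbl_lock);
 *
 *		synchronize_rcu();	// Wait for pre-existing readers to finish.
 *		kfree(oldp);		// No reader can still reference oldp.
 *	}
 */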
3651 * get_state_synchronize_rcu - Snapshot current RCU state
3653 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3654 * to determine whether or not a full grace period has elapsed in the
3657 unsigned long get_state_synchronize_rcu(void)
3660 * Any prior manipulation of RCU-protected data must happen
3661 * before the load from ->gp_seq.
3664 return rcu_seq_snap(&rcu_state.gp_seq);
3666 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3669 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3671 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3673 * If a full RCU grace period has elapsed since the earlier call to
3674 * get_state_synchronize_rcu(), just return. Otherwise, invoke
3675 * synchronize_rcu() to wait for a full grace period.
3677 * Yes, this function does not take counter wrap into account. But
3678 * counter wrap is harmless. If the counter wraps, we have waited for
3679 * more than 2 billion grace periods (and way more on a 64-bit system!),
3680 * so waiting for one additional grace period should be just fine.
3682 void cond_synchronize_rcu(unsigned long oldstate)
3684 if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
3687 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3689 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
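/*
 * Illustrative usage sketch (not part of this file): a caller that has
 * other work to do between snapshotting RCU state and needing a grace
 * period can often avoid blocking entirely.  do_other_work() is
 * hypothetical.
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	do_other_work();		// A grace period may elapse meanwhile.
 *
 *	cond_synchronize_rcu(cookie);	// Blocks only if no full grace period
 *					// has elapsed since the snapshot.
 */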
3692 * Check to see if there is any immediate RCU-related work to be done by
3693 * the current CPU, returning 1 if so and zero otherwise. The checks are
3694 * in order of increasing expense: checks that can be carried out against
3695 * CPU-local state are performed first. However, we must check for CPU
3696 * stalls first, else we might not get a chance.
3698 static int rcu_pending(int user)
3700 bool gp_in_progress;
3701 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3702 struct rcu_node *rnp = rdp->mynode;
3704 /* Check for CPU stalls, if enabled. */
3705 check_cpu_stall(rdp);
3707 /* Does this CPU need a deferred NOCB wakeup? */
3708 if (rcu_nocb_need_deferred_wakeup(rdp))
3711 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3712 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3715 /* Is the RCU core waiting for a quiescent state from this CPU? */
3716 gp_in_progress = rcu_gp_in_progress();
3717 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3720 /* Does this CPU have callbacks ready to invoke? */
3721 if (!rcu_segcblist_is_offloaded(&rdp->cblist) &&
3722 rcu_segcblist_ready_cbs(&rdp->cblist))
3725 /* Has RCU gone idle with this CPU needing another grace period? */
3726 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3727 !rcu_segcblist_is_offloaded(&rdp->cblist) &&
3728 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3731 /* Have RCU grace period completed or started? */
3732 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3733 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3741 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3742 * the compiler is expected to optimize this away.
3744 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3746 trace_rcu_barrier(rcu_state.name, s, cpu,
3747 atomic_read(&rcu_state.barrier_cpu_count), done);
3751 * RCU callback function for rcu_barrier(). If we are last, wake
3752 * up the task executing rcu_barrier().
3754 * Note that the value of rcu_state.barrier_sequence must be captured
3755 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
3756 * other CPUs might count the value down to zero before this CPU gets
3757 * around to invoking rcu_barrier_trace(), which might result in bogus
3758 * data from the next instance of rcu_barrier().
3760 static void rcu_barrier_callback(struct rcu_head *rhp)
3762 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3764 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3765 rcu_barrier_trace(TPS("LastCB"), -1, s);
3766 complete(&rcu_state.barrier_completion);
3768 rcu_barrier_trace(TPS("CB"), -1, s);
3773 * Called with preemption disabled, and from cross-cpu IRQ context.
3775 static void rcu_barrier_func(void *cpu_in)
3777 uintptr_t cpu = (uintptr_t)cpu_in;
3778 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3780 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3781 rdp->barrier_head.func = rcu_barrier_callback;
3782 debug_rcu_head_queue(&rdp->barrier_head);
3784 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
3785 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3786 atomic_inc(&rcu_state.barrier_cpu_count);
3788 debug_rcu_head_unqueue(&rdp->barrier_head);
3789 rcu_barrier_trace(TPS("IRQNQ"), -1,
3790 rcu_state.barrier_sequence);
3792 rcu_nocb_unlock(rdp);
3796 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3798 * Note that this primitive does not necessarily wait for an RCU grace period
3799 * to complete. For example, if there are no RCU callbacks queued anywhere
3800 * in the system, then rcu_barrier() is within its rights to return
3801 * immediately, without waiting for anything, much less an RCU grace period.
3803 void rcu_barrier(void)
3806 struct rcu_data *rdp;
3807 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3809 rcu_barrier_trace(TPS("Begin"), -1, s);
3811 /* Take mutex to serialize concurrent rcu_barrier() requests. */
3812 mutex_lock(&rcu_state.barrier_mutex);
3814 /* Did someone else do our work for us? */
3815 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3816 rcu_barrier_trace(TPS("EarlyExit"), -1,
3817 rcu_state.barrier_sequence);
3818 smp_mb(); /* caller's subsequent code after above check. */
3819 mutex_unlock(&rcu_state.barrier_mutex);
3823 /* Mark the start of the barrier operation. */
3824 rcu_seq_start(&rcu_state.barrier_sequence);
3825 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3828 * Initialize the count to two rather than to zero in order
3829 * to avoid a too-soon return to zero in case of an immediate
3830 * invocation of the just-enqueued callback (or preemption of
3831 * this task). Exclude CPU-hotplug operations to ensure that no
3832 * offline non-offloaded CPU has callbacks queued.
3834 init_completion(&rcu_state.barrier_completion);
3835 atomic_set(&rcu_state.barrier_cpu_count, 2);
3839 * Force each CPU with callbacks to register a new callback.
3840 * When that callback is invoked, we will know that all of the
3841 * corresponding CPU's preceding callbacks have been invoked.
3843 for_each_possible_cpu(cpu) {
3844 rdp = per_cpu_ptr(&rcu_data, cpu);
3845 if (cpu_is_offline(cpu) &&
3846 !rcu_segcblist_is_offloaded(&rdp->cblist))
3848 if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
3849 rcu_barrier_trace(TPS("OnlineQ"), cpu,
3850 rcu_state.barrier_sequence);
3851 smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
3852 } else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
3853 cpu_is_offline(cpu)) {
3854 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
3855 rcu_state.barrier_sequence);
3856 local_irq_disable();
3857 rcu_barrier_func((void *)cpu);
3859 } else if (cpu_is_offline(cpu)) {
3860 rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
3861 rcu_state.barrier_sequence);
3863 rcu_barrier_trace(TPS("OnlineNQ"), cpu,
3864 rcu_state.barrier_sequence);
3870 * Now that we have an rcu_barrier_callback() callback on each
3871 * CPU, and thus each counted, remove the initial count.
3873 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3874 complete(&rcu_state.barrier_completion);
3876 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3877 wait_for_completion(&rcu_state.barrier_completion);
3879 /* Mark the end of the barrier operation. */
3880 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3881 rcu_seq_end(&rcu_state.barrier_sequence);
3883 /* Other rcu_barrier() invocations can now safely proceed. */
3884 mutex_unlock(&rcu_state.barrier_mutex);
3886 EXPORT_SYMBOL_GPL(rcu_barrier);
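/*
 * Illustrative usage sketch (not part of this file): module-exit code
 * typically uses rcu_barrier() to make sure that callbacks it posted via
 * call_rcu() or kfree_rcu() have all been invoked before the module's
 * code and data disappear.  my_exit() and my_cache are hypothetical.
 *
 *	static void __exit my_exit(void)
 *	{
 *		// First stop posting new callbacks (unregister hooks, etc.),
 *		// then wait for the ones already queued to be invoked.
 *		rcu_barrier();
 *		kmem_cache_destroy(my_cache);
 *	}
 */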
3889 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
3890 * first CPU in a given leaf rcu_node structure coming online. The caller
3891 * must hold the corresponding leaf rcu_node ->lock with interrupts
3894 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3898 struct rcu_node *rnp = rnp_leaf;
3900 raw_lockdep_assert_held_rcu_node(rnp_leaf);
3901 WARN_ON_ONCE(rnp->wait_blkd_tasks);
3903 mask = rnp->grpmask;
3907 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3908 oldmask = rnp->qsmaskinit;
3909 rnp->qsmaskinit |= mask;
3910 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
3917 * Do boot-time initialization of a CPU's per-CPU RCU data.
3920 rcu_boot_init_percpu_data(int cpu)
3922 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3924 /* Set up local state, ensuring consistent view of global state. */
3925 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
3926 INIT_WORK(&rdp->strict_work, strict_work_handler);
3927 WARN_ON_ONCE(rdp->dynticks_nesting != 1);
3928 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
3929 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
3930 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
3931 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
3932 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
3934 rcu_boot_init_nocb_percpu_data(rdp);
3938 * Invoked early in the CPU-online process, when pretty much all services
3939 * are available. The incoming CPU is not present.
3941 * Initializes a CPU's per-CPU RCU data. Note that only one online or
3942 * offline event can be happening at a given time. Note also that we can
3943 * accept some slop in the rcu_state.gp_seq access due to the fact that this
3944 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
3945 * And any offloaded callbacks are being numbered elsewhere.
3947 int rcutree_prepare_cpu(unsigned int cpu)
3949 unsigned long flags;
3950 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3951 struct rcu_node *rnp = rcu_get_root();
3953 /* Set up local state, ensuring consistent view of global state. */
3954 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3955 rdp->qlen_last_fqs_check = 0;
3956 rdp->n_force_qs_snap = rcu_state.n_force_qs;
3957 rdp->blimit = blimit;
3958 if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
3959 !rcu_segcblist_is_offloaded(&rdp->cblist))
3960 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
3961 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */
3962 rcu_dynticks_eqs_online();
3963 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
3966 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
3967 * propagation up the rcu_node tree will happen at the beginning
3968 * of the next grace period.
3971 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
3972 rdp->beenonline = true; /* We have now been online. */
3973 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
3974 rdp->gp_seq_needed = rdp->gp_seq;
3975 rdp->cpu_no_qs.b.norm = true;
3976 rdp->core_needs_qs = false;
3977 rdp->rcu_iw_pending = false;
3978 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
3979 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
3980 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3981 rcu_prepare_kthreads(cpu);
3982 rcu_spawn_cpu_nocb_kthread(cpu);
3983 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
3989 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
3991 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3993 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3995 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3999 * Near the end of the CPU-online process. Pretty much all services
4000 * enabled, and the CPU is now very much alive.
4002 int rcutree_online_cpu(unsigned int cpu)
4004 unsigned long flags;
4005 struct rcu_data *rdp;
4006 struct rcu_node *rnp;
4008 rdp = per_cpu_ptr(&rcu_data, cpu);
4010 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4011 rnp->ffmask |= rdp->grpmask;
4012 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4013 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4014 return 0; /* Too early in boot for scheduler work. */
4015 sync_sched_exp_online_cleanup(cpu);
4016 rcutree_affinity_setting(cpu, -1);
4018 // Stop-machine done, so allow nohz_full to disable tick.
4019 tick_dep_clear(TICK_DEP_BIT_RCU);
4024 * Near the beginning of the CPU-offline process. The CPU is still very much alive
4025 * with pretty much all services enabled.
4027 int rcutree_offline_cpu(unsigned int cpu)
4029 unsigned long flags;
4030 struct rcu_data *rdp;
4031 struct rcu_node *rnp;
4033 rdp = per_cpu_ptr(&rcu_data, cpu);
4035 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4036 rnp->ffmask &= ~rdp->grpmask;
4037 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4039 rcutree_affinity_setting(cpu, cpu);
4041 // nohz_full CPUs need the tick for stop-machine to work quickly
4042 tick_dep_set(TICK_DEP_BIT_RCU);
4047 * Mark the specified CPU as being online so that subsequent grace periods
4048 * (both expedited and normal) will wait on it. Note that this means that
4049 * incoming CPUs are not allowed to use RCU read-side critical sections
4050 * until this function is called. Failing to observe this restriction
4051 * will result in lockdep splats.
4053 * Note that this function is special in that it is invoked directly
4054 * from the incoming CPU rather than from the cpuhp_step mechanism.
4055 * This is because this function must be invoked at a precise location.
4057 void rcu_cpu_starting(unsigned int cpu)
4059 unsigned long flags;
4061 struct rcu_data *rdp;
4062 struct rcu_node *rnp;
4065 rdp = per_cpu_ptr(&rcu_data, cpu);
4066 if (rdp->cpu_started)
4068 rdp->cpu_started = true;
4071 mask = rdp->grpmask;
4072 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4073 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4074 newcpu = !(rnp->expmaskinitnext & mask);
4075 rnp->expmaskinitnext |= mask;
4076 /* Allow lockless access for expedited grace periods. */
4077 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4078 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4079 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4080 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4081 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4083 /* An incoming CPU should never be blocking a grace period. */
4084 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4085 rcu_disable_urgency_upon_qs(rdp);
4086 /* Report QS -after- changing ->qsmaskinitnext! */
4087 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4089 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4091 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4094 #ifdef CONFIG_HOTPLUG_CPU
4096 * The outgoing CPU has no further need of RCU, so remove it from
4097 * the rcu_node tree's ->qsmaskinitnext bit masks.
4099 * Note that this function is special in that it is invoked directly
4100 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4101 * This is because this function must be invoked at a precise location.
4103 void rcu_report_dead(unsigned int cpu)
4105 unsigned long flags;
4107 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4108 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4110 /* QS for any half-done expedited grace period. */
4112 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
4114 rcu_preempt_deferred_qs(current);
4116 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4117 mask = rdp->grpmask;
4118 raw_spin_lock(&rcu_state.ofl_lock);
4119 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4120 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4121 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4122 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4123 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4124 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4125 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4127 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4128 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4129 raw_spin_unlock(&rcu_state.ofl_lock);
4131 rdp->cpu_started = false;
4135 * The outgoing CPU has just passed through the dying-idle state, and we
4136 * are being invoked from the CPU that was IPIed to continue the offline
4137 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
4139 void rcutree_migrate_callbacks(int cpu)
4141 unsigned long flags;
4142 struct rcu_data *my_rdp;
4143 struct rcu_node *my_rnp;
4144 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4147 if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
4148 rcu_segcblist_empty(&rdp->cblist))
4149 return; /* No callbacks to migrate. */
4151 local_irq_save(flags);
4152 my_rdp = this_cpu_ptr(&rcu_data);
4153 my_rnp = my_rdp->mynode;
4154 rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4155 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4156 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4157 /* Leverage recent GPs and set GP for new callbacks. */
4158 needwake = rcu_advance_cbs(my_rnp, rdp) ||
4159 rcu_advance_cbs(my_rnp, my_rdp);
4160 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4161 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4162 rcu_segcblist_disable(&rdp->cblist);
4163 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4164 !rcu_segcblist_n_cbs(&my_rdp->cblist));
4165 if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
4166 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4167 __call_rcu_nocb_wake(my_rdp, true, flags);
4169 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4170 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4173 rcu_gp_kthread_wake();
4174 lockdep_assert_irqs_enabled();
4175 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4176 !rcu_segcblist_empty(&rdp->cblist),
4177 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4178 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4179 rcu_segcblist_first_cb(&rdp->cblist));
4184 * On non-huge systems, use expedited RCU grace periods to make suspend
4185 * and hibernation run faster.
4187 static int rcu_pm_notify(struct notifier_block *self,
4188 unsigned long action, void *hcpu)
4191 case PM_HIBERNATION_PREPARE:
4192 case PM_SUSPEND_PREPARE:
4195 case PM_POST_HIBERNATION:
4196 case PM_POST_SUSPEND:
4197 rcu_unexpedite_gp();
4206 * Spawn the kthreads that handle RCU's grace periods.
4208 static int __init rcu_spawn_gp_kthread(void)
4210 unsigned long flags;
4211 int kthread_prio_in = kthread_prio;
4212 struct rcu_node *rnp;
4213 struct sched_param sp;
4214 struct task_struct *t;
4216 /* Force priority into range. */
4217 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4218 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4220 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4222 else if (kthread_prio < 0)
4224 else if (kthread_prio > 99)
4227 if (kthread_prio != kthread_prio_in)
4228 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4229 kthread_prio, kthread_prio_in);
4231 rcu_scheduler_fully_active = 1;
4232 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4233 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4236 sp.sched_priority = kthread_prio;
4237 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4239 rnp = rcu_get_root();
4240 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4241 WRITE_ONCE(rcu_state.gp_activity, jiffies);
4242 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4243 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4244 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
4245 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4247 rcu_spawn_nocb_kthreads();
4248 rcu_spawn_boost_kthreads();
4251 early_initcall(rcu_spawn_gp_kthread);
4254 * This function is invoked towards the end of the scheduler's
4255 * initialization process. Before this is called, the idle task might
4256 * contain synchronous grace-period primitives (during which time, this idle
4257 * task is booting the system, and such primitives are no-ops). After this
4258 * function is called, any synchronous grace-period primitives are run as
4259 * expedited, with the requesting task driving the grace period forward.
4260 * A later core_initcall() rcu_set_runtime_mode() will switch to full
4261 * runtime RCU functionality.
4263 void rcu_scheduler_starting(void)
4265 WARN_ON(num_online_cpus() != 1);
4266 WARN_ON(nr_context_switches() > 0);
4267 rcu_test_sync_prims();
4268 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4269 rcu_test_sync_prims();
4273 * Helper function for rcu_init() that initializes the rcu_state structure.
4275 static void __init rcu_init_one(void)
4277 static const char * const buf[] = RCU_NODE_NAME_INIT;
4278 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4279 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4280 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4282 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
4286 struct rcu_node *rnp;
4288 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
4290 /* Silence gcc 4.8 false positive about array index out of range. */
4291 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4292 panic("rcu_init_one: rcu_num_lvls out of range");
4294 /* Initialize the level-tracking arrays. */
4296 for (i = 1; i < rcu_num_lvls; i++)
4297 rcu_state.level[i] =
4298 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4299 rcu_init_levelspread(levelspread, num_rcu_lvl);
4301 /* Initialize the elements themselves, starting from the leaves. */
4303 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4304 cpustride *= levelspread[i];
4305 rnp = rcu_state.level[i];
4306 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4307 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4308 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4309 &rcu_node_class[i], buf[i]);
4310 raw_spin_lock_init(&rnp->fqslock);
4311 lockdep_set_class_and_name(&rnp->fqslock,
4312 &rcu_fqs_class[i], fqs[i]);
4313 rnp->gp_seq = rcu_state.gp_seq;
4314 rnp->gp_seq_needed = rcu_state.gp_seq;
4315 rnp->completedqs = rcu_state.gp_seq;
4317 rnp->qsmaskinit = 0;
4318 rnp->grplo = j * cpustride;
4319 rnp->grphi = (j + 1) * cpustride - 1;
4320 if (rnp->grphi >= nr_cpu_ids)
4321 rnp->grphi = nr_cpu_ids - 1;
4327 rnp->grpnum = j % levelspread[i - 1];
4328 rnp->grpmask = BIT(rnp->grpnum);
4329 rnp->parent = rcu_state.level[i - 1] +
4330 j / levelspread[i - 1];
4333 INIT_LIST_HEAD(&rnp->blkd_tasks);
4334 rcu_init_one_nocb(rnp);
4335 init_waitqueue_head(&rnp->exp_wq[0]);
4336 init_waitqueue_head(&rnp->exp_wq[1]);
4337 init_waitqueue_head(&rnp->exp_wq[2]);
4338 init_waitqueue_head(&rnp->exp_wq[3]);
4339 spin_lock_init(&rnp->exp_lock);
4343 init_swait_queue_head(&rcu_state.gp_wq);
4344 init_swait_queue_head(&rcu_state.expedited_wq);
4345 rnp = rcu_first_leaf_node();
4346 for_each_possible_cpu(i) {
4347 while (i > rnp->grphi)
4349 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4350 rcu_boot_init_percpu_data(i);
4355 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4356 * replace the definitions in tree.h because those are needed to size
4357 * the ->node array in the rcu_state structure.
4359 static void __init rcu_init_geometry(void)
4363 int rcu_capacity[RCU_NUM_LVLS];
4366 * Initialize any unspecified boot parameters.
4367 * The default values of jiffies_till_first_fqs and
4368 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4369 * value, which is a function of HZ, plus one for each
4370 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4372 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4373 if (jiffies_till_first_fqs == ULONG_MAX)
4374 jiffies_till_first_fqs = d;
4375 if (jiffies_till_next_fqs == ULONG_MAX)
4376 jiffies_till_next_fqs = d;
4377 adjust_jiffies_till_sched_qs();
4379 /* If the compile-time values are accurate, just leave. */
4380 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4381 nr_cpu_ids == NR_CPUS)
4383 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4384 rcu_fanout_leaf, nr_cpu_ids);
4387 * The boot-time rcu_fanout_leaf parameter must be at least two
4388 * and cannot exceed the number of bits in the rcu_node masks.
4389 * Complain and fall back to the compile-time values if this
4390 * limit is exceeded.
4392 if (rcu_fanout_leaf < 2 ||
4393 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4394 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4400 * Compute the number of CPUs that can be handled by an rcu_node tree
4401 * with the given number of levels.
4403 rcu_capacity[0] = rcu_fanout_leaf;
4404 for (i = 1; i < RCU_NUM_LVLS; i++)
4405 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
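/*
 * Worked example (assuming the common defaults RCU_FANOUT_LEAF=16 and
 * RCU_FANOUT=64 on a 64-bit build): rcu_capacity[] starts { 16, 1024,
 * 65536, ... }.  With nr_cpu_ids=128 the loops below then pick a
 * two-level tree: one root rcu_node plus DIV_ROUND_UP(128, 16) = 8 leaf
 * rcu_nodes, for rcu_num_nodes = 9 in total.
 */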
4408 * The tree must be able to accommodate the configured number of CPUs.
4409 * If this limit is exceeded, fall back to the compile-time values.
4411 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4412 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4417 /* Calculate the number of levels in the tree. */
4418 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4420 rcu_num_lvls = i + 1;
4422 /* Calculate the number of rcu_nodes at each level of the tree. */
4423 for (i = 0; i < rcu_num_lvls; i++) {
4424 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4425 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4428 /* Calculate the total number of rcu_node structures. */
4430 for (i = 0; i < rcu_num_lvls; i++)
4431 rcu_num_nodes += num_rcu_lvl[i];
4435 * Dump out the structure of the rcu_node combining tree associated
4436 * with the rcu_state structure.
4438 static void __init rcu_dump_rcu_node_tree(void)
4441 struct rcu_node *rnp;
4443 pr_info("rcu_node tree layout dump\n");
4445 rcu_for_each_node_breadth_first(rnp) {
4446 if (rnp->level != level) {
4451 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4456 struct workqueue_struct *rcu_gp_wq;
4457 struct workqueue_struct *rcu_par_gp_wq;
4459 static void __init kfree_rcu_batch_init(void)
4464 for_each_possible_cpu(cpu) {
4465 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4466 struct kvfree_rcu_bulk_data *bnode;
4468 for (i = 0; i < KFREE_N_BATCHES; i++) {
4469 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4470 krcp->krw_arr[i].krcp = krcp;
4473 for (i = 0; i < rcu_min_cached_objs; i++) {
4474 bnode = (struct kvfree_rcu_bulk_data *)
4475 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
4478 put_cached_bnode(krcp, bnode);
4480 pr_err("Failed to preallocate for %d CPU!\n", cpu);
4483 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4484 krcp->initialized = true;
4486 if (register_shrinker(&kfree_rcu_shrinker))
4487 pr_err("Failed to register kfree_rcu() shrinker!\n");
4490 void __init rcu_init(void)
4494 rcu_early_boot_tests();
4496 kfree_rcu_batch_init();
4497 rcu_bootup_announce();
4498 rcu_init_geometry();
4501 rcu_dump_rcu_node_tree();
4503 open_softirq(RCU_SOFTIRQ, rcu_core_si);
4506 * We don't need protection against CPU-hotplug here because
4507 * this is called early in boot, before either interrupts
4508 * or the scheduler are operational.
4510 pm_notifier(rcu_pm_notify, 0);
4511 for_each_online_cpu(cpu) {
4512 rcutree_prepare_cpu(cpu);
4513 rcu_cpu_starting(cpu);
4514 rcutree_online_cpu(cpu);
4517 /* Create workqueue for expedited GPs and for Tree SRCU. */
4518 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4519 WARN_ON(!rcu_gp_wq);
4520 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4521 WARN_ON(!rcu_par_gp_wq);
4524 /* Fill in default value for rcutree.qovld boot parameter. */
4525 /* -After- the rcu_node ->lock fields are initialized! */
4527 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4532 #include "tree_stall.h"
4533 #include "tree_exp.h"
4534 #include "tree_plugin.h"