1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Task-based RCU implementations.
5 * Copyright (C) 2020 Paul E. McKenney
8 #ifdef CONFIG_TASKS_RCU_GENERIC
9 #include "rcu_segcblist.h"
11 ////////////////////////////////////////////////////////////////////////
13 // Generic data structures.
16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
17 typedef void (*pregp_func_t)(void);
18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
19 typedef void (*postscan_func_t)(struct list_head *hop);
20 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
25 * @cblist: Callback list.
26 * @lock: Lock protecting per-CPU callback list.
27 * @rtp_jiffies: Jiffies counter value for statistics.
28 * @rtp_n_lock_retries: Rough lock-contention statistic.
29 * @rtp_work: Work queue for invoking callbacks.
30 * @rtp_irq_work: IRQ work queue for deferred wakeups.
31 * @barrier_q_head: RCU callback for barrier operation.
32 * @cpu: CPU number corresponding to this entry.
33 * @rtpp: Pointer to the rcu_tasks structure.
35 struct rcu_tasks_percpu {
36 struct rcu_segcblist cblist;
37 raw_spinlock_t __private lock;
38 unsigned long rtp_jiffies;
39 unsigned long rtp_n_lock_retries;
40 struct work_struct rtp_work;
41 struct irq_work rtp_irq_work;
42 struct rcu_head barrier_q_head;
43 int cpu;
44 struct rcu_tasks *rtpp;
45 };
48 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
49 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
50 * @cbs_gbl_lock: Lock protecting callback list.
51 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
52 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
53 * @gp_func: This flavor's grace-period-wait function.
54 * @gp_state: Grace period's most recent state transition (debugging).
55 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
56 * @init_fract: Initial backoff sleep interval.
57 * @gp_jiffies: Time of last @gp_state transition.
58 * @gp_start: Most recent grace-period start in jiffies.
59 * @tasks_gp_seq: Number of grace periods completed since boot.
60 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
61 * @n_ipis_fails: Number of IPI-send failures.
62 * @pregp_func: This flavor's pre-grace-period function (optional).
63 * @pertask_func: This flavor's per-task scan function (optional).
64 * @postscan_func: This flavor's post-task scan function (optional).
65 * @holdouts_func: This flavor's holdout-list scan function (optional).
66 * @postgp_func: This flavor's post-grace-period function (optional).
67 * @call_func: This flavor's call_rcu()-equivalent function.
68 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
69 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
70 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
71 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
72 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
73 * @barrier_q_mutex: Serialize barrier operations.
74 * @barrier_q_count: Number of queues being waited on.
75 * @barrier_q_completion: Barrier wait/wakeup mechanism.
76 * @barrier_q_seq: Sequence number for barrier operations.
77 * @name: This flavor's textual name.
78 * @kname: This flavor's kthread name.
79 */
80 struct rcu_tasks {
81 struct rcuwait cbs_wait;
82 raw_spinlock_t cbs_gbl_lock;
83 struct mutex tasks_gp_mutex;
84 int gp_state;
85 int gp_sleep;
86 int init_fract;
87 unsigned long gp_jiffies;
88 unsigned long gp_start;
89 unsigned long tasks_gp_seq;
90 unsigned long n_ipis;
91 unsigned long n_ipis_fails;
92 struct task_struct *kthread_ptr;
93 rcu_tasks_gp_func_t gp_func;
94 pregp_func_t pregp_func;
95 pertask_func_t pertask_func;
96 postscan_func_t postscan_func;
97 holdouts_func_t holdouts_func;
98 postgp_func_t postgp_func;
99 call_rcu_func_t call_func;
100 struct rcu_tasks_percpu __percpu *rtpcpu;
101 int percpu_enqueue_shift;
102 int percpu_enqueue_lim;
103 int percpu_dequeue_lim;
104 unsigned long percpu_dequeue_gpseq;
105 struct mutex barrier_q_mutex;
106 atomic_t barrier_q_count;
107 struct completion barrier_q_completion;
108 unsigned long barrier_q_seq;
109 char *name;
110 char *kname;
111 };
113 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
115 #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
116 static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
117 .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
118 .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
119 }; \
120 static struct rcu_tasks rt_name = \
121 { \
122 .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
123 .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
124 .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \
125 .gp_func = gp, \
126 .call_func = call, \
127 .rtpcpu = &rt_name ## __percpu, \
128 .name = n, \
129 .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
130 .percpu_enqueue_lim = 1, \
131 .percpu_dequeue_lim = 1, \
132 .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
133 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
134 .kname = #rt_name, \
135 }
137 /* Track exiting tasks in order to allow them to be waited for. */
138 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
140 /* Avoid IPIing CPUs early in the grace period. */
141 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
142 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
143 module_param(rcu_task_ipi_delay, int, 0644);
145 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
146 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
147 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
148 module_param(rcu_task_stall_timeout, int, 0644);
149 #define RCU_TASK_STALL_INFO (HZ * 10)
150 static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
151 module_param(rcu_task_stall_info, int, 0644);
152 static int rcu_task_stall_info_mult __read_mostly = 3;
153 module_param(rcu_task_stall_info_mult, int, 0444);
155 static int rcu_task_enqueue_lim __read_mostly = -1;
156 module_param(rcu_task_enqueue_lim, int, 0444);
158 static bool rcu_task_cb_adjust;
159 static int rcu_task_contend_lim __read_mostly = 100;
160 module_param(rcu_task_contend_lim, int, 0444);
161 static int rcu_task_collapse_lim __read_mostly = 10;
162 module_param(rcu_task_collapse_lim, int, 0444);
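/*
 * Hedged usage sketch (not from the original source): because this header is
 * compiled as part of kernel/rcu/update.c, the module parameters above are
 * expected to take the "rcupdate." prefix. Values below are illustrative only.
 *
 *	// Kernel command line: warn after roughly 30000 jiffies of stall.
 *	rcupdate.rcu_task_stall_timeout=30000
 *
 *	// At run time, for the parameters registered with mode 0644:
 *	echo 10000 > /sys/module/rcupdate/parameters/rcu_task_stall_timeout
 */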
164 /* RCU tasks grace-period state for debugging. */
165 #define RTGS_INIT 0
166 #define RTGS_WAIT_WAIT_CBS 1
167 #define RTGS_WAIT_GP 2
168 #define RTGS_PRE_WAIT_GP 3
169 #define RTGS_SCAN_TASKLIST 4
170 #define RTGS_POST_SCAN_TASKLIST 5
171 #define RTGS_WAIT_SCAN_HOLDOUTS 6
172 #define RTGS_SCAN_HOLDOUTS 7
173 #define RTGS_POST_GP 8
174 #define RTGS_WAIT_READERS 9
175 #define RTGS_INVOKE_CBS 10
176 #define RTGS_WAIT_CBS 11
177 #ifndef CONFIG_TINY_RCU
178 static const char * const rcu_tasks_gp_state_names[] = {
180 "RTGS_WAIT_WAIT_CBS",
183 "RTGS_SCAN_TASKLIST",
184 "RTGS_POST_SCAN_TASKLIST",
185 "RTGS_WAIT_SCAN_HOLDOUTS",
186 "RTGS_SCAN_HOLDOUTS",
192 #endif /* #ifndef CONFIG_TINY_RCU */
194 ////////////////////////////////////////////////////////////////////////
198 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
200 /* Record grace-period phase and time. */
201 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
203 rtp->gp_state = newstate;
204 rtp->gp_jiffies = jiffies;
207 #ifndef CONFIG_TINY_RCU
208 /* Return state name. */
209 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
211 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
212 int j = READ_ONCE(i); // Prevent the compiler from reading twice
214 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
215 return "???";
216 return rcu_tasks_gp_state_names[j];
217 }
218 #endif /* #ifndef CONFIG_TINY_RCU */
220 // Initialize per-CPU callback lists for the specified flavor of
221 // Tasks RCU.
222 static void cblist_init_generic(struct rcu_tasks *rtp)
229 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
230 if (rcu_task_enqueue_lim < 0) {
231 rcu_task_enqueue_lim = 1;
232 rcu_task_cb_adjust = true;
233 pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
234 } else if (rcu_task_enqueue_lim == 0) {
235 rcu_task_enqueue_lim = 1;
237 lim = rcu_task_enqueue_lim;
239 if (lim > nr_cpu_ids)
240 lim = nr_cpu_ids;
241 shift = ilog2(nr_cpu_ids / lim);
242 if (((nr_cpu_ids - 1) >> shift) >= lim)
243 shift++;
244 WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
245 WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
246 smp_store_release(&rtp->percpu_enqueue_lim, lim);
247 for_each_possible_cpu(cpu) {
248 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
250 WARN_ON_ONCE(!rtpcp);
252 raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
253 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
254 if (rcu_segcblist_empty(&rtpcp->cblist))
255 rcu_segcblist_init(&rtpcp->cblist);
256 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
259 raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
261 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
262 pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
265 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
266 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
268 struct rcu_tasks *rtp;
269 struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
271 rtp = rtpcp->rtpp;
272 rcuwait_wake_up(&rtp->cbs_wait);
273 }
275 // Enqueue a callback for the specified flavor of Tasks RCU.
276 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
277 struct rcu_tasks *rtp)
283 bool needadjust = false;
285 struct rcu_tasks_percpu *rtpcp;
289 local_irq_save(flags);
291 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
292 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
293 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
294 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
295 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
297 if (rtpcp->rtp_jiffies != j) {
298 rtpcp->rtp_jiffies = j;
299 rtpcp->rtp_n_lock_retries = 0;
301 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
302 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
303 needadjust = true; // Defer adjustment to avoid deadlock.
305 if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
306 raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
307 cblist_init_generic(rtp);
308 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
310 needwake = rcu_segcblist_empty(&rtpcp->cblist);
311 rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
312 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
313 if (unlikely(needadjust)) {
314 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
315 if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
316 WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
317 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
318 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
319 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
321 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
324 /* We can't create the thread unless interrupts are enabled. */
325 if (needwake && READ_ONCE(rtp->kthread_ptr))
326 irq_work_queue(&rtpcp->rtp_irq_work);
329 // RCU callback function for rcu_barrier_tasks_generic().
330 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
332 struct rcu_tasks *rtp;
333 struct rcu_tasks_percpu *rtpcp;
335 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
337 if (atomic_dec_and_test(&rtp->barrier_q_count))
338 complete(&rtp->barrier_q_completion);
341 // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
342 // Operates in a manner similar to rcu_barrier().
343 static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
347 struct rcu_tasks_percpu *rtpcp;
348 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
350 mutex_lock(&rtp->barrier_q_mutex);
351 if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
353 mutex_unlock(&rtp->barrier_q_mutex);
356 rcu_seq_start(&rtp->barrier_q_seq);
357 init_completion(&rtp->barrier_q_completion);
358 atomic_set(&rtp->barrier_q_count, 2);
359 for_each_possible_cpu(cpu) {
360 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
362 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
363 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
364 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
365 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
366 atomic_inc(&rtp->barrier_q_count);
367 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
369 if (atomic_sub_and_test(2, &rtp->barrier_q_count))
370 complete(&rtp->barrier_q_completion);
371 wait_for_completion(&rtp->barrier_q_completion);
372 rcu_seq_end(&rtp->barrier_q_seq);
373 mutex_unlock(&rtp->barrier_q_mutex);
376 // Advance callbacks and indicate whether either a grace period or
377 // callback invocation is needed.
378 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
387 for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
388 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
390 /* Advance and accelerate any new callbacks. */
391 if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
393 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
394 // Should we shrink down to a single callback queue?
395 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
401 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
402 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
403 if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
404 needgpcb |= 0x3;
405 if (!rcu_segcblist_empty(&rtpcp->cblist))
406 needgpcb |= 0x1;
407 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
410 // Shrink down to a single callback queue if appropriate.
411 // This is done in two stages: (1) If there are no more than
412 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
413 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
414 // if there has not been an increase in callbacks, limit dequeuing
415 // to CPU 0. Note the matching RCU read-side critical section in
416 // call_rcu_tasks_generic().
417 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
418 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
419 if (rtp->percpu_enqueue_lim > 1) {
420 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
421 smp_store_release(&rtp->percpu_enqueue_lim, 1);
422 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
423 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
425 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
427 if (rcu_task_cb_adjust && !ncbsnz &&
428 poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
429 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
430 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
431 WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
432 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
434 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
435 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
437 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
439 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
445 // Advance callbacks and invoke any that are ready.
446 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
452 struct rcu_head *rhp;
453 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
454 struct rcu_tasks_percpu *rtpcp_next;
457 cpunext = cpu * 2 + 1;
458 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
459 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
460 queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
462 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
463 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
464 queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
468 if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
470 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
471 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
472 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
473 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
475 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
481 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
482 rcu_segcblist_add_len(&rtpcp->cblist, -len);
483 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
484 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
487 // Workqueue flood to advance callbacks and invoke any that are ready.
488 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
490 struct rcu_tasks *rtp;
491 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
494 rcu_tasks_invoke_cbs(rtp, rtpcp);
497 // Wait for one grace period.
498 static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
502 mutex_lock(&rtp->tasks_gp_mutex);
504 // If there were none, wait a bit and start over.
505 if (unlikely(midboot)) {
508 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
509 rcuwait_wait_event(&rtp->cbs_wait,
510 (needgpcb = rcu_tasks_need_gpcb(rtp)),
514 if (needgpcb & 0x2) {
515 // Wait for one grace period.
516 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
517 rtp->gp_start = jiffies;
518 rcu_seq_start(&rtp->tasks_gp_seq);
519 rtp->gp_func(rtp);
520 rcu_seq_end(&rtp->tasks_gp_seq);
524 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
525 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
526 mutex_unlock(&rtp->tasks_gp_mutex);
529 // RCU-tasks kthread that detects grace periods and invokes callbacks.
530 static int __noreturn rcu_tasks_kthread(void *arg)
532 struct rcu_tasks *rtp = arg;
534 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
535 housekeeping_affine(current, HK_TYPE_RCU);
536 WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
539 * Each pass through the following loop makes one check for
540 * newly arrived callbacks, and, if there are some, waits for
541 * one RCU-tasks grace period and then invokes the callbacks.
542 * This loop is terminated by the system going down. ;-)
545 // Wait for one grace period and invoke any callbacks
546 // that are ready.
547 rcu_tasks_one_gp(rtp, false);
549 // Paranoid sleep to keep this from entering a tight loop.
550 schedule_timeout_idle(rtp->gp_sleep);
554 // Wait for a grace period for the specified flavor of Tasks RCU.
555 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
557 /* Complain if the scheduler has not started. */
558 RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
559 "synchronize_rcu_tasks called too soon");
561 // If the grace-period kthread is running, use it.
562 if (READ_ONCE(rtp->kthread_ptr)) {
563 wait_rcu_gp(rtp->call_func);
564 return;
565 }
566 rcu_tasks_one_gp(rtp, true);
569 /* Spawn RCU-tasks grace-period kthread. */
570 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
572 struct task_struct *t;
574 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
575 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
577 smp_mb(); /* Ensure others see full kthread. */
580 #ifndef CONFIG_TINY_RCU
583 * Print any non-default Tasks RCU settings.
585 static void __init rcu_tasks_bootup_oddness(void)
587 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
590 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
591 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
592 rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
593 if (rtsimc != rcu_task_stall_info_mult) {
594 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
595 rcu_task_stall_info_mult = rtsimc;
597 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
598 #ifdef CONFIG_TASKS_RCU
599 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
600 #endif /* #ifdef CONFIG_TASKS_RCU */
601 #ifdef CONFIG_TASKS_RUDE_RCU
602 pr_info("\tRude variant of Tasks RCU enabled.\n");
603 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
604 #ifdef CONFIG_TASKS_TRACE_RCU
605 pr_info("\tTracing variant of Tasks RCU enabled.\n");
606 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
609 #endif /* #ifndef CONFIG_TINY_RCU */
611 #ifndef CONFIG_TINY_RCU
612 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
613 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
616 bool havecbs = false;
618 for_each_possible_cpu(cpu) {
619 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
621 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
626 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
628 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
629 jiffies - data_race(rtp->gp_jiffies),
630 data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
631 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
632 ".k"[!!data_race(rtp->kthread_ptr)],
636 #endif // #ifndef CONFIG_TINY_RCU
638 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
640 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
642 ////////////////////////////////////////////////////////////////////////
644 // Shared code between task-list-scanning variants of Tasks RCU.
646 /* Wait for one RCU-tasks grace period. */
647 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
649 struct task_struct *g;
653 unsigned long lastinfo;
654 unsigned long lastreport;
655 bool reported = false;
657 struct task_struct *t;
659 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
663 * There were callbacks, so we need to wait for an RCU-tasks
664 * grace period. Start off by scanning the task list for tasks
665 * that are not already voluntarily blocked. Mark these tasks
666 * and make a list of them in holdouts.
668 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
670 for_each_process_thread(g, t)
671 rtp->pertask_func(t, &holdouts);
674 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
675 rtp->postscan_func(&holdouts);
678 * Each pass through the following loop scans the list of holdout
679 * tasks, removing any that are no longer holdouts. When the list
680 * is empty, we are done.
682 lastreport = jiffies;
683 lastinfo = lastreport;
684 rtsi = READ_ONCE(rcu_task_stall_info);
686 // Start off with initial wait and slowly back off to 1 HZ wait.
687 fract = rtp->init_fract;
689 while (!list_empty(&holdouts)) {
695 // Slowly back off waiting for holdouts
696 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
697 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
698 schedule_timeout_idle(fract);
700 exp = jiffies_to_nsecs(fract);
701 __set_current_state(TASK_IDLE);
702 schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
708 rtst = READ_ONCE(rcu_task_stall_timeout);
709 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
711 lastreport = jiffies;
715 WARN_ON(signal_pending(current));
716 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
717 rtp->holdouts_func(&holdouts, needreport, &firstreport);
719 // Print pre-stall informational messages if needed.
721 if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
723 rtsi = rtsi * rcu_task_stall_info_mult;
724 pr_info("%s: %s grace period %lu is %lu jiffies old.\n",
725 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
729 set_tasks_gp_state(rtp, RTGS_POST_GP);
730 rtp->postgp_func(rtp);
733 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
735 #ifdef CONFIG_TASKS_RCU
737 ////////////////////////////////////////////////////////////////////////
739 // Simple variant of RCU whose quiescent states are voluntary context
740 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
741 // As such, grace periods can take one good long time. There are no
742 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
743 // because this implementation is intended to get the system into a safe
744 // state for some of the manipulations involved in tracing and the like.
745 // Finally, this implementation does not support high call_rcu_tasks()
746 // rates from multiple CPUs. If this is required, per-CPU callback lists
747 // will be needed.
749 // The implementation uses rcu_tasks_wait_gp(), which relies on function
750 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
751 // function sets these function pointers up so that rcu_tasks_wait_gp()
752 // invokes these functions in this order:
754 // rcu_tasks_pregp_step():
755 // Invokes synchronize_rcu() in order to wait for all in-flight
756 // t->on_rq and t->nvcsw transitions to complete. This works because
757 // all such transitions are carried out with interrupts disabled.
758 // rcu_tasks_pertask(), invoked on every non-idle task:
759 // For every runnable non-idle task other than the current one, use
760 // get_task_struct() to pin down that task, snapshot that task's
761 // number of voluntary context switches, and add that task to the
762 // holdout list.
763 // rcu_tasks_postscan():
764 // Invoke synchronize_srcu() to ensure that all tasks that were
765 // in the process of exiting (and which thus might not know to
766 // synchronize with this RCU Tasks grace period) have completed
767 // exiting.
768 // check_all_holdout_tasks(), repeatedly until holdout list is empty:
769 // Scans the holdout list, attempting to identify a quiescent state
770 // for each task on the list. If there is a quiescent state, the
771 // corresponding task is removed from the holdout list.
772 // rcu_tasks_postgp():
773 // Invokes synchronize_rcu() in order to ensure that all prior
774 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
775 // to have happened before the end of this RCU Tasks grace period.
776 // Again, this works because all such transitions are carried out
777 // with interrupts disabled.
779 // For each exiting task, the exit_tasks_rcu_start() and
780 // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
781 // read-side critical sections waited for by rcu_tasks_postscan().
783 // Pre-grace-period update-side code is ordered before the grace
784 // period via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
785 // is ordered before the grace period via synchronize_rcu() call in
786 // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
787 // disabling.
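/*
 * Condensed sketch (for orientation only, not from the original source) of
 * how rcu_tasks_wait_gp(), defined earlier in this file, is assumed to drive
 * the flavor-specific hooks named above:
 *
 *	rtp->pregp_func();
 *	for_each_process_thread(g, t)
 *		rtp->pertask_func(t, &holdouts);
 *	rtp->postscan_func(&holdouts);
 *	while (!list_empty(&holdouts))
 *		rtp->holdouts_func(&holdouts, needreport, &firstreport);
 *	rtp->postgp_func(rtp);
 */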
789 /* Pre-grace-period preparation. */
790 static void rcu_tasks_pregp_step(void)
793 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
794 * to complete. Invoking synchronize_rcu() suffices because all
795 * these transitions occur with interrupts disabled. Without this
796 * synchronize_rcu(), a read-side critical section that started
797 * before the grace period might be incorrectly seen as having
798 * started after the grace period.
800 * This synchronize_rcu() also dispenses with the need for a
801 * memory barrier on the first store to t->rcu_tasks_holdout,
802 * as it forces the store to happen after the beginning of the
803 * grace period.
804 */
805 synchronize_rcu();
806 }
808 /* Per-task initial processing. */
809 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
811 if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
812 get_task_struct(t);
813 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
814 WRITE_ONCE(t->rcu_tasks_holdout, true);
815 list_add(&t->rcu_tasks_holdout_list, hop);
819 /* Processing between scanning the tasklist and draining the holdout list. */
820 static void rcu_tasks_postscan(struct list_head *hop)
823 * Wait for tasks that are in the process of exiting. This
824 * does only part of the job, ensuring that all tasks that were
825 * previously exiting reach the point where they have disabled
826 * preemption, allowing the later synchronize_rcu() to finish
827 * the job.
828 */
829 synchronize_srcu(&tasks_rcu_exit_srcu);
830 }
832 /* See if tasks are still holding out, complain if so. */
833 static void check_holdout_task(struct task_struct *t,
834 bool needreport, bool *firstreport)
838 if (!READ_ONCE(t->rcu_tasks_holdout) ||
839 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
840 !READ_ONCE(t->on_rq) ||
841 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
842 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
843 WRITE_ONCE(t->rcu_tasks_holdout, false);
844 list_del_init(&t->rcu_tasks_holdout_list);
845 put_task_struct(t);
846 return;
847 }
848 rcu_request_urgent_qs_task(t);
852 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
853 *firstreport = false;
856 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
857 t, ".I"[is_idle_task(t)],
858 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
859 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
860 t->rcu_tasks_idle_cpu, cpu);
864 /* Scan the holdout lists for tasks no longer holding out. */
865 static void check_all_holdout_tasks(struct list_head *hop,
866 bool needreport, bool *firstreport)
868 struct task_struct *t, *t1;
870 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
871 check_holdout_task(t, needreport, firstreport);
876 /* Finish off the Tasks-RCU grace period. */
877 static void rcu_tasks_postgp(struct rcu_tasks *rtp)
880 * Because ->on_rq and ->nvcsw are not guaranteed to have full
881 * memory barriers prior to them in the schedule() path, memory
882 * reordering on other CPUs could cause their RCU-tasks read-side
883 * critical sections to extend past the end of the grace period.
884 * However, because these ->nvcsw updates are carried out with
885 * interrupts disabled, we can use synchronize_rcu() to force the
886 * needed ordering on all such CPUs.
888 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
889 * accesses to be within the grace period, avoiding the need for
890 * memory barriers for ->rcu_tasks_holdout accesses.
892 * In addition, this synchronize_rcu() waits for exiting tasks
893 * to complete their final preempt_disable() region of execution,
894 * cleaning up after the synchronize_srcu() above.
895 */
896 synchronize_rcu();
897 }
899 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
900 DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
903 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
904 * @rhp: structure to be used for queueing the RCU updates.
905 * @func: actual callback function to be invoked after the grace period
907 * The callback function will be invoked some time after a full grace
908 * period elapses, in other words after all currently executing RCU
909 * read-side critical sections have completed. call_rcu_tasks() assumes
910 * that the read-side critical sections end at a voluntary context
911 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
912 * or transition to usermode execution. As such, there are no read-side
913 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
914 * this primitive is intended to determine that all tasks have passed
915 * through a safe state, not so much for data-structure synchronization.
917 * See the description of call_rcu() for more detailed information on
918 * memory ordering guarantees.
920 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
922 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
924 EXPORT_SYMBOL_GPL(call_rcu_tasks);
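/*
 * Hedged usage sketch (hypothetical types and names, not part of this file):
 * freeing a tracing trampoline once no task can still be executing in it.
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *text;
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
 *
 *		// Every task has since passed through a voluntary context
 *		// switch, idle, or usermode, so nothing runs in tp->text.
 *		kfree(tp);
 *	}
 *
 *	// Update side: unlink the trampoline, then defer the free.
 *	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 */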
927 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
929 * Control will return to the caller some time after a full rcu-tasks
930 * grace period has elapsed, in other words after all currently
931 * executing rcu-tasks read-side critical sections have completed. These
932 * read-side critical sections are delimited by calls to schedule(),
933 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
934 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
936 * This is a very specialized primitive, intended only for a few uses in
937 * tracing and other situations requiring manipulation of function
938 * preambles and profiling hooks. The synchronize_rcu_tasks() function
939 * is not (yet) intended for heavy use from multiple CPUs.
941 * See the description of synchronize_rcu() for more detailed information
942 * on memory ordering guarantees.
944 void synchronize_rcu_tasks(void)
946 synchronize_rcu_tasks_generic(&rcu_tasks);
948 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
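/*
 * Hedged usage sketch (hypothetical function names): the synchronous form of
 * the pattern shown above, suitable only for slow paths such as module
 * unload or trampoline teardown.
 *
 *	unregister_my_tracing_hook();	// No new tasks can enter the trampoline.
 *	synchronize_rcu_tasks();	// Wait out tasks already inside it.
 *	free_my_trampoline();		// Now safe to free the executable page.
 */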
951 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
953 * Although the current implementation is guaranteed to wait, it is not
954 * obligated to, for example, if there are no pending callbacks.
956 void rcu_barrier_tasks(void)
958 rcu_barrier_tasks_generic(&rcu_tasks);
960 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
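/*
 * Hedged usage sketch (hypothetical names): a teardown path that has queued
 * callbacks via call_rcu_tasks() must wait for them before freeing the code
 * or data those callbacks reference, for example on module exit:
 *
 *	stop_queueing_my_callbacks();
 *	rcu_barrier_tasks();		// All previously queued callbacks have run.
 *	kmem_cache_destroy(my_cache);
 */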
962 static int __init rcu_spawn_tasks_kthread(void)
964 cblist_init_generic(&rcu_tasks);
965 rcu_tasks.gp_sleep = HZ / 10;
966 rcu_tasks.init_fract = HZ / 10;
967 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
968 rcu_tasks.pertask_func = rcu_tasks_pertask;
969 rcu_tasks.postscan_func = rcu_tasks_postscan;
970 rcu_tasks.holdouts_func = check_all_holdout_tasks;
971 rcu_tasks.postgp_func = rcu_tasks_postgp;
972 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
976 #if !defined(CONFIG_TINY_RCU)
977 void show_rcu_tasks_classic_gp_kthread(void)
979 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
981 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
982 #endif // !defined(CONFIG_TINY_RCU)
984 /* Do the srcu_read_lock() for the above synchronize_srcu(). */
985 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
988 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
992 /* Do the srcu_read_unlock() for the above synchronize_srcu(). */
993 void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
995 struct task_struct *t = current;
998 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
1000 exit_tasks_rcu_finish_trace(t);
1003 #else /* #ifdef CONFIG_TASKS_RCU */
1004 void exit_tasks_rcu_start(void) { }
1005 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1006 #endif /* #else #ifdef CONFIG_TASKS_RCU */
1008 #ifdef CONFIG_TASKS_RUDE_RCU
1010 ////////////////////////////////////////////////////////////////////////
1012 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1013 // passing an empty function to schedule_on_each_cpu(). This approach
1014 // provides an asynchronous call_rcu_tasks_rude() API and batching of
1015 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
1016 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
1017 // and induces otherwise unnecessary context switches on all online CPUs,
1018 // whether idle or not.
1020 // Callback handling is provided by the rcu_tasks_kthread() function.
1022 // Ordering is provided by the scheduler's context-switch code.
1024 // Empty function to allow workqueues to force a context switch.
1025 static void rcu_tasks_be_rude(struct work_struct *work)
1029 // Wait for one rude RCU-tasks grace period.
1030 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1032 if (num_online_cpus() <= 1)
1033 return; // Fastpath for only one CPU.
1035 rtp->n_ipis += cpumask_weight(cpu_online_mask);
1036 schedule_on_each_cpu(rcu_tasks_be_rude);
1039 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1040 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1044 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
1045 * @rhp: structure to be used for queueing the RCU updates.
1046 * @func: actual callback function to be invoked after the grace period
1048 * The callback function will be invoked some time after a full grace
1049 * period elapses, in other words after all currently executing RCU
1050 * read-side critical sections have completed. call_rcu_tasks_rude()
1051 * assumes that the read-side critical sections end at context switch,
1052 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1053 * usermode execution is schedulable). As such, there are no read-side
1054 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1055 * this primitive is intended to determine that all tasks have passed
1056 * through a safe state, not so much for data-structure synchronization.
1058 * See the description of call_rcu() for more detailed information on
1059 * memory ordering guarantees.
1061 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1063 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1065 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
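/*
 * Hedged usage sketch: identical in form to the call_rcu_tasks() example
 * earlier in this file, but relying only on context switches and usermode
 * execution as quiescent states (hypothetical names):
 *
 *	call_rcu_tasks_rude(&tp->rh, my_tramp_free_cb);
 */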
1068 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1070 * Control will return to the caller some time after a rude rcu-tasks
1071 * grace period has elapsed, in other words after all currently
1072 * executing rcu-tasks read-side critical sections have completed. These
1073 * read-side critical sections are delimited by calls to schedule(),
1074 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1075 * context), and (in theory, anyway) cond_resched().
1077 * This is a very specialized primitive, intended only for a few uses in
1078 * tracing and other situations requiring manipulation of function preambles
1079 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
1080 * (yet) intended for heavy use from multiple CPUs.
1082 * See the description of synchronize_rcu() for more detailed information
1083 * on memory ordering guarantees.
1085 void synchronize_rcu_tasks_rude(void)
1087 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1089 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1092 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1094 * Although the current implementation is guaranteed to wait, it is not
1095 * obligated to, for example, if there are no pending callbacks.
1097 void rcu_barrier_tasks_rude(void)
1099 rcu_barrier_tasks_generic(&rcu_tasks_rude);
1101 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1103 static int __init rcu_spawn_tasks_rude_kthread(void)
1105 cblist_init_generic(&rcu_tasks_rude);
1106 rcu_tasks_rude.gp_sleep = HZ / 10;
1107 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1111 #if !defined(CONFIG_TINY_RCU)
1112 void show_rcu_tasks_rude_gp_kthread(void)
1114 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1116 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1117 #endif // !defined(CONFIG_TINY_RCU)
1118 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1120 ////////////////////////////////////////////////////////////////////////
1122 // Tracing variant of Tasks RCU. This variant is designed to be used
1123 // to protect tracing hooks, including those of BPF. This variant
1126 // 1. Has explicit read-side markers to allow finite grace periods
1127 // in the face of in-kernel loops for PREEMPT=n builds.
1129 // 2. Protects code in the idle loop, exception entry/exit, and
1130 // CPU-hotplug code paths, similar to the capabilities of SRCU.
1132 // 3. Avoids expensive read-side instructions, having overhead similar
1133 // to that of Preemptible RCU.
1135 // There are of course downsides. The grace-period code can send IPIs to
1136 // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
1137 // It is necessary to scan the full tasklist, much as for Tasks RCU. There
1138 // is a single callback queue guarded by a single lock, again, much as for
1139 // Tasks RCU. If needed, these downsides can be at least partially remedied.
1141 // Perhaps most important, this variant of RCU does not affect the vanilla
1142 // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
1143 // readers can operate from idle, offline, and exception entry/exit in no
1144 // way allows rcu_preempt and rcu_sched readers to also do so.
1146 // The implementation uses rcu_tasks_wait_gp(), which relies on function
1147 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
1148 // function sets these function pointers up so that rcu_tasks_wait_gp()
1149 // invokes these functions in this order:
1151 // rcu_tasks_trace_pregp_step():
1152 // Initialize the count of readers and block CPU-hotplug operations.
1153 // rcu_tasks_trace_pertask(), invoked on every non-idle task:
1154 // Initialize per-task state and attempt to identify an immediate
1155 // quiescent state for that task, or, failing that, attempt to
1156 // set that task's .need_qs flag so that task's next outermost
1157 // rcu_read_unlock_trace() will report the quiescent state (in which
1158 // case the count of readers is incremented). If both attempts fail,
1159 // the task is added to a "holdout" list. Note that IPIs are used
1160 // to invoke trc_read_check_handler() in the context of running tasks
1161 // in order to avoid ordering overhead on common-case shared-variable
1163 // rcu_tasks_trace_postscan():
1164 // Initialize state and attempt to identify an immediate quiescent
1165 // state as above (but only for idle tasks), unblock CPU-hotplug
1166 // operations, and wait for an RCU grace period to avoid races with
1167 // tasks that are in the process of exiting.
1168 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1169 // Scans the holdout list, attempting to identify a quiescent state
1170 // for each task on the list. If there is a quiescent state, the
1171 // corresponding task is removed from the holdout list.
1172 // rcu_tasks_trace_postgp():
1173 // Wait for the count of readers to drop to zero, reporting any stalls.
1174 // Also execute full memory barriers to maintain ordering with code
1175 // executing after the grace period.
1177 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1179 // Pre-grace-period update-side code is ordered before the grace
1180 // period via the ->cbs_lock and barriers in rcu_tasks_kthread().
1181 // Pre-grace-period read-side code is ordered before the grace period by
1182 // atomic_dec_and_test() of the count of readers (for IPIed readers) and by
1183 // scheduler context-switch ordering (for locked-down non-running readers).
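/*
 * Hedged read-side usage sketch (hypothetical hook pointer, not part of this
 * file): RCU Tasks Trace readers mark their critical sections explicitly, so
 * they may run in idle, exception entry/exit, or CPU-hotplug paths.
 *
 *	rcu_read_lock_trace();
 *	hook = READ_ONCE(my_trace_hook);	// hypothetical pointer
 *	if (hook)
 *		hook->func(hook->data);
 *	rcu_read_unlock_trace();
 */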
1185 // The lockdep state must be outside of #ifdef to be useful.
1186 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1187 static struct lock_class_key rcu_lock_trace_key;
1188 struct lockdep_map rcu_trace_lock_map =
1189 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1190 EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1191 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1193 #ifdef CONFIG_TASKS_TRACE_RCU
1195 // Record outstanding IPIs to each CPU. No point in sending two...
1196 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1198 // The number of detections of task quiescent state relying on
1199 // heavyweight readers executing explicit memory barriers.
1200 static unsigned long n_heavy_reader_attempts;
1201 static unsigned long n_heavy_reader_updates;
1202 static unsigned long n_heavy_reader_ofl_updates;
1204 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1205 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1208 /* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1209 static u8 rcu_ld_need_qs(struct task_struct *t)
1211 smp_mb(); // Enforce full grace-period ordering.
1212 return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1215 /* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1216 static void rcu_st_need_qs(struct task_struct *t, u8 v)
1218 smp_store_release(&t->trc_reader_special.b.need_qs, v);
1219 smp_mb(); // Enforce full grace-period ordering.
1223 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1224 * the four-byte operand-size restriction of some platforms.
1225 * Returns the old value, which is often ignored.
1227 u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1229 union rcu_special ret;
1230 union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
1231 union rcu_special trs_new = trs_old;
1233 if (trs_old.b.need_qs != old)
1234 return trs_old.b.need_qs;
1235 trs_new.b.need_qs = new;
1236 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
1237 return ret.b.need_qs;
1239 EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
1241 /* If we are the last reader, wake up the grace-period kthread. */
1242 void rcu_read_unlock_trace_special(struct task_struct *t)
1244 int nqs = (rcu_ld_need_qs(t) == (TRC_NEED_QS_CHECKED | TRC_NEED_QS));
1246 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1247 smp_mb(); // Pairs with update-side barriers.
1248 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1250 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1251 TRC_NEED_QS_CHECKED);
1253 WARN_ONCE(result != (TRC_NEED_QS_CHECKED | TRC_NEED_QS),
1254 "%s: result = %d", __func__, result);
1256 WRITE_ONCE(t->trc_reader_nesting, 0);
1258 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1260 /* Add a task to the holdout list, if it is not already on the list. */
1261 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1263 if (list_empty(&t->trc_holdout_list)) {
1265 list_add(&t->trc_holdout_list, bhp);
1269 /* Remove a task from the holdout list, if it is in fact present. */
1270 static void trc_del_holdout(struct task_struct *t)
1272 if (!list_empty(&t->trc_holdout_list)) {
1273 list_del_init(&t->trc_holdout_list);
1278 /* IPI handler to check task state. */
1279 static void trc_read_check_handler(void *t_in)
1282 struct task_struct *t = current;
1283 struct task_struct *texp = t_in;
1285 // If the task is no longer running on this CPU, leave.
1286 if (unlikely(texp != t))
1287 goto reset_ipi; // Already on holdout list, so will check later.
1289 // If the task is not in a read-side critical section, and
1290 // if this is the last reader, awaken the grace-period kthread.
1291 nesting = READ_ONCE(t->trc_reader_nesting);
1292 if (likely(!nesting)) {
1293 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1296 // If we are racing with an rcu_read_unlock_trace(), try again later.
1297 if (unlikely(nesting < 0))
1300 // Get here if the task is in a read-side critical section. Set
1301 // its state so that it will awaken the grace-period kthread upon
1302 // exit from that critical section.
1303 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1306 // Allow future IPIs to be sent on CPU and for task.
1307 // Also order this IPI handler against any later manipulations of
1308 // the intended task.
1309 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1310 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1313 /* Callback function for scheduler to check locked-down task. */
1314 static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1316 struct list_head *bhp = bhp_in;
1317 int cpu = task_cpu(t);
1319 bool ofl = cpu_is_offline(cpu);
1321 if (task_curr(t) && !ofl) {
1322 // If no chance of heavyweight readers, do it the hard way.
1323 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1326 // If heavyweight readers are enabled on the remote task,
1327 // we can inspect its state despite its currently running.
1328 // However, we cannot safely change its state.
1329 n_heavy_reader_attempts++;
1330 // Check for "running" idle tasks on offline CPUs.
1331 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1332 return -EINVAL; // No quiescent state, do it the hard way.
1333 n_heavy_reader_updates++;
1336 // The task is not running, so C-language access is safe.
1337 nesting = t->trc_reader_nesting;
1338 WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t));
1339 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1340 n_heavy_reader_ofl_updates++;
1343 // If not exiting a read-side critical section, mark as checked
1344 // so that the grace-period kthread will remove it from the
1345 // holdout list.
1346 if (!nesting) {
1347 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1348 return 0; // In QS, so done.
1349 }
1350 if (nesting < 0)
1351 return -EINVAL; // QS transitioning, try again later.
1353 // The task is in a read-side critical section, so set up its
1354 // state so that it will update state upon exit from that critical
1355 // section.
1356 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
1357 trc_add_holdout(t, bhp);
1361 /* Attempt to extract the state for the specified task. */
1362 static void trc_wait_for_one_reader(struct task_struct *t,
1363 struct list_head *bhp)
1367 // If a previous IPI is still in flight, let it complete.
1368 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1371 // The current task had better be in a quiescent state.
1373 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1374 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1378 // Attempt to nail down the task for inspection.
1380 if (!task_call_func(t, trc_inspect_reader, bhp)) {
1386 // If this task is not yet on the holdout list, then we are in
1387 // an RCU read-side critical section. Otherwise, the invocation of
1388 // trc_add_holdout() that added it to the list did the necessary
1389 // get_task_struct(). Either way, the task cannot be freed out
1390 // from under this code.
1392 // If currently running, send an IPI, either way, add to list.
1393 trc_add_holdout(t, bhp);
1394 if (task_curr(t) &&
1395 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1396 // The task is currently running, so try IPIing it.
1399 // If there is already an IPI outstanding, let it happen.
1400 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1403 per_cpu(trc_ipi_to_cpu, cpu) = true;
1404 t->trc_ipi_to_cpu = cpu;
1405 rcu_tasks_trace.n_ipis++;
1406 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1407 // Just in case there is some other reason for
1408 // failure than the target CPU being offline.
1409 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
1411 rcu_tasks_trace.n_ipis_fails++;
1412 per_cpu(trc_ipi_to_cpu, cpu) = false;
1413 t->trc_ipi_to_cpu = -1;
1418 /* Initialize for a new RCU-tasks-trace grace period. */
1419 static void rcu_tasks_trace_pregp_step(void)
1423 // There shouldn't be any old IPIs, but...
1424 for_each_possible_cpu(cpu)
1425 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1427 // Disable CPU hotplug across the tasklist scan.
1428 // This also waits for all readers in CPU-hotplug code paths.
1432 /* Do first-round processing for the specified task. */
1433 static void rcu_tasks_trace_pertask(struct task_struct *t,
1434 struct list_head *hop)
1436 // During early boot when there is only the one boot CPU, there
1437 // is no idle task for the other CPUs. Also, the grace-period
1438 // kthread is always in a quiescent state. Either way, just return.
1439 if (unlikely(t == NULL) || t == current)
1442 rcu_st_need_qs(t, 0);
1443 t->trc_ipi_to_cpu = -1;
1444 trc_wait_for_one_reader(t, hop);
1448 * Do intermediate processing between task and holdout scans and
1449 * pick up the idle tasks.
1451 static void rcu_tasks_trace_postscan(struct list_head *hop)
1455 for_each_online_cpu(cpu)
1456 rcu_tasks_trace_pertask(idle_task(cpu), hop);
1458 // Re-enable CPU hotplug now that the tasklist scan has completed.
1461 // Wait for late-stage exiting tasks to finish exiting.
1462 // These might have passed the call to exit_tasks_rcu_finish().
1464 // Any tasks that exit after this point will set
1465 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1468 /* Communicate task state back to the RCU tasks trace stall warning request. */
1469 struct trc_stall_chk_rdr {
1470 int nesting;
1471 int ipi_to_cpu;
1472 u8 needqs;
1473 };
1475 static int trc_check_slow_task(struct task_struct *t, void *arg)
1477 struct trc_stall_chk_rdr *trc_rdrp = arg;
1479 if (task_curr(t) && cpu_online(task_cpu(t)))
1480 return false; // It is running, so decline to inspect it.
1481 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1482 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1483 trc_rdrp->needqs = rcu_ld_need_qs(t);
1487 /* Show the state of a task stalling the current RCU tasks trace GP. */
1488 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1491 struct trc_stall_chk_rdr trc_rdr;
1492 bool is_idle_tsk = is_idle_task(t);
1495 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1496 *firstreport = false;
1499 if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1500 pr_alert("P%d: %c%c\n",
1502 ".I"[t->trc_ipi_to_cpu >= 0],
1505 pr_alert("P%d: %c%c%c nesting: %d%c%c cpu: %d%s\n",
1507 ".I"[trc_rdr.ipi_to_cpu >= 0],
1509 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1511 " !CN"[trc_rdr.needqs & 0x3],
1512 " ?"[trc_rdr.needqs > 0x3],
1513 cpu, cpu_online(cpu) ? "" : "(offline)");
1517 /* List stalled IPIs for RCU tasks trace. */
1518 static void show_stalled_ipi_trace(void)
1522 for_each_possible_cpu(cpu)
1523 if (per_cpu(trc_ipi_to_cpu, cpu))
1524 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1527 /* Do one scan of the holdout list. */
1528 static void check_all_holdout_tasks_trace(struct list_head *hop,
1529 bool needreport, bool *firstreport)
1531 struct task_struct *g, *t;
1533 // Disable CPU hotplug across the holdout list scan.
1536 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1537 // If safe and needed, try to check the current task.
1538 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1539 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
1540 trc_wait_for_one_reader(t, hop);
1542 // If check succeeded, remove this task from the list.
1543 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1544 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
1545 trc_del_holdout(t);
1546 else if (needreport)
1547 show_stalled_task_trace(t, firstreport);
1550 // Re-enable CPU hotplug now that the holdout list scan has completed.
1555 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1556 show_stalled_ipi_trace();
1560 static void rcu_tasks_trace_empty_fn(void *unused)
1564 /* Wait for grace period to complete and provide ordering. */
1565 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1569 // Wait for any lingering IPI handlers to complete. Note that
1570 // if a CPU has gone offline or transitioned to userspace in the
1571 // meantime, all IPI handlers should have been drained beforehand.
1572 // Yes, this assumes that CPUs process IPIs in order. If that ever
1573 // changes, there will need to be a recheck and/or timed wait.
1574 for_each_online_cpu(cpu)
1575 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1576 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1578 smp_mb(); // Caller's code must be ordered after wakeup.
1579 // Pairs with pretty much every ordering primitive.
1582 /* Report any needed quiescent state for this exiting task. */
1583 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1585 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1586 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1587 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS))
1588 rcu_read_unlock_trace_special(t);
1590 WRITE_ONCE(t->trc_reader_nesting, 0);
1594 * call_rcu_tasks_trace() - Queue a callback trace task-based grace period
1595 * @rhp: structure to be used for queueing the RCU updates.
1596 * @func: actual callback function to be invoked after the grace period
1598 * The callback function will be invoked some time after a trace rcu-tasks
1599 * grace period elapses, in other words after all currently executing
1600 * trace rcu-tasks read-side critical sections have completed. These
1601 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1602 * and rcu_read_unlock_trace().
1604 * See the description of call_rcu() for more detailed information on
1605 * memory ordering guarantees.
1607 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1609 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1611 EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
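/*
 * Hedged update-side usage sketch pairing with the read-side sketch earlier
 * in this file (hypothetical names): publish a new hook, then defer freeing
 * the old one until all rcu_read_lock_trace() readers that might still see
 * it have finished.
 *
 *	old = xchg(&my_trace_hook, new_hook);
 *	if (old)
 *		call_rcu_tasks_trace(&old->rh, my_hook_free_cb);
 */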
1614 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1616 * Control will return to the caller some time after a trace rcu-tasks
1617 * grace period has elapsed, in other words after all currently executing
1618 * trace rcu-tasks read-side critical sections have elapsed. These read-side
1619 * critical sections are delimited by calls to rcu_read_lock_trace()
1620 * and rcu_read_unlock_trace().
1622 * This is a very specialized primitive, intended only for a few uses in
1623 * tracing and other situations requiring manipulation of function preambles
1624 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1625 * (yet) intended for heavy use from multiple CPUs.
1627 * See the description of synchronize_rcu() for more detailed information
1628 * on memory ordering guarantees.
1630 void synchronize_rcu_tasks_trace(void)
1632 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1633 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1635 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
1638 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1640 * Although the current implementation is guaranteed to wait, it is not
1641 * obligated to, for example, if there are no pending callbacks.
1643 void rcu_barrier_tasks_trace(void)
1645 rcu_barrier_tasks_generic(&rcu_tasks_trace);
1647 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
1649 static int __init rcu_spawn_tasks_trace_kthread(void)
1651 cblist_init_generic(&rcu_tasks_trace);
1652 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1653 rcu_tasks_trace.gp_sleep = HZ / 10;
1654 rcu_tasks_trace.init_fract = HZ / 10;
1656 rcu_tasks_trace.gp_sleep = HZ / 200;
1657 if (rcu_tasks_trace.gp_sleep <= 0)
1658 rcu_tasks_trace.gp_sleep = 1;
1659 rcu_tasks_trace.init_fract = HZ / 200;
1660 if (rcu_tasks_trace.init_fract <= 0)
1661 rcu_tasks_trace.init_fract = 1;
1663 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1664 rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
1665 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1666 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1667 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1668 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1672 #if !defined(CONFIG_TINY_RCU)
1673 void show_rcu_tasks_trace_gp_kthread(void)
1677 sprintf(buf, "h:%lu/%lu/%lu",
1678 data_race(n_heavy_reader_ofl_updates),
1679 data_race(n_heavy_reader_updates),
1680 data_race(n_heavy_reader_attempts));
1681 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1683 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1684 #endif // !defined(CONFIG_TINY_RCU)
1686 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1687 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1688 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1690 #ifndef CONFIG_TINY_RCU
1691 void show_rcu_tasks_gp_kthreads(void)
1693 show_rcu_tasks_classic_gp_kthread();
1694 show_rcu_tasks_rude_gp_kthread();
1695 show_rcu_tasks_trace_gp_kthread();
1697 #endif /* #ifndef CONFIG_TINY_RCU */
1699 #ifdef CONFIG_PROVE_RCU
1700 struct rcu_tasks_test_desc {
1701 struct rcu_head rh;
1702 const char *name;
1703 bool notrun;
1704 };
1706 static struct rcu_tasks_test_desc tests[] = {
1708 .name = "call_rcu_tasks()",
1709 /* If not defined, the test is skipped. */
1710 .notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
1713 .name = "call_rcu_tasks_rude()",
1714 /* If not defined, the test is skipped. */
1715 .notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1718 .name = "call_rcu_tasks_trace()",
1719 /* If not defined, the test is skipped. */
1720 .notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1724 static void test_rcu_tasks_callback(struct rcu_head *rhp)
1726 struct rcu_tasks_test_desc *rttd =
1727 container_of(rhp, struct rcu_tasks_test_desc, rh);
1729 pr_info("Callback from %s invoked.\n", rttd->name);
1731 rttd->notrun = true;
1734 static void rcu_tasks_initiate_self_tests(void)
1736 pr_info("Running RCU-tasks wait API self tests\n");
1737 #ifdef CONFIG_TASKS_RCU
1738 synchronize_rcu_tasks();
1739 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
1742 #ifdef CONFIG_TASKS_RUDE_RCU
1743 synchronize_rcu_tasks_rude();
1744 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
1747 #ifdef CONFIG_TASKS_TRACE_RCU
1748 synchronize_rcu_tasks_trace();
1749 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
1753 static int rcu_tasks_verify_self_tests(void)
1758 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1759 if (!tests[i].notrun) { // still hanging.
1760 pr_err("%s has been failed.\n", tests[i].name);
1770 late_initcall(rcu_tasks_verify_self_tests);
1771 #else /* #ifdef CONFIG_PROVE_RCU */
1772 static void rcu_tasks_initiate_self_tests(void) { }
1773 #endif /* #else #ifdef CONFIG_PROVE_RCU */
1775 void __init rcu_init_tasks_generic(void)
1777 #ifdef CONFIG_TASKS_RCU
1778 rcu_spawn_tasks_kthread();
1781 #ifdef CONFIG_TASKS_RUDE_RCU
1782 rcu_spawn_tasks_rude_kthread();
1785 #ifdef CONFIG_TASKS_TRACE_RCU
1786 rcu_spawn_tasks_trace_kthread();
1789 // Run the self-tests.
1790 rcu_tasks_initiate_self_tests();
1793 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1794 static inline void rcu_tasks_bootup_oddness(void) {}
1795 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */