1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Task-based RCU implementations.
5 * Copyright (C) 2020 Paul E. McKenney
8 #ifdef CONFIG_TASKS_RCU_GENERIC
9 #include "rcu_segcblist.h"
11 ////////////////////////////////////////////////////////////////////////
13 // Generic data structures.
16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
17 typedef void (*pregp_func_t)(void);
18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
19 typedef void (*postscan_func_t)(struct list_head *hop);
20 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
25 * @cblist: Callback list.
26 * @lock: Lock protecting per-CPU callback list.
27 * @rtp_jiffies: Jiffies counter value for statistics.
28 * @rtp_n_lock_retries: Rough lock-contention statistic.
29 * @rtp_work: Work queue for invoking callbacks.
30 * @rtp_irq_work: IRQ work queue for deferred wakeups.
31 * @barrier_q_head: RCU callback for barrier operation.
32 * @cpu: CPU number corresponding to this entry.
33 * @rtpp: Pointer to the rcu_tasks structure.
35 struct rcu_tasks_percpu {
36 struct rcu_segcblist cblist;
37 raw_spinlock_t __private lock;
38 unsigned long rtp_jiffies;
39 unsigned long rtp_n_lock_retries;
40 struct work_struct rtp_work;
41 struct irq_work rtp_irq_work;
42 struct rcu_head barrier_q_head;
44 struct rcu_tasks *rtpp;
48 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
49 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
50 * @cbs_gbl_lock: Lock protecting callback list.
51 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
52 * @gp_func: This flavor's grace-period-wait function.
53 * @gp_state: Grace period's most recent state transition (debugging).
54 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
55 * @init_fract: Initial backoff sleep interval.
56 * @gp_jiffies: Time of last @gp_state transition.
57 * @gp_start: Most recent grace-period start in jiffies.
58 * @tasks_gp_seq: Number of grace periods completed since boot.
59 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
60 * @n_ipis_fails: Number of IPI-send failures.
61 * @pregp_func: This flavor's pre-grace-period function (optional).
62 * @pertask_func: This flavor's per-task scan function (optional).
63 * @postscan_func: This flavor's post-task scan function (optional).
64 * @holdouts_func: This flavor's holdout-list scan function (optional).
65 * @postgp_func: This flavor's post-grace-period function (optional).
66 * @call_func: This flavor's call_rcu()-equivalent function.
67 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
68 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
69 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
70 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
71 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
72 * @barrier_q_mutex: Serialize barrier operations.
73 * @barrier_q_count: Number of queues being waited on.
74 * @barrier_q_completion: Barrier wait/wakeup mechanism.
75 * @barrier_q_seq: Sequence number for barrier operations.
76 * @name: This flavor's textual name.
77 * @kname: This flavor's kthread name.
80 struct rcuwait cbs_wait;
81 raw_spinlock_t cbs_gbl_lock;
85 unsigned long gp_jiffies;
86 unsigned long gp_start;
87 unsigned long tasks_gp_seq;
89 unsigned long n_ipis_fails;
90 struct task_struct *kthread_ptr;
91 rcu_tasks_gp_func_t gp_func;
92 pregp_func_t pregp_func;
93 pertask_func_t pertask_func;
94 postscan_func_t postscan_func;
95 holdouts_func_t holdouts_func;
96 postgp_func_t postgp_func;
97 call_rcu_func_t call_func;
98 struct rcu_tasks_percpu __percpu *rtpcpu;
99 int percpu_enqueue_shift;
100 int percpu_enqueue_lim;
101 int percpu_dequeue_lim;
102 unsigned long percpu_dequeue_gpseq;
103 struct mutex barrier_q_mutex;
104 atomic_t barrier_q_count;
105 struct completion barrier_q_completion;
106 unsigned long barrier_q_seq;
111 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
113 #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
114 static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
115 .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
116 .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
118 static struct rcu_tasks rt_name = \
120 .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
121 .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
124 .rtpcpu = &rt_name ## __percpu, \
126 .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
127 .percpu_enqueue_lim = 1, \
128 .percpu_dequeue_lim = 1, \
129 .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
130 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
134 /* Track exiting tasks in order to allow them to be waited for. */
135 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
137 /* Avoid IPIing CPUs early in the grace period. */
138 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
139 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
140 module_param(rcu_task_ipi_delay, int, 0644);
142 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
143 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
144 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
145 module_param(rcu_task_stall_timeout, int, 0644);
146 #define RCU_TASK_STALL_INFO (HZ * 10)
147 static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
148 module_param(rcu_task_stall_info, int, 0644);
149 static int rcu_task_stall_info_mult __read_mostly = 3;
150 module_param(rcu_task_stall_info_mult, int, 0444);
152 static int rcu_task_enqueue_lim __read_mostly = -1;
153 module_param(rcu_task_enqueue_lim, int, 0444);
155 static bool rcu_task_cb_adjust;
156 static int rcu_task_contend_lim __read_mostly = 100;
157 module_param(rcu_task_contend_lim, int, 0444);
158 static int rcu_task_collapse_lim __read_mostly = 10;
159 module_param(rcu_task_collapse_lim, int, 0444);
161 /* RCU tasks grace-period state for debugging. */
163 #define RTGS_WAIT_WAIT_CBS 1
164 #define RTGS_WAIT_GP 2
165 #define RTGS_PRE_WAIT_GP 3
166 #define RTGS_SCAN_TASKLIST 4
167 #define RTGS_POST_SCAN_TASKLIST 5
168 #define RTGS_WAIT_SCAN_HOLDOUTS 6
169 #define RTGS_SCAN_HOLDOUTS 7
170 #define RTGS_POST_GP 8
171 #define RTGS_WAIT_READERS 9
172 #define RTGS_INVOKE_CBS 10
173 #define RTGS_WAIT_CBS 11
174 #ifndef CONFIG_TINY_RCU
175 static const char * const rcu_tasks_gp_state_names[] = {
177 "RTGS_WAIT_WAIT_CBS",
180 "RTGS_SCAN_TASKLIST",
181 "RTGS_POST_SCAN_TASKLIST",
182 "RTGS_WAIT_SCAN_HOLDOUTS",
183 "RTGS_SCAN_HOLDOUTS",
189 #endif /* #ifndef CONFIG_TINY_RCU */
191 ////////////////////////////////////////////////////////////////////////
195 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
197 /* Record grace-period phase and time. */
198 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
200 rtp->gp_state = newstate;
201 rtp->gp_jiffies = jiffies;
204 #ifndef CONFIG_TINY_RCU
205 /* Return state name. */
206 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
208 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
209 int j = READ_ONCE(i); // Prevent the compiler from reading twice
211 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
213 return rcu_tasks_gp_state_names[j];
215 #endif /* #ifndef CONFIG_TINY_RCU */
217 // Initialize per-CPU callback lists for the specified flavor of Tasks RCU.
219 static void cblist_init_generic(struct rcu_tasks *rtp)
226 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
227 if (rcu_task_enqueue_lim < 0) {
228 rcu_task_enqueue_lim = 1;
229 rcu_task_cb_adjust = true;
230 pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
231 } else if (rcu_task_enqueue_lim == 0) {
232 rcu_task_enqueue_lim = 1;
234 lim = rcu_task_enqueue_lim;
236 if (lim > nr_cpu_ids)
238 shift = ilog2(nr_cpu_ids / lim);
239 if (((nr_cpu_ids - 1) >> shift) >= lim)
241 WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
242 WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
243 smp_store_release(&rtp->percpu_enqueue_lim, lim);
244 for_each_possible_cpu(cpu) {
245 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
247 WARN_ON_ONCE(!rtpcp);
249 raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
250 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
251 if (rcu_segcblist_empty(&rtpcp->cblist))
252 rcu_segcblist_init(&rtpcp->cblist);
253 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
256 raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
258 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
259 pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
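/*
 * Worked example of the queue mapping established above (illustrative):
 * with nr_cpu_ids = 16 and rcu_task_enqueue_lim = 4, shift = ilog2(16 / 4)
 * = 2, so the enqueue-time computation smp_processor_id() >>
 * percpu_enqueue_shift sends CPUs 0-3 to the CPU-0 queue, CPUs 4-7 to the
 * CPU-1 queue, CPUs 8-11 to the CPU-2 queue, and CPUs 12-15 to the CPU-3
 * queue.
 */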
262 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
263 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
265 struct rcu_tasks *rtp;
266 struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
269 rcuwait_wake_up(&rtp->cbs_wait);
272 // Enqueue a callback for the specified flavor of Tasks RCU.
273 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
274 struct rcu_tasks *rtp)
280 bool needadjust = false;
282 struct rcu_tasks_percpu *rtpcp;
286 local_irq_save(flags);
288 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
289 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
290 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
291 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
292 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
294 if (rtpcp->rtp_jiffies != j) {
295 rtpcp->rtp_jiffies = j;
296 rtpcp->rtp_n_lock_retries = 0;
298 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
299 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
300 needadjust = true; // Defer adjustment to avoid deadlock.
302 if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
303 raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
304 cblist_init_generic(rtp);
305 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
307 needwake = rcu_segcblist_empty(&rtpcp->cblist);
308 rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
309 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
310 if (unlikely(needadjust)) {
311 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
312 if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
313 WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
314 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
315 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
316 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
318 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
321 /* We can't create the thread unless interrupts are enabled. */
322 if (needwake && READ_ONCE(rtp->kthread_ptr))
323 irq_work_queue(&rtpcp->rtp_irq_work);
326 // Wait for a grace period for the specified flavor of Tasks RCU.
327 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
329 /* Complain if the scheduler has not started. */
330 RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
331 "synchronize_rcu_tasks called too soon");
333 /* Wait for the grace period. */
334 wait_rcu_gp(rtp->call_func);
337 // RCU callback function for rcu_barrier_tasks_generic().
338 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
340 struct rcu_tasks *rtp;
341 struct rcu_tasks_percpu *rtpcp;
343 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
345 if (atomic_dec_and_test(&rtp->barrier_q_count))
346 complete(&rtp->barrier_q_completion);
349 // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
350 // Operates in a manner similar to rcu_barrier().
351 static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
355 struct rcu_tasks_percpu *rtpcp;
356 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
358 mutex_lock(&rtp->barrier_q_mutex);
359 if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
361 mutex_unlock(&rtp->barrier_q_mutex);
364 rcu_seq_start(&rtp->barrier_q_seq);
365 init_completion(&rtp->barrier_q_completion);
366 atomic_set(&rtp->barrier_q_count, 2); // Bias the count so it cannot reach zero until the atomic_sub_and_test() below.
367 for_each_possible_cpu(cpu) {
368 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
370 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
371 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
372 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
373 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
374 atomic_inc(&rtp->barrier_q_count);
375 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
377 if (atomic_sub_and_test(2, &rtp->barrier_q_count))
378 complete(&rtp->barrier_q_completion);
379 wait_for_completion(&rtp->barrier_q_completion);
380 rcu_seq_end(&rtp->barrier_q_seq);
381 mutex_unlock(&rtp->barrier_q_mutex);
384 // Advance callbacks and indicate whether either a grace period or
385 // callback invocation is needed.
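// The return value has bit 0x1 set if callbacks are queued awaiting
// invocation and bit 0x2 set if some of those callbacks still need a
// grace period.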
386 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
395 for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
396 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
398 /* Advance and accelerate any new callbacks. */
399 if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
401 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
402 // Should we shrink down to a single callback queue?
403 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
409 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
410 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
411 if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
413 if (!rcu_segcblist_empty(&rtpcp->cblist))
415 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
418 // Shrink down to a single callback queue if appropriate.
419 // This is done in two stages: (1) If there are no more than
420 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
421 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
422 // if there has not been an increase in callbacks, limit dequeuing
423 // to CPU 0. Note the matching RCU read-side critical section in
424 // call_rcu_tasks_generic().
425 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
426 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
427 if (rtp->percpu_enqueue_lim > 1) {
428 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
429 smp_store_release(&rtp->percpu_enqueue_lim, 1);
430 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
431 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
433 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
435 if (rcu_task_cb_adjust && !ncbsnz &&
436 poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
437 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
438 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
439 WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
440 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
442 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
443 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
445 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
447 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
453 // Advance callbacks and invoke any that are ready.
454 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
460 struct rcu_head *rhp;
461 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
462 struct rcu_tasks_percpu *rtpcp_next;
465 cpunext = cpu * 2 + 1;
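	// Together with the duplicated check below, this forms an implicit
	// binary tree of workqueue handlers: CPU 0 kicks CPUs 1 and 2, CPU 1
	// kicks CPUs 3 and 4, CPU 2 kicks CPUs 5 and 6, and so on, fanning
	// callback invocation out across the in-use queues.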
466 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
467 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
468 queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
470 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
471 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
472 queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
476 if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
478 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
479 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
480 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
481 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
483 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
489 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
490 rcu_segcblist_add_len(&rtpcp->cblist, -len);
491 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
492 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
495 // Workqueue flood to advance callbacks and invoke any that are ready.
496 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
498 struct rcu_tasks *rtp;
499 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
502 rcu_tasks_invoke_cbs(rtp, rtpcp);
505 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
506 static int __noreturn rcu_tasks_kthread(void *arg)
509 struct rcu_tasks *rtp = arg;
511 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
512 housekeeping_affine(current, HK_TYPE_RCU);
513 WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
516 * Each pass through the following loop makes one check for
517 * newly arrived callbacks, and, if there are some, waits for
518 * one RCU-tasks grace period and then invokes the callbacks.
519 * This loop is terminated by the system going down. ;-)
522 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
524 /* If there were none, wait a bit and start over. */
525 rcuwait_wait_event(&rtp->cbs_wait,
526 (needgpcb = rcu_tasks_need_gpcb(rtp)),
529 if (needgpcb & 0x2) {
530 // Wait for one grace period.
531 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
532 rtp->gp_start = jiffies;
533 rcu_seq_start(&rtp->tasks_gp_seq);
535 rcu_seq_end(&rtp->tasks_gp_seq);
538 /* Invoke callbacks. */
539 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
540 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
542 /* Paranoid sleep to keep this from entering a tight loop */
543 schedule_timeout_idle(rtp->gp_sleep);
547 /* Spawn RCU-tasks grace-period kthread. */
548 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
550 struct task_struct *t;
552 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
553 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
555 smp_mb(); /* Ensure others see full kthread. */
558 #ifndef CONFIG_TINY_RCU
561 * Print any non-default Tasks RCU settings.
563 static void __init rcu_tasks_bootup_oddness(void)
565 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
568 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
569 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
570 rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
571 if (rtsimc != rcu_task_stall_info_mult) {
572 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
573 rcu_task_stall_info_mult = rtsimc;
575 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
576 #ifdef CONFIG_TASKS_RCU
577 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
578 #endif /* #ifdef CONFIG_TASKS_RCU */
579 #ifdef CONFIG_TASKS_RUDE_RCU
580 pr_info("\tRude variant of Tasks RCU enabled.\n");
581 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
582 #ifdef CONFIG_TASKS_TRACE_RCU
583 pr_info("\tTracing variant of Tasks RCU enabled.\n");
584 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
587 #endif /* #ifndef CONFIG_TINY_RCU */
589 #ifndef CONFIG_TINY_RCU
590 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
591 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
594 bool havecbs = false;
596 for_each_possible_cpu(cpu) {
597 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
599 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
604 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
606 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
607 jiffies - data_race(rtp->gp_jiffies),
608 data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
609 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
610 ".k"[!!data_race(rtp->kthread_ptr)],
614 #endif // #ifndef CONFIG_TINY_RCU
616 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
618 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
620 ////////////////////////////////////////////////////////////////////////
622 // Shared code between task-list-scanning variants of Tasks RCU.
624 /* Wait for one RCU-tasks grace period. */
625 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
627 struct task_struct *g;
631 unsigned long lastinfo;
632 unsigned long lastreport;
633 bool reported = false;
635 struct task_struct *t;
637 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
641 * There were callbacks, so we need to wait for an RCU-tasks
642 * grace period. Start off by scanning the task list for tasks
643 * that are not already voluntarily blocked. Mark these tasks
644 * and make a list of them in holdouts.
646 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
648 for_each_process_thread(g, t)
649 rtp->pertask_func(t, &holdouts);
652 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
653 rtp->postscan_func(&holdouts);
656 * Each pass through the following loop scans the list of holdout
657 * tasks, removing any that are no longer holdouts. When the list
658 * is empty, we are done.
660 lastreport = jiffies;
661 lastinfo = lastreport;
662 rtsi = READ_ONCE(rcu_task_stall_info);
664 // Start off with initial wait and slowly back off to 1 HZ wait.
665 fract = rtp->init_fract;
667 while (!list_empty(&holdouts)) {
673 // Slowly back off waiting for holdouts
674 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
675 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
676 schedule_timeout_idle(fract);
678 exp = jiffies_to_nsecs(fract);
679 __set_current_state(TASK_IDLE);
680 schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
686 rtst = READ_ONCE(rcu_task_stall_timeout);
687 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
689 lastreport = jiffies;
693 WARN_ON(signal_pending(current));
694 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
695 rtp->holdouts_func(&holdouts, needreport, &firstreport);
697 // Print pre-stall informational messages if needed.
699 if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
701 rtsi = rtsi * rcu_task_stall_info_mult;
702 pr_info("%s: %s grace period %lu is %lu jiffies old.\n",
703 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
707 set_tasks_gp_state(rtp, RTGS_POST_GP);
708 rtp->postgp_func(rtp);
711 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
713 #ifdef CONFIG_TASKS_RCU
715 ////////////////////////////////////////////////////////////////////////
717 // Simple variant of RCU whose quiescent states are voluntary context
718 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
719 // As such, grace periods can take one good long time. There are no
720 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
721 // because this implementation is intended to get the system into a safe
722 // state for some of the manipulations involved in tracing and the like.
723 // Finally, this implementation does not support high call_rcu_tasks()
724 // rates from multiple CPUs. If this is required, per-CPU callback lists will be needed.
727 // The implementation uses rcu_tasks_wait_gp(), which relies on function
728 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
729 // function sets these function pointers up so that rcu_tasks_wait_gp()
730 // invokes these functions in this order:
732 // rcu_tasks_pregp_step():
733 // Invokes synchronize_rcu() in order to wait for all in-flight
734 // t->on_rq and t->nvcsw transitions to complete. This works because
735 // all such transitions are carried out with interrupts disabled.
736 // rcu_tasks_pertask(), invoked on every non-idle task:
737 // For every runnable non-idle task other than the current one, use
738 // get_task_struct() to pin down that task, snapshot that task's
739 // number of voluntary context switches, and add that task to the holdout list.
741 // rcu_tasks_postscan():
742 // Invoke synchronize_srcu() to ensure that all tasks that were
743 // in the process of exiting (and which thus might not know to
744 // synchronize with this RCU Tasks grace period) have completed exiting.
746 // check_all_holdout_tasks(), repeatedly until holdout list is empty:
747 // Scans the holdout list, attempting to identify a quiescent state
748 // for each task on the list. If there is a quiescent state, the
749 // corresponding task is removed from the holdout list.
750 // rcu_tasks_postgp():
751 // Invokes synchronize_rcu() in order to ensure that all prior
752 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
753 // to have happened before the end of this RCU Tasks grace period.
754 // Again, this works because all such transitions are carried out
755 // with interrupts disabled.
757 // For each exiting task, the exit_tasks_rcu_start() and
758 // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
759 // read-side critical sections waited for by rcu_tasks_postscan().
761 // Pre-grace-period update-side code is ordered before the grace period
762 // via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
763 // is ordered before the grace period via the synchronize_rcu() call in
764 // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt handlers.
767 /* Pre-grace-period preparation. */
768 static void rcu_tasks_pregp_step(void)
771 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
772 * to complete. Invoking synchronize_rcu() suffices because all
773 * these transitions occur with interrupts disabled. Without this
774 * synchronize_rcu(), a read-side critical section that started
775 * before the grace period might be incorrectly seen as having
776 * started after the grace period.
778 * This synchronize_rcu() also dispenses with the need for a
779 * memory barrier on the first store to t->rcu_tasks_holdout,
780 * as it forces the store to happen after the beginning of the grace period.
786 /* Per-task initial processing. */
787 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
789 if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
791 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
792 WRITE_ONCE(t->rcu_tasks_holdout, true);
793 list_add(&t->rcu_tasks_holdout_list, hop);
797 /* Processing between scanning the tasklist and draining the holdout list. */
798 static void rcu_tasks_postscan(struct list_head *hop)
801 * Wait for tasks that are in the process of exiting. This
802 * does only part of the job, ensuring that all tasks that were
803 * previously exiting reach the point where they have disabled
804 * preemption, allowing the later synchronize_rcu() to finish the job.
807 synchronize_srcu(&tasks_rcu_exit_srcu);
810 /* See if tasks are still holding out, complain if so. */
811 static void check_holdout_task(struct task_struct *t,
812 bool needreport, bool *firstreport)
816 if (!READ_ONCE(t->rcu_tasks_holdout) ||
817 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
818 !READ_ONCE(t->on_rq) ||
819 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
820 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
821 WRITE_ONCE(t->rcu_tasks_holdout, false);
822 list_del_init(&t->rcu_tasks_holdout_list);
826 rcu_request_urgent_qs_task(t);
830 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
831 *firstreport = false;
834 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
835 t, ".I"[is_idle_task(t)],
836 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
837 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
838 t->rcu_tasks_idle_cpu, cpu);
842 /* Scan the holdout lists for tasks no longer holding out. */
843 static void check_all_holdout_tasks(struct list_head *hop,
844 bool needreport, bool *firstreport)
846 struct task_struct *t, *t1;
848 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
849 check_holdout_task(t, needreport, firstreport);
854 /* Finish off the Tasks-RCU grace period. */
855 static void rcu_tasks_postgp(struct rcu_tasks *rtp)
858 * Because ->on_rq and ->nvcsw are not guaranteed to have full
859 * memory barriers prior to them in the schedule() path, memory
860 * reordering on other CPUs could cause their RCU-tasks read-side
861 * critical sections to extend past the end of the grace period.
862 * However, because these ->nvcsw updates are carried out with
863 * interrupts disabled, we can use synchronize_rcu() to force the
864 * needed ordering on all such CPUs.
866 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
867 * accesses to be within the grace period, avoiding the need for
868 * memory barriers for ->rcu_tasks_holdout accesses.
870 * In addition, this synchronize_rcu() waits for exiting tasks
871 * to complete their final preempt_disable() region of execution,
872 * cleaning up after the synchronize_srcu() above.
877 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
878 DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
881 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
882 * @rhp: structure to be used for queueing the RCU updates.
883 * @func: actual callback function to be invoked after the grace period
885 * The callback function will be invoked some time after a full grace
886 * period elapses, in other words after all currently executing RCU
887 * read-side critical sections have completed. call_rcu_tasks() assumes
888 * that the read-side critical sections end at a voluntary context
889 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
890 * or transition to usermode execution. As such, there are no read-side
891 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
892 * this primitive is intended to determine that all tasks have passed
893 * through a safe state, not so much for data-structure synchronization.
895 * See the description of call_rcu() for more detailed information on
896 * memory ordering guarantees.
898 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
900 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
902 EXPORT_SYMBOL_GPL(call_rcu_tasks);
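/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * the classic call_rcu_tasks() use case is deferring the freeing of
 * tracing-trampoline state until every task has passed through a voluntary
 * context switch, idle, or usermode execution, and therefore can no longer
 * be executing in the old trampoline.  Assumes <linux/slab.h>.
 */
struct my_tramp {
	struct rcu_head rh;
	/* ... trampoline state referenced by instrumented call sites ... */
};

static void my_tramp_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_tramp, rh));
}

static void my_tramp_retire(struct my_tramp *old)
{
	/* Unpatch all call sites first (not shown), then defer the free. */
	call_rcu_tasks(&old->rh, my_tramp_free_cb);
}
/* A module using this pattern would typically also invoke rcu_barrier_tasks()
 * on unload to wait for any such callbacks still in flight. */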
905 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
907 * Control will return to the caller some time after a full rcu-tasks
908 * grace period has elapsed, in other words after all currently
909 * executing rcu-tasks read-side critical sections have completed. These
910 * read-side critical sections are delimited by calls to schedule(),
911 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
912 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
914 * This is a very specialized primitive, intended only for a few uses in
915 * tracing and other situations requiring manipulation of function
916 * preambles and profiling hooks. The synchronize_rcu_tasks() function
917 * is not (yet) intended for heavy use from multiple CPUs.
919 * See the description of synchronize_rcu() for more detailed information
920 * on memory ordering guarantees.
922 void synchronize_rcu_tasks(void)
924 synchronize_rcu_tasks_generic(&rcu_tasks);
926 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
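/*
 * Illustrative sketch (hypothetical names): the synchronous form of the
 * pattern sketched above.  After unpatching the last call site, wait for
 * a full Tasks RCU grace period and then free the trampoline state
 * directly, with no callback involved.
 */
static void my_tramp_retire_sync(struct my_tramp *old)
{
	/* Unpatch all call sites first (not shown). */
	synchronize_rcu_tasks();	/* Every task has since passed a quiescent state. */
	kfree(old);
}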
929 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
931 * Although the current implementation is guaranteed to wait, it is not
932 * obligated to, for example, if there are no pending callbacks.
934 void rcu_barrier_tasks(void)
936 rcu_barrier_tasks_generic(&rcu_tasks);
938 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
940 static int __init rcu_spawn_tasks_kthread(void)
942 cblist_init_generic(&rcu_tasks);
943 rcu_tasks.gp_sleep = HZ / 10;
944 rcu_tasks.init_fract = HZ / 10;
945 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
946 rcu_tasks.pertask_func = rcu_tasks_pertask;
947 rcu_tasks.postscan_func = rcu_tasks_postscan;
948 rcu_tasks.holdouts_func = check_all_holdout_tasks;
949 rcu_tasks.postgp_func = rcu_tasks_postgp;
950 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
954 #if !defined(CONFIG_TINY_RCU)
955 void show_rcu_tasks_classic_gp_kthread(void)
957 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
959 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
960 #endif // !defined(CONFIG_TINY_RCU)
962 /* Do the srcu_read_lock() for the above synchronize_srcu(). */
963 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
966 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
970 /* Do the srcu_read_unlock() for the above synchronize_srcu(). */
971 void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
973 struct task_struct *t = current;
976 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
978 exit_tasks_rcu_finish_trace(t);
981 #else /* #ifdef CONFIG_TASKS_RCU */
982 void exit_tasks_rcu_start(void) { }
983 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
984 #endif /* #else #ifdef CONFIG_TASKS_RCU */
986 #ifdef CONFIG_TASKS_RUDE_RCU
988 ////////////////////////////////////////////////////////////////////////
990 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
991 // passing an empty function to schedule_on_each_cpu(). This approach
992 // provides an asynchronous call_rcu_tasks_rude() API and batching of
993 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
994 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
995 // and induces otherwise unnecessary context switches on all online CPUs,
996 // whether idle or not.
998 // Callback handling is provided by the rcu_tasks_kthread() function.
1000 // Ordering is provided by the scheduler's context-switch code.
1002 // Empty function to allow workqueues to force a context switch.
1003 static void rcu_tasks_be_rude(struct work_struct *work)
1007 // Wait for one rude RCU-tasks grace period.
1008 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1010 if (num_online_cpus() <= 1)
1011 return; // Fastpath for only one CPU.
1013 rtp->n_ipis += cpumask_weight(cpu_online_mask);
1014 schedule_on_each_cpu(rcu_tasks_be_rude);
1017 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1018 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1022 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
1023 * @rhp: structure to be used for queueing the RCU updates.
1024 * @func: actual callback function to be invoked after the grace period
1026 * The callback function will be invoked some time after a full grace
1027 * period elapses, in other words after all currently executing RCU
1028 * read-side critical sections have completed. call_rcu_tasks_rude()
1029 * assumes that the read-side critical sections end at context switch,
1030 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1031 * usermode execution is schedulable). As such, there are no read-side
1032 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1033 * this primitive is intended to determine that all tasks have passed
1034 * through a safe state, not so much for data-structure synchronization.
1036 * See the description of call_rcu() for more detailed information on
1037 * memory ordering guarantees.
1039 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1041 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1043 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1046 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1048 * Control will return to the caller some time after a rude rcu-tasks
1049 * grace period has elapsed, in other words after all currently
1050 * executing rcu-tasks read-side critical sections have completed. These
1051 * read-side critical sections are delimited by calls to schedule(),
1052 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1053 * context), and (in theory, anyway) cond_resched().
1055 * This is a very specialized primitive, intended only for a few uses in
1056 * tracing and other situations requiring manipulation of function preambles
1057 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
1058 * (yet) intended for heavy use from multiple CPUs.
1060 * See the description of synchronize_rcu() for more detailed information
1061 * on memory ordering guarantees.
1063 void synchronize_rcu_tasks_rude(void)
1065 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1067 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1070 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1072 * Although the current implementation is guaranteed to wait, it is not
1073 * obligated to, for example, if there are no pending callbacks.
1075 void rcu_barrier_tasks_rude(void)
1077 rcu_barrier_tasks_generic(&rcu_tasks_rude);
1079 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1081 static int __init rcu_spawn_tasks_rude_kthread(void)
1083 cblist_init_generic(&rcu_tasks_rude);
1084 rcu_tasks_rude.gp_sleep = HZ / 10;
1085 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1089 #if !defined(CONFIG_TINY_RCU)
1090 void show_rcu_tasks_rude_gp_kthread(void)
1092 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1094 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1095 #endif // !defined(CONFIG_TINY_RCU)
1096 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1098 ////////////////////////////////////////////////////////////////////////
1100 // Tracing variant of Tasks RCU. This variant is designed to be used
1101 // to protect tracing hooks, including those of BPF. This variant
1104 // 1. Has explicit read-side markers to allow finite grace periods
1105 // in the face of in-kernel loops for PREEMPT=n builds.
1107 // 2. Protects code in the idle loop, exception entry/exit, and
1108 // CPU-hotplug code paths, similar to the capabilities of SRCU.
1110 // 3. Avoids expensive read-side instructions, having overhead similar
1111 // to that of Preemptible RCU.
1113 // There are of course downsides. The grace-period code can send IPIs to
1114 // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
1115 // It is necessary to scan the full tasklist, much as for Tasks RCU. There
1116 // is a single callback queue guarded by a single lock, again, much as for
1117 // Tasks RCU. If needed, these downsides can be at least partially remedied.
1119 // Perhaps most important, this variant of RCU does not affect the vanilla
1120 // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
1121 // readers can operate from idle, offline, and exception entry/exit in no
1122 // way allows rcu_preempt and rcu_sched readers to also do so.
1124 // The implementation uses rcu_tasks_wait_gp(), which relies on function
1125 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
1126 // function sets these function pointers up so that rcu_tasks_wait_gp()
1127 // invokes these functions in this order:
1129 // rcu_tasks_trace_pregp_step():
1130 // Initialize the count of readers and block CPU-hotplug operations.
1131 // rcu_tasks_trace_pertask(), invoked on every non-idle task:
1132 // Initialize per-task state and attempt to identify an immediate
1133 // quiescent state for that task, or, failing that, attempt to
1134 // set that task's .need_qs flag so that task's next outermost
1135 // rcu_read_unlock_trace() will report the quiescent state (in which
1136 // case the count of readers is incremented). If both attempts fail,
1137 // the task is added to a "holdout" list. Note that IPIs are used
1138 // to invoke trc_read_check_handler() in the context of running tasks
1139 // in order to avoid ordering overhead on common-case shared-variable accesses.
1141 // rcu_tasks_trace_postscan():
1142 // Initialize state and attempt to identify an immediate quiescent
1143 // state as above (but only for idle tasks), unblock CPU-hotplug
1144 // operations, and wait for an RCU grace period to avoid races with
1145 // tasks that are in the process of exiting.
1146 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1147 // Scans the holdout list, attempting to identify a quiescent state
1148 // for each task on the list. If there is a quiescent state, the
1149 // corresponding task is removed from the holdout list.
1150 // rcu_tasks_trace_postgp():
1151 // Wait for the count of readers to drop to zero, reporting any stalls.
1152 // Also execute full memory barriers to maintain ordering with code
1153 // executing after the grace period.
1155 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1157 // Pre-grace-period update-side code is ordered before the grace
1158 // period via the ->cbs_lock and barriers in rcu_tasks_kthread().
1159 // Pre-grace-period read-side code is ordered before the grace period by
1160 // atomic_dec_and_test() of the count of readers (for IPIed readers) and by
1161 // scheduler context-switch ordering (for locked-down non-running readers).
1163 // The lockdep state must be outside of #ifdef to be useful.
1164 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1165 static struct lock_class_key rcu_lock_trace_key;
1166 struct lockdep_map rcu_trace_lock_map =
1167 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1168 EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1169 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1171 #ifdef CONFIG_TASKS_TRACE_RCU
1173 static atomic_t trc_n_readers_need_end; // Number of waited-for readers.
1174 static DECLARE_WAIT_QUEUE_HEAD(trc_wait); // Wait queue used by the grace-period kthread to wait out readers.
1176 // Record outstanding IPIs to each CPU. No point in sending two...
1177 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1179 // The number of detections of task quiescent state relying on
1180 // heavyweight readers executing explicit memory barriers.
1181 static unsigned long n_heavy_reader_attempts;
1182 static unsigned long n_heavy_reader_updates;
1183 static unsigned long n_heavy_reader_ofl_updates;
1185 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1186 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1190 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
1191 * while the scheduler locks are held.
1193 static void rcu_read_unlock_iw(struct irq_work *iwp)
1197 static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
1199 /* If we are the last reader, wake up the grace-period kthread. */
1200 void rcu_read_unlock_trace_special(struct task_struct *t)
1202 int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
1204 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
1205 t->trc_reader_special.b.need_mb)
1206 smp_mb(); // Pairs with update-side barriers.
1207 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1209 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
1210 WRITE_ONCE(t->trc_reader_nesting, 0);
1211 if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
1212 irq_work_queue(&rcu_tasks_trace_iw);
1214 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1216 /* Add a task to the holdout list, if it is not already on the list. */
1217 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1219 if (list_empty(&t->trc_holdout_list)) {
1221 list_add(&t->trc_holdout_list, bhp);
1225 /* Remove a task from the holdout list, if it is in fact present. */
1226 static void trc_del_holdout(struct task_struct *t)
1228 if (!list_empty(&t->trc_holdout_list)) {
1229 list_del_init(&t->trc_holdout_list);
1234 /* IPI handler to check task state. */
1235 static void trc_read_check_handler(void *t_in)
1237 struct task_struct *t = current;
1238 struct task_struct *texp = t_in;
1240 // If the task is no longer running on this CPU, leave.
1241 if (unlikely(texp != t)) {
1242 goto reset_ipi; // Already on holdout list, so will check later.
1245 // If the task is not in a read-side critical section, and
1246 // if this is the last reader, awaken the grace-period kthread.
1247 if (likely(!READ_ONCE(t->trc_reader_nesting))) {
1248 WRITE_ONCE(t->trc_reader_checked, true);
1251 // If we are racing with an rcu_read_unlock_trace(), try again later.
1252 if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
1254 WRITE_ONCE(t->trc_reader_checked, true);
1256 // Get here if the task is in a read-side critical section. Set
1257 // its state so that it will awaken the grace-period kthread upon
1258 // exit from that critical section.
1259 atomic_inc(&trc_n_readers_need_end); // One more to wait on.
1260 WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
1261 WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
1264 // Allow future IPIs to be sent to this CPU and to this task.
1265 // Also order this IPI handler against any later manipulations of
1266 // the intended task.
1267 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1268 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1271 /* Callback function for scheduler to check locked-down task. */
1272 static int trc_inspect_reader(struct task_struct *t, void *arg)
1274 int cpu = task_cpu(t);
1276 bool ofl = cpu_is_offline(cpu);
1279 WARN_ON_ONCE(ofl && !is_idle_task(t));
1281 // If no chance of heavyweight readers, do it the hard way.
1282 if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1285 // If heavyweight readers are enabled on the remote task,
1286 // we can inspect its state even though it is currently running.
1287 // However, we cannot safely change its state.
1288 n_heavy_reader_attempts++;
1289 if (!ofl && // Check for "running" idle tasks on offline CPUs.
1290 !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1291 return -EINVAL; // No quiescent state, do it the hard way.
1292 n_heavy_reader_updates++;
1294 n_heavy_reader_ofl_updates++;
1297 // The task is not running, so C-language access is safe.
1298 nesting = t->trc_reader_nesting;
1301 // If not exiting a read-side critical section, mark as checked
1302 // so that the grace-period kthread will remove it from the holdout list.
1304 t->trc_reader_checked = nesting >= 0;
1306 return nesting ? -EINVAL : 0; // If in QS, done, otherwise try again later.
1308 // The task is in a read-side critical section, so set up its
1309 // state so that it will awaken the grace-period kthread upon exit
1310 // from that critical section.
1311 atomic_inc(&trc_n_readers_need_end); // One more to wait on.
1312 WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
1313 WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
1317 /* Attempt to extract the state for the specified task. */
1318 static void trc_wait_for_one_reader(struct task_struct *t,
1319 struct list_head *bhp)
1323 // If a previous IPI is still in flight, let it complete.
1324 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1327 // The current task had better be in a quiescent state.
1329 t->trc_reader_checked = true;
1330 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1334 // Attempt to nail down the task for inspection.
1336 if (!task_call_func(t, trc_inspect_reader, NULL)) {
1342 // If this task is not yet on the holdout list, then we are in
1343 // an RCU read-side critical section. Otherwise, the invocation of
1344 // trc_add_holdout() that added it to the list did the necessary
1345 // get_task_struct(). Either way, the task cannot be freed out
1346 // from under this code.
1348 // If currently running, send an IPI; either way, add it to the list.
1349 trc_add_holdout(t, bhp);
1351 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1352 // The task is currently running, so try IPIing it.
1355 // If there is already an IPI outstanding, let it happen.
1356 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1359 per_cpu(trc_ipi_to_cpu, cpu) = true;
1360 t->trc_ipi_to_cpu = cpu;
1361 rcu_tasks_trace.n_ipis++;
1362 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1363 // Just in case there is some other reason for
1364 // failure than the target CPU being offline.
1365 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
1367 rcu_tasks_trace.n_ipis_fails++;
1368 per_cpu(trc_ipi_to_cpu, cpu) = false;
1369 t->trc_ipi_to_cpu = -1;
1374 /* Initialize for a new RCU-tasks-trace grace period. */
1375 static void rcu_tasks_trace_pregp_step(void)
1379 // Allow for fast-acting IPIs.
1380 atomic_set(&trc_n_readers_need_end, 1);
1382 // There shouldn't be any old IPIs, but...
1383 for_each_possible_cpu(cpu)
1384 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1386 // Disable CPU hotplug across the tasklist scan.
1387 // This also waits for all readers in CPU-hotplug code paths.
1391 /* Do first-round processing for the specified task. */
1392 static void rcu_tasks_trace_pertask(struct task_struct *t,
1393 struct list_head *hop)
1395 // During early boot when there is only the one boot CPU, there
1396 // is no idle task for the other CPUs. Just return.
1397 if (unlikely(t == NULL))
1400 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
1401 WRITE_ONCE(t->trc_reader_checked, false);
1402 t->trc_ipi_to_cpu = -1;
1403 trc_wait_for_one_reader(t, hop);
1407 * Do intermediate processing between task and holdout scans and
1408 * pick up the idle tasks.
1410 static void rcu_tasks_trace_postscan(struct list_head *hop)
1414 for_each_possible_cpu(cpu)
1415 rcu_tasks_trace_pertask(idle_task(cpu), hop);
1417 // Re-enable CPU hotplug now that the tasklist scan has completed.
1420 // Wait for late-stage exiting tasks to finish exiting.
1421 // These might have passed the call to exit_tasks_rcu_finish().
1423 // Any tasks that exit after this point will set ->trc_reader_checked.
1426 /* Communicate task state back to the RCU Tasks Trace stall-warning code. */
1427 struct trc_stall_chk_rdr {
1433 static int trc_check_slow_task(struct task_struct *t, void *arg)
1435 struct trc_stall_chk_rdr *trc_rdrp = arg;
1438 return false; // It is running, so decline to inspect it.
1439 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1440 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1441 trc_rdrp->needqs = READ_ONCE(t->trc_reader_special.b.need_qs);
1445 /* Show the state of a task stalling the current RCU tasks trace GP. */
1446 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1449 struct trc_stall_chk_rdr trc_rdr;
1450 bool is_idle_tsk = is_idle_task(t);
1453 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1454 *firstreport = false;
1457 if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1458 pr_alert("P%d: %c\n",
1462 pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
1464 ".I"[trc_rdr.ipi_to_cpu >= 0],
1466 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1468 " N"[!!trc_rdr.needqs],
1473 /* List stalled IPIs for RCU tasks trace. */
1474 static void show_stalled_ipi_trace(void)
1478 for_each_possible_cpu(cpu)
1479 if (per_cpu(trc_ipi_to_cpu, cpu))
1480 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1483 /* Do one scan of the holdout list. */
1484 static void check_all_holdout_tasks_trace(struct list_head *hop,
1485 bool needreport, bool *firstreport)
1487 struct task_struct *g, *t;
1489 // Disable CPU hotplug across the holdout list scan.
1492 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1493 // If safe and needed, try to check the current task.
1494 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1495 !READ_ONCE(t->trc_reader_checked))
1496 trc_wait_for_one_reader(t, hop);
1498 // If check succeeded, remove this task from the list.
1499 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1500 READ_ONCE(t->trc_reader_checked))
1502 else if (needreport)
1503 show_stalled_task_trace(t, firstreport);
1506 // Re-enable CPU hotplug now that the holdout list scan has completed.
1511 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1512 show_stalled_ipi_trace();
1516 static void rcu_tasks_trace_empty_fn(void *unused)
1520 /* Wait for grace period to complete and provide ordering. */
1521 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1525 struct task_struct *g, *t;
1526 LIST_HEAD(holdouts);
1529 // Wait for any lingering IPI handlers to complete. Note that
1530 // if a CPU has gone offline or transitioned to userspace in the
1531 // meantime, all IPI handlers should have been drained beforehand.
1532 // Yes, this assumes that CPUs process IPIs in order. If that ever
1533 // changes, there will need to be a recheck and/or timed wait.
1534 for_each_online_cpu(cpu)
1535 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1536 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1538 // Remove the safety count.
1539 smp_mb__before_atomic(); // Order vs. earlier atomics
1540 atomic_dec(&trc_n_readers_need_end);
1541 smp_mb__after_atomic(); // Order vs. later atomics
1543 // Wait for readers.
1544 set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
1546 ret = wait_event_idle_exclusive_timeout(
1548 atomic_read(&trc_n_readers_need_end) == 0,
1549 READ_ONCE(rcu_task_stall_timeout));
1551 break; // Count reached zero.
1552 // Stall warning time, so make a list of the offenders.
1554 for_each_process_thread(g, t)
1555 if (READ_ONCE(t->trc_reader_special.b.need_qs))
1556 trc_add_holdout(t, &holdouts);
1559 list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
1560 if (READ_ONCE(t->trc_reader_special.b.need_qs))
1561 show_stalled_task_trace(t, &firstreport);
1562 trc_del_holdout(t); // Release task_struct reference.
1565 pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/tasklist mismatch?)\n");
1566 show_stalled_ipi_trace();
1567 pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
1569 smp_mb(); // Caller's code must be ordered after wakeup.
1570 // Pairs with pretty much every ordering primitive.
1573 /* Report any needed quiescent state for this exiting task. */
1574 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1576 WRITE_ONCE(t->trc_reader_checked, true);
1577 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1578 WRITE_ONCE(t->trc_reader_nesting, 0);
1579 if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
1580 rcu_read_unlock_trace_special(t);
1584 * call_rcu_tasks_trace() - Queue an RCU callback for invocation after a trace task-based grace period
1585 * @rhp: structure to be used for queueing the RCU updates.
1586 * @func: actual callback function to be invoked after the grace period
1588 * The callback function will be invoked some time after a trace rcu-tasks
1589 * grace period elapses, in other words after all currently executing
1590 * trace rcu-tasks read-side critical sections have completed. These
1591 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1592 * and rcu_read_unlock_trace().
1594 * See the description of call_rcu() for more detailed information on
1595 * memory ordering guarantees.
1597 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1599 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1601 EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
1604 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1606 * Control will return to the caller some time after a trace rcu-tasks
1607 * grace period has elapsed, in other words after all currently executing
1608 * trace rcu-tasks read-side critical sections have completed. These read-side
1609 * critical sections are delimited by calls to rcu_read_lock_trace()
1610 * and rcu_read_unlock_trace().
1612 * This is a very specialized primitive, intended only for a few uses in
1613 * tracing and other situations requiring manipulation of function preambles
1614 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1615 * (yet) intended for heavy use from multiple CPUs.
1617 * See the description of synchronize_rcu() for more detailed information
1618 * on memory ordering guarantees.
1620 void synchronize_rcu_tasks_trace(void)
1622 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1623 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1625 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
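/*
 * Illustrative sketch (hypothetical names): typical RCU Tasks Trace usage.
 * Readers bracket the hook invocation with rcu_read_lock_trace() and
 * rcu_read_unlock_trace() from <linux/rcupdate_trace.h>; the updater
 * unpublishes the hook and waits for a grace period before freeing it.
 */
struct my_hook {
	void (*func)(void);
};

static struct my_hook __rcu *my_active_hook;

static void my_hook_invoke(void)
{
	struct my_hook *h;

	rcu_read_lock_trace();
	h = rcu_dereference_raw(my_active_hook);
	if (h)
		h->func();
	rcu_read_unlock_trace();
}

static void my_hook_remove(void)
{
	struct my_hook *h = rcu_replace_pointer(my_active_hook, NULL, true);

	synchronize_rcu_tasks_trace();	/* All readers of *h have finished. */
	kfree(h);
}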
1628 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1630 * Although the current implementation is guaranteed to wait, it is not
1631 * obligated to, for example, if there are no pending callbacks.
1633 void rcu_barrier_tasks_trace(void)
1635 rcu_barrier_tasks_generic(&rcu_tasks_trace);
1637 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
1639 static int __init rcu_spawn_tasks_trace_kthread(void)
1641 cblist_init_generic(&rcu_tasks_trace);
1642 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1643 rcu_tasks_trace.gp_sleep = HZ / 10;
1644 rcu_tasks_trace.init_fract = HZ / 10;
1646 rcu_tasks_trace.gp_sleep = HZ / 200;
1647 if (rcu_tasks_trace.gp_sleep <= 0)
1648 rcu_tasks_trace.gp_sleep = 1;
1649 rcu_tasks_trace.init_fract = HZ / 200;
1650 if (rcu_tasks_trace.init_fract <= 0)
1651 rcu_tasks_trace.init_fract = 1;
1653 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1654 rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
1655 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1656 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1657 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1658 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1662 #if !defined(CONFIG_TINY_RCU)
1663 void show_rcu_tasks_trace_gp_kthread(void)
1667 sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
1668 data_race(n_heavy_reader_ofl_updates),
1669 data_race(n_heavy_reader_updates),
1670 data_race(n_heavy_reader_attempts));
1671 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1673 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1674 #endif // !defined(CONFIG_TINY_RCU)
1676 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1677 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1678 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1680 #ifndef CONFIG_TINY_RCU
1681 void show_rcu_tasks_gp_kthreads(void)
1683 show_rcu_tasks_classic_gp_kthread();
1684 show_rcu_tasks_rude_gp_kthread();
1685 show_rcu_tasks_trace_gp_kthread();
1687 #endif /* #ifndef CONFIG_TINY_RCU */
1689 #ifdef CONFIG_PROVE_RCU
1690 struct rcu_tasks_test_desc {
1696 static struct rcu_tasks_test_desc tests[] = {
1698 .name = "call_rcu_tasks()",
1699 /* If not defined, the test is skipped. */
1700 .notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
1703 .name = "call_rcu_tasks_rude()",
1704 /* If not defined, the test is skipped. */
1705 .notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1708 .name = "call_rcu_tasks_trace()",
1709 /* If not defined, the test is skipped. */
1710 .notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1714 static void test_rcu_tasks_callback(struct rcu_head *rhp)
1716 struct rcu_tasks_test_desc *rttd =
1717 container_of(rhp, struct rcu_tasks_test_desc, rh);
1719 pr_info("Callback from %s invoked.\n", rttd->name);
1721 rttd->notrun = true;
1724 static void rcu_tasks_initiate_self_tests(void)
1726 pr_info("Running RCU-tasks wait API self tests\n");
1727 #ifdef CONFIG_TASKS_RCU
1728 synchronize_rcu_tasks();
1729 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
1732 #ifdef CONFIG_TASKS_RUDE_RCU
1733 synchronize_rcu_tasks_rude();
1734 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
1737 #ifdef CONFIG_TASKS_TRACE_RCU
1738 synchronize_rcu_tasks_trace();
1739 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
1743 static int rcu_tasks_verify_self_tests(void)
1748 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1749 if (!tests[i].notrun) { // still hanging.
1750 pr_err("%s has failed.\n", tests[i].name);
1760 late_initcall(rcu_tasks_verify_self_tests);
1761 #else /* #ifdef CONFIG_PROVE_RCU */
1762 static void rcu_tasks_initiate_self_tests(void) { }
1763 #endif /* #else #ifdef CONFIG_PROVE_RCU */
1765 void __init rcu_init_tasks_generic(void)
1767 #ifdef CONFIG_TASKS_RCU
1768 rcu_spawn_tasks_kthread();
1771 #ifdef CONFIG_TASKS_RUDE_RCU
1772 rcu_spawn_tasks_rude_kthread();
1775 #ifdef CONFIG_TASKS_TRACE_RCU
1776 rcu_spawn_tasks_trace_kthread();
1779 // Run the self-tests.
1780 rcu_tasks_initiate_self_tests();
1783 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1784 static inline void rcu_tasks_bootup_oddness(void) {}
1785 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */