// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA           (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA           0
#endif
#define RCU_STALL_MIGHT_DIV             8
#define RCU_STALL_MIGHT_MIN             (2 * HZ)

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 3);
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 300);
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

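/*
 * Worked example (illustrative values): with the default
 * rcu_cpu_stall_timeout of 21 seconds, HZ=1000, and CONFIG_PROVE_RCU=y,
 * the function above returns 21 * 1000 + 5 * 1000 = 26000 jiffies, that
 * is, a 26-second stall deadline.
 */
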
/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
        unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
        unsigned long j = jiffies;

        if (d < RCU_STALL_MIGHT_MIN)
                d = RCU_STALL_MIGHT_MIN;
        smp_mb(); // jiffies before .gp_seq to avoid false positives.
        if (!rcu_gp_in_progress())
                return false;
        // Long delays at this point avoid false positives, but a delay
        // of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
        smp_mb(); // .gp_seq before second .gp_start
        // And ditto here.
        return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}

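/*
 * Hypothetical usage sketch (not part of this file): a kvfree_rcu()-style
 * slow path might consult rcu_gp_might_be_stalled() when choosing between
 * allocating memory for RCU-mediated freeing and blocking in
 * synchronize_rcu(), along the lines suggested by the kerneldoc above:
 *
 *	if (rcu_gp_might_be_stalled()) {
 *		synchronize_rcu();		// GP stalled: don't tie up memory.
 *		kfree(obj);
 *	} else {
 *		queue_rcu_mediated_free(obj);	// Hypothetical helper.
 *	}
 */
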
/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
        if (!rcu_cpu_stall_suppress)
                rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
        if (rcu_cpu_stall_suppress == 2)
                rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
        return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
        if (sysctl_panic_on_rcu_stall)
                panic("RCU Stall\n");
}

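/*
 * Example (assuming the standard sysctl paths): enable the panic-on-stall
 * behavior at runtime with "sysctl -w kernel.panic_on_rcu_stall=1" or by
 * writing 1 to /proc/sys/kernel/panic_on_rcu_stall.
 */
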
/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
        WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

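/*
 * Hypothetical caller sketch: a debugger entry path that halts all CPUs
 * for an unbounded time might suppress the resulting spurious stall
 * warnings like this (hard irqs must be disabled, per the kerneldoc):
 *
 *	local_irq_save(flags);
 *	rcu_cpu_stall_reset();
 *	enter_debugger();	// Hypothetical; CPUs may stall here.
 *	local_irq_restore(flags);
 */
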
//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
        unsigned long j = jiffies;
        unsigned long j1;

        WRITE_ONCE(rcu_state.gp_start, j);
        j1 = rcu_jiffies_till_stall_check();
        smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
        WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
        rcu_state.jiffies_resched = j + j1 / 2;
        rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
        rdp->ticks_this_gp = 0;
        rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
        WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
        unsigned long j;

        if (!READ_ONCE(rcu_kick_kthreads))
                return;
        j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
        if (time_after(jiffies, j) && rcu_state.gp_kthread &&
            (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
                WARN_ONCE(1, "Kicking %s grace-period kthread\n",
                          rcu_state.name);
                rcu_ftrace_dump(DUMP_ALL);
                wake_up_process(rcu_state.gp_kthread);
                WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
        }
}

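/*
 * Note that the kick above is rate-limited: each kick pushes
 * ->jiffies_kick_kthreads another HZ jiffies into the future, so a
 * wedged grace-period kthread is kicked at most about once per second.
 */
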
/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        rdp = container_of(iwp, struct rcu_data, rcu_iw);
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);
        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
                rdp->rcu_iw_gp_seq = rnp->gp_seq;
                rdp->rcu_iw_pending = false;
        }
        raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct task_struct *t;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!rcu_preempt_blocked_readers_cgp(rnp)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                /*
                 * We could be printing a lot while holding a spinlock.
                 * Avoid triggering hard lockup.
                 */
                touch_nmi_watchdog();
                sched_show_task(t);
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
        int nesting;
        union rcu_special rs;
        bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static bool check_slow_task(struct task_struct *t, void *arg)
{
        struct rcu_stall_chk_rdr *rscrp = arg;

        if (task_curr(t))
                return false; // It is running, so decline to inspect it.
        rscrp->nesting = t->rcu_read_lock_nesting;
        rscrp->rs = t->rcu_read_unlock_special;
        rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
        return true;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        int ndetected = 0;
        struct rcu_stall_chk_rdr rscr;
        struct task_struct *t;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return 0;
        pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
               rnp->level, rnp->grplo, rnp->grphi);
        t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
                        pr_cont(" P%d", t->pid);
                else
                        pr_cont(" P%d/%d:%c%c%c%c",
                                t->pid, rscr.nesting,
                                ".b"[rscr.rs.b.blocked],
                                ".q"[rscr.rs.b.need_qs],
                                ".e"[rscr.rs.b.exp_hint],
                                ".l"[rscr.on_blkd_list]);
                ndetected++;
        }
        pr_cont("\n");
        return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
        int cpu;
        unsigned long flags;
        struct rcu_node *rnp;

        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                for_each_leaf_node_possible_cpu(rnp, cpu)
                        if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
                                if (!trigger_single_cpu_backtrace(cpu))
                                        dump_cpu_task(cpu);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

        sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
                rdp->last_accelerate & 0xffff, jiffies & 0xffff,
                !!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
        *cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

static const char * const gp_state_names[] = {
        [RCU_GP_IDLE] = "RCU_GP_IDLE",
        [RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
        [RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
        [RCU_GP_ONOFF] = "RCU_GP_ONOFF",
        [RCU_GP_INIT] = "RCU_GP_INIT",
        [RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
        [RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
        [RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
        [RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
        if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
                return "???";
        return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
        unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

        if (jp)
                *jp = j;
        return j > 2 * HZ;
}

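/*
 * Note that the starvation threshold above is 2 * HZ jiffies, so the
 * grace-period kthread is considered starved once more than two seconds
 * have elapsed without recorded ->gp_activity, independent of the HZ value.
 */
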
/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
        unsigned long delta;
        bool falsepositive;
        char fast_no_hz[72];
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        char *ticks_title;
        unsigned long ticks_value;

        /*
         * We could be printing a lot while holding a spinlock.  Avoid
         * triggering hard lockup.
         */
        touch_nmi_watchdog();

        ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
        if (ticks_value) {
                ticks_title = "GPs behind";
        } else {
                ticks_title = "ticks this GP";
                ticks_value = rdp->ticks_this_gp;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
        delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
        falsepositive = rcu_is_gp_kthread_starving(NULL) &&
                        rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
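        /*
         * Decode of the flag characters printed after the CPU number
         * below: 'O' means the CPU is currently offline, 'o' that its
         * bit is clear in ->qsmaskinit (RCU is not expecting a quiescent
         * state from it this grace period), and 'N' that its bit is
         * clear in ->qsmaskinitnext; '.' means the opposite in each
         * position.  The fourth character is '?' if CONFIG_IRQ_WORK=n,
         * a digit giving the number of grace periods (capped at nine)
         * that the stall-detection irq_work has been pending, '.' if
         * that irq_work was handled during the current grace period,
         * or '!' otherwise.
         */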
        pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
               cpu,
               "O."[!!cpu_online(cpu)],
               "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
               "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
               !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
                        rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
                                "!."[!delta],
               ticks_value, ticks_title,
               rcu_dynticks_snap(rdp) & 0xfff,
               rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
               data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
               fast_no_hz,
               falsepositive ? " (false positive?)" : "");
}

/* Complain about starvation of grace-period kthread.  */
static void rcu_check_gp_kthread_starvation(void)
{
        struct task_struct *gpk = rcu_state.gp_kthread;
        unsigned long j;

        if (rcu_is_gp_kthread_starving(&j)) {
                pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
                       rcu_state.name, j,
                       (long)rcu_seq_current(&rcu_state.gp_seq),
                       data_race(rcu_state.gp_flags),
                       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
                       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
                if (gpk) {
                        pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
                        pr_err("RCU grace-period kthread stack dump:\n");
                        sched_show_task(gpk);
                        wake_up_process(gpk);
                }
        }
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
        int cpu;
        unsigned long flags;
        unsigned long gpa;
        unsigned long j;
        int ndetected = 0;
        struct rcu_node *rnp;
        long totqlen = 0;

        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads();
        if (rcu_stall_is_suppressed())
                return;

        /*
         * OK, time to rat on our buddy...
         * See Documentation/RCU/stallwarn.rst for info on how to debug
         * RCU CPU stall warnings.
         */
        pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                ndetected += rcu_print_task_stall(rnp);
                if (rnp->qsmask != 0) {
                        for_each_leaf_node_possible_cpu(rnp, cpu)
                                if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
                                        print_cpu_stall_info(cpu);
                                        ndetected++;
                                }
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }

        for_each_possible_cpu(cpu)
                totqlen += rcu_get_n_cbs_cpu(cpu);
        pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
               smp_processor_id(), (long)(jiffies - gps),
               (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
        if (ndetected) {
                rcu_dump_cpu_stacks();

                /* Complain about tasks blocking the grace period. */
                rcu_for_each_leaf_node(rnp)
                        rcu_print_detail_task_stall_rnp(rnp);
        } else {
                if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
                        gpa = data_race(rcu_state.gp_activity);
                        pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                               rcu_state.name, j - gpa, j, gpa,
                               data_race(jiffies_till_next_fqs),
                               rcu_get_root()->qsmask);
                }
        }
        /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
                WRITE_ONCE(rcu_state.jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

        rcu_check_gp_kthread_starvation();

        panic_on_rcu_stall();

        rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rcu_get_root();
        long totqlen = 0;

        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads();
        if (rcu_stall_is_suppressed())
                return;

        /*
         * OK, time to rat on ourselves...
         * See Documentation/RCU/stallwarn.rst for info on how to debug
         * RCU CPU stall warnings.
         */
        pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
        raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
        print_cpu_stall_info(smp_processor_id());
        raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
        for_each_possible_cpu(cpu)
                totqlen += rcu_get_n_cbs_cpu(cpu);
        pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
                jiffies - gps,
                (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

        rcu_check_gp_kthread_starvation();

        rcu_dump_cpu_stacks();

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
                WRITE_ONCE(rcu_state.jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        panic_on_rcu_stall();

        /*
         * Attempt to revive the RCU machinery by forcing a context switch.
         *
         * A context switch would normally allow the RCU state machine to make
         * progress and it could be we're stuck in kernel space without context
         * switches for an entirely unreasonable amount of time.
         */
        set_tsk_need_resched(current);
        set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
        unsigned long gs1;
        unsigned long gs2;
        unsigned long gps;
        unsigned long j;
        unsigned long jn;
        unsigned long js;
        struct rcu_node *rnp;

        if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
            !rcu_gp_in_progress())
                return;
        rcu_stall_kick_kthreads();
        j = jiffies;

        /*
         * Lots of memory barriers to reject false positives.
         *
         * The idea is to pick up rcu_state.gp_seq, then
         * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
         * another copy of rcu_state.gp_seq.  These values are updated in
         * the opposite order with memory barriers (or equivalent) during
         * grace-period initialization and cleanup.  Now, a false positive
         * can occur if we get a new value of rcu_state.gp_start and an old
         * value of rcu_state.jiffies_stall.  But given the memory barriers,
         * the only way that this can happen is if one grace period ends
         * and another starts between these two fetches.  This is detected
         * by comparing the second fetch of rcu_state.gp_seq with the
         * previous fetch from rcu_state.gp_seq.
         *
         * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
         * and rcu_state.gp_start suffice to forestall false positives.
         */
        gs1 = READ_ONCE(rcu_state.gp_seq);
        smp_rmb(); /* Pick up ->gp_seq first... */
        js = READ_ONCE(rcu_state.jiffies_stall);
        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
        gps = READ_ONCE(rcu_state.gp_start);
        smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
        gs2 = READ_ONCE(rcu_state.gp_seq);
        if (gs1 != gs2 ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
        jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
        if (rcu_gp_in_progress() &&
            (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
            cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(gps);
                if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
                        rcu_ftrace_dump(DUMP_ALL);

        } else if (rcu_gp_in_progress() &&
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
                   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

                /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(gs2, gps);
                if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
                        rcu_ftrace_dump(DUMP_ALL);
        }
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
        unsigned long cbs = 0;
        int cpu;
        unsigned long j;
        unsigned long ja;
        unsigned long jr;
        unsigned long jw;
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

        j = jiffies;
        ja = j - data_race(rcu_state.gp_activity);
        jr = j - data_race(rcu_state.gp_req_activity);
        jw = j - data_race(rcu_state.gp_wake_time);
        pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
                rcu_state.name, gp_state_getname(rcu_state.gp_state),
                rcu_state.gp_state, t ? t->state : 0x1ffffL,
                ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
                (long)data_race(rcu_state.gp_seq),
                (long)data_race(rcu_get_root()->gp_seq_needed),
                data_race(rcu_state.gp_flags));
        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
                                 READ_ONCE(rnp->gp_seq_needed)))
                        continue;
                pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
                        rnp->grplo, rnp->grphi, (long)data_race(rnp->gp_seq),
                        (long)data_race(rnp->gp_seq_needed));
                if (!rcu_is_leaf_node(rnp))
                        continue;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        rdp = per_cpu_ptr(&rcu_data, cpu);
                        if (READ_ONCE(rdp->gpwrap) ||
                            ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
                                         READ_ONCE(rdp->gp_seq_needed)))
                                continue;
                        pr_info("\tcpu %d ->gp_seq_needed %ld\n",
                                cpu, (long)data_race(rdp->gp_seq_needed));
                }
        }
        for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                cbs += data_race(rdp->n_cbs_invoked);
                if (rcu_segcblist_is_offloaded(&rdp->cblist))
                        show_rcu_nocb_state(rdp);
        }
        pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
        show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
                                     const unsigned long gpssdelay)
{
        unsigned long flags;
        unsigned long j;
        struct rcu_node *rnp_root = rcu_get_root();
        static atomic_t warned = ATOMIC_INIT(0);

        if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
            ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
                         READ_ONCE(rnp_root->gp_seq_needed)) ||
            !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
                return;
        j = jiffies; /* Expensive access, and in common case don't get here. */
        if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
            time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
            atomic_read(&warned))
                return;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        j = jiffies;
        if (rcu_gp_in_progress() ||
            ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
                         READ_ONCE(rnp_root->gp_seq_needed)) ||
            time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
            time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
            atomic_read(&warned)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        /* Hold onto the leaf lock to make others see warned==1. */

        if (rnp_root != rnp)
                raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
        j = jiffies;
        if (rcu_gp_in_progress() ||
            ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
                         READ_ONCE(rnp_root->gp_seq_needed)) ||
            time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
            time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
            atomic_xchg(&warned, 1)) {
                if (rnp_root != rnp)
                        /* irqs remain disabled. */
                        raw_spin_unlock_rcu_node(rnp_root);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WARN_ON(1);
        if (rnp_root != rnp)
                raw_spin_unlock_rcu_node(rnp_root);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
        unsigned long cbs;
        int cpu;
        unsigned long max_cbs = 0;
        int max_cpu = -1;
        struct rcu_data *rdp;

        if (rcu_gp_in_progress()) {
                pr_info("%s: GP age %lu jiffies\n",
                        __func__, jiffies - rcu_state.gp_start);
                show_rcu_gp_kthreads();
        } else {
                pr_info("%s: Last GP end %lu jiffies ago\n",
                        __func__, jiffies - rcu_state.gp_end);
                preempt_disable();
                rdp = this_cpu_ptr(&rcu_data);
                rcu_check_gp_start_stall(rdp->mynode, rdp, j);
                preempt_enable();
        }
        for_each_possible_cpu(cpu) {
                cbs = rcu_get_n_cbs_cpu(cpu);
                if (!cbs)
                        continue;
                if (max_cpu < 0)
                        pr_info("%s: callbacks", __func__);
                pr_cont(" %d: %lu", cpu, cbs);
                if (cbs <= max_cbs)
                        continue;
                max_cbs = cbs;
                max_cpu = cpu;
        }
        if (max_cpu >= 0)
                pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
        show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
        .handler = sysrq_show_rcu,
        .help_msg = "show-rcu(y)",
        .action_msg = "Show RCU tree",
        .enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
        if (sysrq_rcu)
                return register_sysrq_key('y', &sysrq_rcudump_op);
        return 0;
}
early_initcall(rcu_sysrq_init);
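
/*
 * Example (assuming the usual "rcutree." module-parameter prefix for this
 * translation unit): boot with "rcutree.sysrq_rcu=1" to commandeer the 'y'
 * key, then trigger the dump with "echo y > /proc/sysrq-trigger" or the
 * SysRq-y key sequence.
 */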