/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#define pr_fmt(fmt) fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
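/*
 * Illustrative example (not part of the test logic): an SRCU reader
 * that has also disabled bh would be encoded as
 *
 *	readstate = (srcu_idx << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_BH;
 *
 * with the SRCU index above RCUTORTURE_RDR_SHIFT and the protection bits
 * confined to RCUTORTURE_RDR_MASK, which is how rcutorture_one_extend()
 * composes and decomposes reader state below.
 */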
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
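/*
 * Example invocation (illustrative values only):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * When rcutorture is built in, each torture_param() above may likewise
 * be set on the kernel boot line with an "rcutorture." prefix.
 */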
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};
/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int ext_irq_conflict;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}
static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}
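/*
 * Rough odds for the delays above, assuming nrealreaders=4: the long
 * mdelay() fires about once per 4 * 2000 * 300 = 2.4M calls, the short
 * udelay() about once per 4 * 2 * 200 = 1600 calls, and the preemption
 * point about once per 4 * 500 = 2000 calls.
 */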
static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
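/*
 * Worked example: an element enters the pipeline with rtort_pipe_count
 * of zero and is aged once per grace period, so with RCU_TORTURE_PIPE_LEN
 * of 10 it survives ten grace periods before this function returns true
 * and the element can be handed back to rcu_torture_free().
 */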
/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}
static unsigned long rcu_no_completed(void)
{
	return 0;
}
static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}
static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}
static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}
static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}
static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}
static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}
static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}
static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}
static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}
static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}
static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}
static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}
static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}
static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};
static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}
static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};
/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};
/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}
static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};
static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}
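/*
 * The smp_store_release() above pairs with the smp_load_acquire() of
 * ->inflight in rcu_torture_boost(), so a task seeing ->inflight == 0
 * also sees all of the RCU-core accesses preceding this callback.
 */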
static int old_rt_runtime = -1;
static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}
static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}
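/*
 * Example of the threshold above: with test_boost_duration=4 and
 * HZ=1000, a callback still pending after 4 * 1000 - 500 = 3500
 * jiffies, half a second short of the full boost-test interval, is
 * taken as evidence that priority boosting failed.
 */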
static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track if the test failed already in this test interval? */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1, in
		 * this case the boost check would never happen in the above
		 * loop so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No updates primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer"))
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free))
					WARN_ON_ONCE(1);
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}
/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was
 * actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}
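/*
 * Illustrative transition: with *readstate == (RCUTORTURE_RDR_RCU |
 * RCUTORTURE_RDR_BH) and newstate == (RCUTORTURE_RDR_RCU |
 * RCUTORTURE_RDR_IRQ), statesnew is _IRQ (interrupts are disabled
 * first) and statesold is _BH (bh is reenabled afterwards), while the
 * RCU reader itself stays in force across the change.
 */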
/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}
/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* One time in eight keep lots of bits; otherwise keep only one bit. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    !(mask & cur_ops->ext_irq_conflict) &&
	    (oldmask & cur_ops->ext_irq_conflict))
		mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
	return mask ?: RCUTORTURE_RDR_RCU;
}
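/*
 * Worked example of the selection above: when randmask1 ends in 0b000
 * (one chance in eight), the candidate mask keeps whatever subset of
 * bits randmask2 selects; otherwise it is narrowed to the single bit
 * 1 << (randmask2 % RCUTORTURE_RDR_NBITS).  Either way, the final
 * "?:" guarantees at least RCUTORTURE_RDR_RCU.
 */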
/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}
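/*
 * The bias arithmetic above keeps the loop count i in the range
 * [1, RCUTORTURE_RDR_MAX_LOOPS + 1], and OR-ing i with i >> 3 before
 * masking skews the distribution toward the larger counts, producing
 * longer chains of reader extensions on average.
 */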
/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}
static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand))
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep)) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}
static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}
static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}
static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}
/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}
/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};
/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}
/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	int rfc_gps;
};
static DEFINE_SPINLOCK(rcu_fwd_lock);
static struct rcu_fwd_cb *rcu_fwd_cb_head;
static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
static long n_launders_cb;
static unsigned long rcu_fwd_startat;
static bool rcu_fwd_emergency_stop;
#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)];
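/*
 * Bucket arithmetic for the histogram above: each bucket spans
 * HZ / FWD_CBS_HIST_DIV jiffies (a tenth of a second), and the array
 * covers twice MAX_FWD_CB_JIFFIES, i.e. 2 * 8 * HZ / (HZ / 10) = 160
 * buckets, so callbacks that straggle in after the test interval still
 * land in a valid slot.
 */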
static void rcu_torture_fwd_cb_hist(void)
{
	int i;
	int j;

	for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
		if (n_launders_hist[i] > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rcu_fwd_startat);
	for (j = 0; j <= i; j++)
		pr_cont(" %ds/%d: %ld",
			j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j]);
	pr_cont("\n");
}
/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rcu_fwd_lock, flags);
	rfcpp = rcu_fwd_cb_tail;
	rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
	i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(n_launders_hist))
		i = ARRAY_SIZE(n_launders_hist) - 1;
	n_launders_hist[i]++;
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}
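/*
 * Note the tail-pointer queue discipline above: rcu_fwd_cb_tail always
 * points at the ->rfc_next field (or at rcu_fwd_cb_head itself when
 * the list is empty) that should receive the next re-posted callback,
 * making the enqueue O(1) under rcu_fwd_lock with no list traversal.
 */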
/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(void)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rcu_fwd_lock, flags);
		rfcp = rcu_fwd_cb_head;
		if (!rfcp)
			break;
		rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rcu_fwd_cb_head)
			rcu_fwd_cb_tail = &rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
	}
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
	return freed;
}
/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
}
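/*
 * Example of the duration computation above: if cur_ops->stall_dur()
 * reports 21 * HZ, then sd is 21 * HZ + 1 and, with the default
 * fwd_progress_div=4, sd4 is roughly sd / 4, so dur is drawn from
 * [sd4, sd): between a quarter of a stall-warning interval and just
 * under a full one.
 */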
/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(void)
{
	unsigned long cver;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	n_launders_cb = 0;
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
		n_launders_hist[i] = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	while (time_before(jiffies, stopat) &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
		}
		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		cond_resched();
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree();

	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		rcu_torture_fwd_cb_hist();
	}
}
/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	rcu_torture_fwd_cb_hist();
	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	return NOTIFY_OK;
}
static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};
/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		WRITE_ONCE(rcu_fwd_emergency_stop, false);
		register_oom_notifier(&rcutorture_oom_nb);
		rcu_torture_fwd_prog_nr(&tested, &tested_tries);
		rcu_torture_fwd_prog_cr();
		unregister_oom_notifier(&rcutorture_oom_nb);

		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}
/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	return torture_create_kthread(rcu_torture_fwd_prog,
				      NULL, fwd_prog_task);
}
/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		local_irq_disable(); /* Just to test no-irq call_rcu(). */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		local_irq_enable();
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
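
/*
 * Memory-ordering note: the smp_load_acquire() of barrier_phase above
 * pairs with the smp_store_release() in rcu_torture_barrier() below,
 * so a CBs kthread that observes the flipped phase is also guaranteed
 * to observe the re-initialized barrier_cbs_count and
 * barrier_cbs_invoked values that were stored before the flip.
 */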

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON_ONCE(1);
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
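
/*
 * Taken together, the two kthread functions above implement one round
 * of barrier testing: flip barrier_phase, wake each CBs kthread so
 * that it posts exactly one callback, wait for all of the posts,
 * invoke ->cb_barrier(), and then verify that every posted callback
 * was invoked before the barrier operation returned.
 */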

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (!barrier_cbs_tasks || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}
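
/*
 * The number of CBs kthreads is set by the n_barrier_cbs module
 * parameter, so (purely as an illustration)
 * "modprobe rcutorture n_barrier_cbs=4" creates four callback-posting
 * kthreads via the loop above, while n_barrier_cbs <= 0 skips barrier
 * testing entirely.
 */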

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

/* Check whether the current run can test RCU priority boosting. */
static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}
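
/*
 * The return value of rcu_torture_can_boost() gates both the
 * cpuhp_setup_state() call in rcu_torture_init() and the matching
 * cpuhp_remove_state() in rcu_torture_cleanup(), keeping setup and
 * teardown in agreement about whether boosting was actually tested.
 */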

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}

	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}
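
/*
 * Note the teardown ordering above: the barrier, forward-progress,
 * stall, and writer kthreads stop before the readers, the stats
 * kthread stops before the final rcu_torture_stats_print(), and the
 * last ->cb_barrier() wait precedes ->cleanup(), so no torture
 * callback can still be in flight when torture-type-specific state is
 * torn down.
 */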

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period. Unlikely, but can happen. If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
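
/*
 * The duplicate-call_rcu() test above is invoked from
 * rcu_torture_init() below only when the object_debug module parameter
 * is set, so (as an illustration) "modprobe rcutorture object_debug=1"
 * on a kernel built with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y should
 * provoke a debug-objects complaint about the duplicated callback.
 */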

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
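
	/*
	 * Worked example: because negative values take the else branch
	 * above, the default nreaders of -1 yields
	 * num_online_cpus() - 2 - (-1) == num_online_cpus() - 1 readers,
	 * leaving one CPU for the writer; more negative values reserve
	 * additional CPUs.
	 */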

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		firsterr = torture_stutter_init(stutter * HZ);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
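
/*
 * Example module usage (illustrative; the parameter list is far from
 * exhaustive):
 *
 *	modprobe rcutorture torture_type=srcu fwd_progress=1 n_barrier_cbs=4
 *
 * This selects srcu_ops from torture_ops[] above and enables both
 * forward-progress and barrier testing. Unloading the module runs
 * rcu_torture_cleanup() via module_exit(), stopping the kthreads and
 * printing the end-of-test summary.
 */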