// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>
#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
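
/*
 * Illustrative sketch (not used by the code): a reader's state word packs
 * the protection bits above with the flavor-reader index bits.  For
 * example, a reader holding rcu_read_lock_bh() plus a flavor readlock()
 * that returned index 1 carries the state
 *
 *	RCUTORTURE_RDR_RBH | (1 << RCUTORTURE_RDR_SHIFT_1) == 0x108
 *
 * rcutorture_one_extend() below transitions between such states.
 */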
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13,
	      "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
	      "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false,
	      "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
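
/*
 * Example invocation (hypothetical values, for illustration only):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * This selects the SRCU flavor defined below, with eight reader kthreads
 * and statistics printed every 30 seconds.
 */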
static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10
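
/*
 * Each updater cycle retires the previous rcu_torture element through a
 * RCU_TORTURE_PIPE_LEN-stage pipeline, one stage per grace period.  A
 * reader observing ->rtort_pipe_count > 1 has therefore spanned at least
 * one full grace period, which indicates a too-short grace period and
 * thus a broken RCU implementation.
 */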
// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;
// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_POLL_GET		7
#define RTWS_POLL_WAIT		8
#define RTWS_SYNC		9
#define RTWS_STUTTER		10
#define RTWS_STOPPING		11
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_POLL_GET",
	"RTWS_POLL_WAIT",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};
/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state)(void);
	unsigned long (*start_gp_poll)(void);
	bool (*poll_gp_state)(unsigned long oldstate);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
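
/*
 * Each flavor below instantiates this vector; a minimal sketch (for
 * illustration only, mirroring rcu_ops further down) might look like:
 *
 *	static struct rcu_torture_ops my_ops = {
 *		.readlock	= my_read_lock,		// returns an index
 *		.readunlock	= my_read_unlock,	// consumes that index
 *		.sync		= my_synchronize,
 *		.name		= "my"
 *	};
 *
 * The torture_type module parameter selects which vector cur_ops points to.
 */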
/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}
static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}
static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * wait primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}
static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_ops = {
	.ttype			= RCU_FLAVOR,
	.init			= rcu_sync_torture_init,
	.readlock		= rcu_torture_read_lock,
	.read_delay		= rcu_read_delay,
	.readunlock		= rcu_torture_read_unlock,
	.readlock_held		= torture_readlock_not_held,
	.get_gp_seq		= rcu_get_gp_seq,
	.gp_diff		= rcu_seq_diff,
	.deferred_free		= rcu_torture_deferred_free,
	.sync			= synchronize_rcu,
	.exp_sync		= synchronize_rcu_expedited,
	.get_gp_state		= get_state_synchronize_rcu,
	.start_gp_poll		= start_poll_synchronize_rcu,
	.poll_gp_state		= poll_state_synchronize_rcu,
	.cond_sync		= cond_synchronize_rcu,
	.call			= call_rcu,
	.cb_barrier		= rcu_barrier,
	.fqs			= rcu_force_quiescent_state,
	.stats			= NULL,
	.gp_kthread_dbg		= show_rcu_gp_kthreads,
	.check_boost_failed	= rcu_check_boost_fail,
	.stall_dur		= rcu_jiffies_till_stall_check,
	.irq_capable		= 1,
	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables		= RCUTORTURE_MAX_EXTEND,
	.name			= "rcu"
};
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}
static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}
static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}
static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}
static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcu"
};
static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcud"
};
/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};
/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.irq_capable	= 1,
	.name		= "trivial"
};
#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU
#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
	.cbflood_max	= 50000,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU
#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.readlock_held	= rcu_read_lock_trace_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
	.cbflood_max	= 50000,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;
static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");
		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}
static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
// Used by writers to randomly choose from the available grace-period
// primitives.  The only purpose of the initialization is to size the array.
static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC };
static int nsynctypes;
/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_poll1 = gp_poll, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}
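
/*
 * For example, a default "rcu" run with no gp_* module parameters set
 * leaves all five gp_*1 flags true, and because rcu_ops supplies all of
 * the required primitives, synctype[] ends up holding RTWS_COND_GET,
 * RTWS_EXP_SYNC, RTWS_DEF_FREE, RTWS_POLL_GET, and RTWS_SYNC.
 */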
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	int expediting = 0;
	unsigned long gp_snap;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	bool stutter_waited;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
		return 0;
	}
	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
					  cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !atomic_read(&rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended)
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		torture_kthread_stopping("rcu_torture_fakewriter");
		return 0;
	}

	do {
		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else {
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				break;
			case RTWS_EXP_SYNC:
				cur_ops->exp_sync();
				break;
			case RTWS_COND_GET:
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				cur_ops->cond_sync(gp_snap);
				break;
			case RTWS_POLL_GET:
				gp_snap = cur_ops->start_gp_poll();
				while (!cur_ops->poll_gp_state(gp_snap)) {
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				break;
			case RTWS_SYNC:
				cur_ops->sync();
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}
// Set up and carry out testing of RCU's global memory ordering
static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
					struct torture_random_state *trsp)
{
	unsigned long loops;
	int noc = torture_num_online_cpus();
	int rdrchked;
	int rdrchker;
	struct rcu_torture_reader_check *rtrcp; // Me.
	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.

	if (myid < 0)
		return; // Don't try this from timer handlers.

	// Increment my counter.
	rtrcp = &rcu_torture_reader_mbchk[myid];
	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);

	// Attempt to assign someone else some checking work.
	rdrchked = torture_random(trsp) % nrealreaders;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	rdrchker = torture_random(trsp) % nrealreaders;
	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
	    !READ_ONCE(rtp->rtort_chkp) &&
	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
		rtrcp->rtc_chkrdr = rdrchked;
		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
	}

	// If assigned some completed work, do it!
	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
		return; // No work or work not yet ready.
	rdrchked = rtrcp_assigner->rtc_chkrdr;
	if (WARN_ON_ONCE(rdrchked < 0))
		return;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
	atomic_inc(&n_rcu_torture_mbchk_tries);
	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
		atomic_inc(&n_rcu_torture_mbchk_fail);
	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
	rtrcp_assigner->rtc_ready = 0;
	smp_store_release(&rtrcp->rtc_assigner, NULL);  // Someone else can assign us work.
	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
}
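
/*
 * Rationale for the check above: the ->rtc_chkloops snapshot is taken
 * before a grace period elapses (->rtc_ready is set only after that grace
 * period ends), so if the checker's later load of the checked reader's
 * ->rtc_myloops is smaller than that pre-grace-period snapshot, RCU's
 * global memory ordering has been violated.
 */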
/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was
 * actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew1 = -1;
	int idxnew2 = -1;
	int idxold1 = *readstate;
	int idxold2 = idxold1;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold2 < 0);
	WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU_1)
		idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
	if (statesnew & RCUTORTURE_RDR_RCU_2)
		idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;

	/*
	 * Next, remove old protection, in decreasing order of strength
	 * to avoid unlock paths that aren't safe in the stronger
	 * context.  Namely: BH can not be enabled with disabled interrupts.
	 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
	 * context.
	 */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_RCU_2) {
		cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
		WARN_ON_ONCE(idxnew2 != -1);
		idxold2 = 0;
	}
	if (statesold & RCUTORTURE_RDR_RCU_1) {
		bool lockit;

		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
		WARN_ON_ONCE(idxnew1 != -1);
		idxold1 = 0;
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew1 == -1)
		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
	WARN_ON_ONCE(idxnew1 < 0);
	if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
		pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
	if (idxnew2 == -1)
		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
	WARN_ON_ONCE(idxnew2 < 0);
	WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
	*readstate = idxnew1 | idxnew2 | newstate;
	WARN_ON_ONCE(*readstate < 0);
	if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
		pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
}
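
/*
 * Worked example (illustrative): entering with *readstate == 0 and
 * newstate == RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_BH first disables bh,
 * then calls ->readlock() and stores the returned index at bit position
 * RCUTORTURE_RDR_SHIFT_1 of the resulting reader state.  A later call
 * with newstate == 0 unwinds both, in decreasing order of strength.
 */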
/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
	return mask;
}
1577 /* Return a random protection state mask, but with at least one bit set. */
1579 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1581 int mask = rcutorture_extend_mask_max();
1582 unsigned long randmask1 = torture_random(trsp) >> 8;
1583 unsigned long randmask2 = randmask1 >> 3;
1584 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1585 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
1586 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1588 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
1589 /* Mostly only one bit (need preemption!), sometimes lots of bits. */
1590 if (!(randmask1 & 0x7))
1591 mask = mask & randmask2;
1593 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1595 // Can't have nested RCU reader without outer RCU reader.
1596 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
1597 if (oldmask & RCUTORTURE_RDR_RCU_1)
1598 mask &= ~RCUTORTURE_RDR_RCU_2;
1600 mask |= RCUTORTURE_RDR_RCU_1;
1604 * Can't enable bh w/irq disabled.
1606 if (mask & RCUTORTURE_RDR_IRQ)
1607 mask |= oldmask & bhs;
1610 * Ideally these sequences would be detected in debug builds
1611 * (regardless of RT), but until then don't stop testing
1614 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1615 /* Can't modify BH in atomic context */
1616 if (oldmask & preempts_irq)
1618 if ((oldmask | mask) & preempts_irq)
1619 mask |= oldmask & bhs;
1622 return mask ?: RCUTORTURE_RDR_RCU_1;
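
/*
 * For instance, if the new mask includes RCUTORTURE_RDR_IRQ, it inherits
 * the old mask's bh bits above, so that the subsequent transition never
 * invokes local_bh_enable() with interrupts disabled.
 */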
/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}
/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
{
	unsigned long cookie;
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	WARN_ON_ONCE(!rcu_is_watching());
	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
		cookie = cur_ops->get_gp_state();
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  !cur_ops->readlock_held || cur_ops->readlock_held());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rcu_torture_reader_do_mbchk(myid, p, trsp);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = READ_ONCE(p->rtort_pipe_count);
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
		WARN_ONCE(cur_ops->poll_gp_state(cookie),
			  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
			  __func__,
			  rcu_torture_writer_state_getname(),
			  rcu_torture_writer_state,
			  cookie, cur_ops->get_gp_state());
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate);
	// This next splat is expected behavior if leakpointer, especially
	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}
static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
			torture_hrtimeout_us(500, 1000, &rand);
			lastsleep = jiffies + 10;
		}
		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}
/*
 * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
 * increase race probabilities and fuzzes the interval between toggling.
 */
static int rcu_nocb_toggle(void *arg)
{
	int cpu;
	int maxcpu = -1;
	int oldnice = task_nice(current);
	long r;
	DEFINE_TORTURE_RANDOM(rand);
	ktime_t toggle_delay;
	unsigned long toggle_fuzz;
	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);

	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
	while (!rcu_inkernel_boot_has_ended())
		schedule_timeout_interruptible(HZ / 10);
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (toggle_interval > ULONG_MAX)
		toggle_fuzz = ULONG_MAX >> 3;
	else
		toggle_fuzz = toggle_interval >> 3;
	if (toggle_fuzz <= 0)
		toggle_fuzz = NSEC_PER_USEC;
	do {
		r = torture_random(&rand);
		cpu = (r >> 4) % (maxcpu + 1);
		if (r & 0x1) {
			rcu_nocb_cpu_offload(cpu);
			atomic_long_inc(&n_nocb_offload);
		} else {
			rcu_nocb_cpu_deoffload(cpu);
			atomic_long_inc(&n_nocb_deoffload);
		}
		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
		if (stutter_wait("rcu_nocb_toggle"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_nocb_toggle");
	return 0;
}
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when the
 * rcu_torture_stats kthread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	struct rcu_torture *rtcp;
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	rtcp = rcu_access_pointer(rcu_torture_current);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rtcp,
		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld ",
		data_race(n_barrier_successes),
		data_race(n_barrier_attempts),
		data_race(n_rcu_torture_barrier_error));
	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
	pr_cont("nocb-toggles: %ld:%ld\n",
		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) ||
	    atomic_read(&n_rcu_torture_mbchk_fail) ||
	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
		WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
		WARN_ON_ONCE(i > 1); // Too-short grace period
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_access_pointer(rcu_torture_current) &&
	    !rcu_stall_is_suppressed()) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0U : wtp->__state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		if (cur_ops->gp_kthread_dbg)
			cur_ops->gp_kthread_dbg();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}
/* Test mem_dump_obj() and friends. */
static void rcu_torture_mem_dump_obj(void)
{
	struct rcu_head *rhp;
	struct kmem_cache *kcp;
	static int z;

	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
	mem_dump_obj(ZERO_SIZE_PTR);
	pr_alert("mem_dump_obj(NULL):");
	mem_dump_obj(NULL);
	pr_alert("mem_dump_obj(%px):", &rhp);
	mem_dump_obj(&rhp);
	pr_alert("mem_dump_obj(%px):", rhp);
	mem_dump_obj(rhp);
	pr_alert("mem_dump_obj(%px):", &rhp->func);
	mem_dump_obj(&rhp->func);
	pr_alert("mem_dump_obj(%px):", &z);
	mem_dump_obj(&z);
	kmem_cache_free(kcp, rhp);
	kmem_cache_destroy(kcp);
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
	mem_dump_obj(rhp);
	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
	mem_dump_obj(&rhp->func);
	kfree(rhp);
	rhp = vmalloc(4096);
	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
	mem_dump_obj(rhp);
	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
	mem_dump_obj(&rhp->func);
	vfree(rhp);
}
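/*
 * What to expect from the above (exact output depends on the mm
 * debugging configuration): for the kmem_cache and kmalloc pointers,
 * mem_dump_obj() normally identifies the slab cache and, given
 * SLAB_STORE_USER, the allocation site; for the vmalloc pointer it
 * reports the vmalloc region; and for ZERO_SIZE_PTR, NULL, on-stack,
 * and static addresses it simply states what kind of non-slab address
 * it was handed rather than dereferencing it.
 */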
static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "stall_cpu_block=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d "
		 "read_exit_delay=%d read_exit_burst=%d "
		 "nocbs_nthreads=%d nocbs_toggle=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 stall_cpu_block,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff,
		 read_exit_delay, read_exit_burst,
		 nocbs_nthreads, nocbs_toggle);
}
static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}
static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
					      cpu, "rcu_torture_boost_%u");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	mutex_unlock(&boost_mutex);
	return 0;
}
/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	int idx;
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop() && stall_gp_kthread > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
			if (kthread_should_stop())
				break;
			schedule_timeout_uninterruptible(HZ);
		}
	}
	if (!kthread_should_stop() && stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		idx = cur_ops->readlock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else if (!stall_cpu_block)
			preempt_disable();
		pr_alert("%s start on CPU %d.\n",
			 __func__, raw_smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			if (stall_cpu_block) {
#ifdef CONFIG_PREEMPTION
				preempt_schedule();
#else
				schedule_timeout_uninterruptible(HZ);
#endif
			} else if (stall_no_softlockup) {
				touch_softlockup_watchdog();
			}
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else if (!stall_cpu_block)
			preempt_enable();
		cur_ops->readunlock(idx);
	}
	pr_alert("%s end.\n", __func__);
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}
/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}
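/*
 * Lifecycle sketch for the self-propagating callback above, as driven
 * by rcu_torture_fwd_prog_nr() below:
 *
 *	WRITE_ONCE(fcs.stop, 0);
 *	cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); // Start self-posting.
 *	...						 // Measurement interval.
 *	WRITE_ONCE(fcs.stop, 1);  // Ask the callback to stop reposting.
 *	cur_ops->sync();	  // Wait for an in-flight instance to see ->stop.
 *	cur_ops->cb_barrier();	  // Wait for the final instance to run.
 *	WARN_ON(READ_ONCE(fcs.stop) != 2); // Callback acknowledged the stop.
 */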
/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	struct rcu_fwd *rfc_rfp;
	int rfc_gps;
};

#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))

struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};

struct rcu_fwd {
	spinlock_t rcu_fwd_lock;
	struct rcu_fwd_cb *rcu_fwd_cb_head;
	struct rcu_fwd_cb **rcu_fwd_cb_tail;
	long n_launders_cb;
	unsigned long rcu_fwd_startat;
	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
	unsigned long rcu_launder_gp_seq_start;
	int rcu_fwd_id;
};

static DEFINE_MUTEX(rcu_fwd_mutex);
static struct rcu_fwd *rcu_fwds;
static unsigned long rcu_fwd_seq;
static atomic_long_t rcu_fwd_max_cbs;
static bool rcu_fwd_emergency_stop;
static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
		if (rfp->n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
		 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
	gps_old = rfp->rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = rfp->n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV,
			rfp->n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}
/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;
	struct rcu_fwd *rfp = rfcp->rfc_rfp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
	rfcpp = rfp->rcu_fwd_cb_tail;
	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
	rfp->n_launders_hist[i].n_launders++;
	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
}
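/*
 * The enqueue above uses the head/indirect-tail idiom: ->rcu_fwd_cb_tail
 * always points at the NULL link to be overwritten next, so insertion
 * at the end is O(1) with no empty-list special case.  Stripped-down
 * sketch with hypothetical names:
 *
 *	p->next = NULL;
 *	*tail = p;	 // Former tail link (or head) now points at p.
 *	tail = &p->next; // The next insertion will overwrite p->next.
 */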
// Give the scheduler a chance, even on nohz_full CPUs.
static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
{
	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
		// Real call_rcu() floods hit userspace, so emulate that.
		if (need_resched() || (iter & 0xfff))
			schedule();
		return;
	}
	// No userspace emulation: CB invocation throttles call_rcu().
	cond_resched();
}
/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
		rfcp = rfp->rcu_fwd_cb_head;
		if (!rfcp) {
			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
			break;
		}
		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rfp->rcu_fwd_cb_head)
			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
		rcu_torture_fwd_prog_cond_resched(freed);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	return freed;
}
/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
				    int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
	if (!cur_ops->sync)
		return; // Cannot do need_resched() forward progress testing without ->sync.
	if (cur_ops->call && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	atomic_inc(&rcu_fwd_cb_nodelay);
	cur_ops->sync(); /* Later readers see above write. */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !shutdown_time_arrived() &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
			 rfp->rcu_fwd_id, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
	atomic_dec(&rcu_fwd_cb_nodelay);
}
/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
{
	unsigned long cver;
	unsigned long flags;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */
	if (!cur_ops->call)
		return; /* Can't do call_rcu() fwd prog without ->call. */

	/* Loop continuously posting RCU callbacks. */
	atomic_inc(&rcu_fwd_cb_nodelay);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
		rfp->n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rfp->rcu_launder_gp_seq_start = gps;
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rfp->rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
			rfcp->rfc_rfp = rfp;
		} else {
			rfcp = NULL;
		}
		if (rfcp)
			cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree(rfp);

	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
	    !shutdown_time_arrived()) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
		mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
		rcu_torture_fwd_cb_hist(rfp);
		mutex_unlock(&rcu_fwd_mutex);
	}
	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	atomic_dec(&rcu_fwd_cb_nodelay);
}
/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	int i;
	long ncbs;
	struct rcu_fwd *rfp;

	mutex_lock(&rcu_fwd_mutex);
	rfp = rcu_fwds;
	if (!rfp) {
		mutex_unlock(&rcu_fwd_mutex);
		return NOTIFY_OK;
	}
	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	for (i = 0; i < fwd_progress; i++) {
		rcu_torture_fwd_cb_hist(&rfp[i]);
		rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
	}
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	cur_ops->cb_barrier();
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	cur_ops->cb_barrier();
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	mutex_unlock(&rcu_fwd_mutex);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};
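/*
 * Wiring sketch for the notifier above (see rcu_torture_fwd_prog_init()
 * and rcu_torture_fwd_prog_cleanup() below): a single registration call
 * each way, with the callback's NOTIFY_OK return and *nfreed increment
 * following the usual out-of-memory notifier conventions.
 *
 *	register_oom_notifier(&rcutorture_oom_nb);	// Start of test.
 *	...
 *	unregister_oom_notifier(&rcutorture_oom_nb);	// End of test.
 */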
/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	bool firsttime = true;
	long max_cbs;
	int oldnice = task_nice(current);
	unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
	struct rcu_fwd *rfp = args;
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		if (!rfp->rcu_fwd_id) {
			schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
			WRITE_ONCE(rcu_fwd_emergency_stop, false);
			if (!firsttime) {
				max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
				pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
			}
			firsttime = false;
			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
		} else {
			while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
				schedule_timeout_interruptible(1);
			oldseq = READ_ONCE(rcu_fwd_seq);
		}
		pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
		if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
			rcu_torture_fwd_prog_cr(rfp);
		if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
		    (!IS_ENABLED(CONFIG_TINY_RCU) ||
		     (rcu_inkernel_boot_has_ended() &&
		      torture_num_online_cpus() > rfp->rcu_fwd_id)))
			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);

		/* Avoid slow periods, better to test when busy. */
		if (stutter_wait("rcu_torture_fwd_prog"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	if (!rfp->rcu_fwd_id) {
		WARN_ON(!tested && tested_tries >= 5);
		pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	}
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}
/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	int i;
	int ret = 0;
	struct rcu_fwd *rfp;

	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (fwd_progress >= nr_cpu_ids) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n");
		fwd_progress = nr_cpu_ids;
	} else if (fwd_progress < 0) {
		fwd_progress = nr_cpu_ids;
	}
	if ((!cur_ops->sync && !cur_ops->call) ||
	    (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		fwd_progress = 0;
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		fwd_progress = 0;
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
	fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
	if (!rfp || !fwd_prog_tasks) {
		kfree(rfp);
		kfree(fwd_prog_tasks);
		fwd_prog_tasks = NULL;
		fwd_progress = 0;
		return -ENOMEM;
	}
	for (i = 0; i < fwd_progress; i++) {
		spin_lock_init(&rfp[i].rcu_fwd_lock);
		rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
		rfp[i].rcu_fwd_id = i;
	}
	mutex_lock(&rcu_fwd_mutex);
	rcu_fwds = rfp;
	mutex_unlock(&rcu_fwd_mutex);
	register_oom_notifier(&rcutorture_oom_nb);
	for (i = 0; i < fwd_progress; i++) {
		ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
		if (ret) {
			fwd_progress = i;
			return ret;
		}
	}
	return 0;
}
static void rcu_torture_fwd_prog_cleanup(void)
{
	int i;
	struct rcu_fwd *rfp;

	if (!rcu_fwds || !fwd_prog_tasks)
		return;
	for (i = 0; i < fwd_progress; i++)
		torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
	unregister_oom_notifier(&rcutorture_oom_nb);
	mutex_lock(&rcu_fwd_mutex);
	rfp = rcu_fwds;
	rcu_fwds = NULL;
	mutex_unlock(&rcu_fwd_mutex);
	kfree(rfp);
	kfree(fwd_prog_tasks);
	fwd_prog_tasks = NULL;
}
/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* IPI handler to get callback posted on desired CPU, if online. */
static void rcu_torture_barrier1cb(void *rcu_void)
{
	struct rcu_head *rhp = rcu_void;

	cur_ops->call(rhp, rcu_torture_barrier_cbf);
}
/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
					     &rcu, 1))
			// IPI failed, so use direct call from current CPU.
			cur_ops->call(&rcu, rcu_torture_barrier_cbf);

		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
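/*
 * Ordering sketch for the phase handshake above: rcu_torture_barrier()
 * below initializes the per-phase counters and only then flips
 * barrier_phase with smp_store_release(), which pairs with the
 * smp_load_acquire() in this kthread's wait_event() condition:
 *
 *	// Publisher (rcu_torture_barrier()):
 *	atomic_set(&barrier_cbs_count, n_barrier_cbs);
 *	smp_store_release(&barrier_phase, !barrier_phase);
 *
 *	// Consumer (rcu_torture_barrier_cbs(), above):
 *	newphase = smp_load_acquire(&barrier_phase);
 *	cur_ops->call(...);	// Guaranteed to see the new counters.
 */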
/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON(1);
			// Wait manually for the remaining callbacks
			i = 0;
			do {
				if (WARN_ON(i++ > HZ))
					i = INT_MIN;
				schedule_timeout_interruptible(1);
				cur_ops->cb_barrier();
			} while (atomic_read(&barrier_cbs_invoked) !=
				 n_barrier_cbs &&
				 !torture_must_stop());
			smp_mb(); // Can't trust ordering if broken.
			if (!torture_must_stop())
				pr_err("Recovered: barrier_cbs_invoked = %d\n",
				       atomic_read(&barrier_cbs_invoked));
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}
/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}
static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;
	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}
static bool read_exit_child_stop;
static bool read_exit_child_stopped;
static wait_queue_head_t read_exit_wq;

// Child kthread which just does an rcutorture reader and exits.
static int rcu_torture_read_exit_child(void *trsp_in)
{
	struct torture_random_state *trsp = trsp_in;

	set_user_nice(current, MAX_NICE);
	// Minimize time between reading and exiting.
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	(void)rcu_torture_one_read(trsp, -1);
	return 0;
}

// Parent kthread which creates and destroys read-exit child kthreads.
static int rcu_torture_read_exit(void *unused)
{
	int count = 0;
	bool errexit = false;
	int i;
	struct task_struct *tsp;
	DEFINE_TORTURE_RANDOM(trs);

	// Allocate and initialize.
	set_user_nice(current, MAX_NICE);
	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");

	// Each pass through this loop does one read-exit episode.
	do {
		if (++count > read_exit_burst) {
			VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
			rcu_barrier(); // Wait for task_struct free, avoid OOM.
			for (i = 0; i < read_exit_delay; i++) {
				schedule_timeout_uninterruptible(HZ);
				if (READ_ONCE(read_exit_child_stop))
					break;
			}
			if (!READ_ONCE(read_exit_child_stop))
				VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
			count = 0;
		}
		if (READ_ONCE(read_exit_child_stop))
			break;
		// Spawn child.
		tsp = kthread_run(rcu_torture_read_exit_child,
				  &trs, "%s",
				  "rcu_torture_read_exit_child");
		if (IS_ERR(tsp)) {
			TOROUT_ERRSTRING("out of memory");
			errexit = true;
			tsp = NULL;
			break;
		}
		cond_resched();
		kthread_stop(tsp);
		n_read_exits++;
		stutter_wait("rcu_torture_read_exit");
	} while (!errexit && !READ_ONCE(read_exit_child_stop));

	// Clean up and exit.
	smp_store_release(&read_exit_child_stopped, true); // After reaping.
	smp_mb(); // Store before wakeup.
	wake_up(&read_exit_wq);
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
	torture_kthread_stopping("rcu_torture_read_exit");
	return 0;
}
static int rcu_torture_read_exit_init(void)
{
	if (read_exit_burst <= 0)
		return 0;
	init_waitqueue_head(&read_exit_wq);
	read_exit_child_stop = false;
	read_exit_child_stopped = false;
	return torture_create_kthread(rcu_torture_read_exit, NULL,
				      read_exit_task);
}

static void rcu_torture_read_exit_cleanup(void)
{
	if (!read_exit_task)
		return;
	WRITE_ONCE(read_exit_child_stop, true);
	smp_mb(); // Above write before wait.
	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
}
static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL) {
			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
			cur_ops->cb_barrier();
		}
		rcu_gp_slow_unregister(NULL);
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		rcu_gp_slow_unregister(NULL);
		return;
	}

	if (cur_ops->gp_kthread_dbg)
		cur_ops->gp_kthread_dbg();
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (nocb_tasks) {
		for (i = 0; i < nrealnocbers; i++)
			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
		kfree(nocb_tasks);
		nocb_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	kfree(rcu_torture_reader_mbchk);
	rcu_torture_reader_mbchk = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
		 cur_ops->name, (long)gp_seq, flags,
		 rcutorture_seq_diff(gp_seq, start_gp_seq));
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost() && rcutor_hp >= 0)
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL) {
		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
		cur_ops->cb_barrier();
	}
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_mem_dump_obj();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
	rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
}
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;
	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	if (rhp) {
		call_rcu(rhp, rcu_torture_leak_cb);
		call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
	}
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
	kfree(rhp);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}
static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	int flags = 0;
	unsigned long gp_seq = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
		&trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	start_gp_seq = gp_seq;
	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_mbchk_fail, 0);
	atomic_set(&n_rcu_torture_mbchk_tries, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 0;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = 0;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {
		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	// Testing RCU priority boosting requires rcutorture do
	// some serious abuse.  Counter this by running ksoftirqd
	// at higher priority.
	if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
		for_each_online_cpu(cpu) {
			struct sched_param sp;
			struct task_struct *t;

			t = per_cpu(ksoftirqd, cpu);
			WARN_ON_ONCE(!t);
			sp.sched_priority = 2;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		}
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}
module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);