// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
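
/*
 * Illustrative encoding sketch (not itself used by the test): the
 * reader-state word packs the RCUTORTURE_RDR_* protection bits into the
 * low RCUTORTURE_RDR_SHIFT bits and the index returned by
 * cur_ops->readlock() above them, so that, for example:
 *
 *	state = (idx << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_BH;
 *	flags = state & RCUTORTURE_RDR_MASK;	// Protection bits.
 *	idx   = state >> RCUTORTURE_RDR_SHIFT;	// SRCU index.
 *
 * See rcutorture_one_extend() below for the real encoding and decoding.
 */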
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13,
	      "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
	      "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
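
/*
 * Example invocation (assuming a modular build; see
 * Documentation/RCU/torture.rst for the full parameter list):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 */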
static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;
#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;
// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_POLL_GET		7
#define RTWS_POLL_WAIT		8
#define RTWS_SYNC		9
#define RTWS_STUTTER		10
#define RTWS_STOPPING		11
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_POLL_GET",
	"RTWS_POLL_WAIT",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};
/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}
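
/*
 * Illustrative arithmetic: with shutdown_secs=120, shutdown_jiffies lies
 * 120*HZ after test start, so the CPU-hog tests stand down during the
 * final 30 seconds of the run.
 */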
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
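
/*
 * Typical element lifecycle (illustrative sketch; see rcu_torture_writer()
 * below for the real sequence):
 *
 *	rp = rcu_torture_alloc();
 *	rp->rtort_mbtest = 1;
 *	rcu_assign_pointer(rcu_torture_current, rp);
 *	...
 *	cur_ops->deferred_free(old_rp);	// rcu_torture_free() after a GP.
 */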
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state)(void);
	unsigned long (*start_gp_poll)(void);
	bool (*poll_gp_state)(unsigned long oldstate);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * wait primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}
static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.get_gp_state = get_state_synchronize_rcu,
	.cond_sync = cond_synchronize_rcu,
	.call = call_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = rcu_can_boost(),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}
static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.cb_barrier = NULL,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "busted"
};
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}
static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcu"
};
static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcud"
};
/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};
/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}
static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};
/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}
static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "trivial"
};
/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_rude_torture_deferred_free,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.call = call_rcu_tasks_rude,
	.cb_barrier = rcu_barrier_tasks_rude,
	.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "tasks-rude"
};
/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
}
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};
static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}
static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should do this manually by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}
static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test has already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (time_before(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1, in
		 * which case the boost check would never happen in the above
		 * loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
// Used by writers to randomly choose from the available grace-period
// primitives.  The only purpose of the initialization is to size the array.
static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC };
static int nsynctypes;
/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_poll1 = gp_poll, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}
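
/*
 * Example (hypothetical boot parameters): booting with
 * "rcutorture.gp_exp=1 rcutorture.gp_poll=1" restricts the writer to
 * expedited and polling grace-period primitives, and the polling choice
 * is honored only if cur_ops supplies both ->start_gp_poll and
 * ->poll_gp_state.
 */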
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	int expediting = 0;
	unsigned long gp_snap;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	bool stutter_waited;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No updates primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
		return 0;
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
					  cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
				WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
					  !cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended)
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else {
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				break;
			case RTWS_EXP_SYNC:
				cur_ops->exp_sync();
				break;
			case RTWS_COND_GET:
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				cur_ops->cond_sync(gp_snap);
				break;
			case RTWS_POLL_GET:
				gp_snap = cur_ops->start_gp_poll();
				while (!cur_ops->poll_gp_state(gp_snap)) {
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				break;
			case RTWS_SYNC:
				cur_ops->sync();
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}
// Set up and carry out testing of RCU's global memory ordering
static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
					struct torture_random_state *trsp)
{
	unsigned long loops;
	int noc = torture_num_online_cpus();
	int rdrchked;
	int rdrchker;
	struct rcu_torture_reader_check *rtrcp; // Me.
	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.

	if (myid < 0)
		return; // Don't try this from timer handlers.

	// Increment my counter.
	rtrcp = &rcu_torture_reader_mbchk[myid];
	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);

	// Attempt to assign someone else some checking work.
	rdrchked = torture_random(trsp) % nrealreaders;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	rdrchker = torture_random(trsp) % nrealreaders;
	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
	    !READ_ONCE(rtp->rtort_chkp) &&
	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
		rtrcp->rtc_chkrdr = rdrchked;
		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
	}

	// If assigned some completed work, do it!
	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
		return; // No work or work not yet ready.
	rdrchked = rtrcp_assigner->rtc_chkrdr;
	if (WARN_ON_ONCE(rdrchked < 0))
		return;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
	atomic_inc(&n_rcu_torture_mbchk_tries);
	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
		atomic_inc(&n_rcu_torture_mbchk_fail);
	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
	rtrcp_assigner->rtc_ready = 0;
	smp_store_release(&rtrcp->rtc_assigner, NULL);  // Someone else can assign us work.
	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
}
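
/*
 * Summary of the above checking protocol: reader A snapshots reader B's
 * ->rtc_myloops and posts the snapshot to reader C via ->rtc_assigner
 * and to the update side via ->rtort_chkp.  Once a grace period has
 * elapsed (->rtc_ready is set by rcu_torture_pipe_update_one()), C must
 * observe B's counter at or beyond the snapshot; if not, RCU's global
 * memory ordering was violated and n_rcu_torture_mbchk_fail is
 * incremented.
 */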
/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU) {
		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);

		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}
/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}
/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}
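
/*
 * Worked example (hypothetical values): if oldmask contains
 * RCUTORTURE_RDR_BH (0x01) and the candidate mask is RCUTORTURE_RDR_IRQ
 * (0x02), returning 0x02 alone would re-enable bh while interrupts are
 * disabled, so the check above widens the result to 0x02|0x01|0x08 = 0x0b.
 */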
/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}
/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
{
	unsigned long cookie;
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	WARN_ON_ONCE(!rcu_is_watching());
	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
		cookie = cur_ops->get_gp_state();
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  rcu_read_lock_trace_held() ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rcu_torture_reader_do_mbchk(myid, p, trsp);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = READ_ONCE(p->rtort_pipe_count);
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
		WARN_ONCE(cur_ops->poll_gp_state(cookie),
			  "%s: Cookie check 3 failed %s(%d) %lu->%lu\n",
			  __func__,
			  rcu_torture_writer_state_getname(),
			  rcu_torture_writer_state,
			  cookie, cur_ops->get_gp_state());
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
	// This next splat is expected behavior if leakpointer, especially
	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}
static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
			torture_hrtimeout_us(500, 1000, &rand);
			lastsleep = jiffies + 10;
		}
		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}
/*
 * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
 * increase race probabilities and fuzzes the interval between toggling.
 */
static int rcu_nocb_toggle(void *arg)
{
	int cpu;
	int maxcpu = -1;
	int oldnice = task_nice(current);
	long r;
	DEFINE_TORTURE_RANDOM(rand);
	ktime_t toggle_delay;
	unsigned long toggle_fuzz;
	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);

	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
	while (!rcu_inkernel_boot_has_ended())
		schedule_timeout_interruptible(HZ / 10);
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (toggle_interval > ULONG_MAX)
		toggle_fuzz = ULONG_MAX >> 3;
	else
		toggle_fuzz = toggle_interval >> 3;
	if (toggle_fuzz <= 0)
		toggle_fuzz = NSEC_PER_USEC;
	do {
		r = torture_random(&rand);
		cpu = (r >> 4) % (maxcpu + 1);
		if (r & 0x1) {
			rcu_nocb_cpu_offload(cpu);
			atomic_long_inc(&n_nocb_offload);
		} else {
			rcu_nocb_cpu_deoffload(cpu);
			atomic_long_inc(&n_nocb_deoffload);
		}
		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
		if (stutter_wait("rcu_nocb_toggle"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_nocb_toggle");
	return 0;
}
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	struct rcu_torture *rtcp;
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	rtcp = rcu_access_pointer(rcu_torture_current);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rtcp,
		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld ",
		data_race(n_barrier_successes),
		data_race(n_barrier_attempts),
		data_race(n_rcu_torture_barrier_error));
	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
	pr_cont("nocb-toggles: %ld:%ld\n",
		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) ||
	    atomic_read(&n_rcu_torture_mbchk_fail) ||
	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
		WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
		WARN_ON_ONCE(i > 1); // Too-short grace period
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_access_pointer(rcu_torture_current) &&
	    !rcu_stall_is_suppressed()) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		if (cur_ops->gp_kthread_dbg)
			cur_ops->gp_kthread_dbg();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}
1868 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
1870 pr_alert("%s" TORTURE_FLAG
1871 "--- %s: nreaders=%d nfakewriters=%d "
1872 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1873 "shuffle_interval=%d stutter=%d irqreader=%d "
1874 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1875 "test_boost=%d/%d test_boost_interval=%d "
1876 "test_boost_duration=%d shutdown_secs=%d "
1877 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
1878 "stall_cpu_block=%d "
1880 "onoff_interval=%d onoff_holdoff=%d "
1881 "read_exit_delay=%d read_exit_burst=%d "
1882 "nocbs_nthreads=%d nocbs_toggle=%d\n",
1883 torture_type, tag, nrealreaders, nfakewriters,
1884 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1885 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1886 test_boost, cur_ops->can_boost,
1887 test_boost_interval, test_boost_duration, shutdown_secs,
1888 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
1889 stall_cpu_block,
1890 n_barrier_cbs,
1891 onoff_interval, onoff_holdoff,
1892 read_exit_delay, read_exit_burst,
1893 nocbs_nthreads, nocbs_toggle);
1894 }
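/*
 * This single-line parameter dump is printed once at "Start of test" and
 * again with an "End of test" verdict, so that a console log is
 * self-describing. The rcutorture scripts under
 * tools/testing/selftests/rcutorture scan console output such as this
 * when summarizing runs.
 */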
1896 static int rcutorture_booster_cleanup(unsigned int cpu)
1897 {
1898 struct task_struct *t;
1900 if (boost_tasks[cpu] == NULL)
1901 return 0;
1902 mutex_lock(&boost_mutex);
1903 t = boost_tasks[cpu];
1904 boost_tasks[cpu] = NULL;
1905 rcu_torture_enable_rt_throttle();
1906 mutex_unlock(&boost_mutex);
1908 /* This must be outside of the mutex, otherwise deadlock! */
1909 torture_stop_kthread(rcu_torture_boost, t);
1910 return 0;
1911 }
1913 static int rcutorture_booster_init(unsigned int cpu)
1914 {
1915 int retval;
1917 if (boost_tasks[cpu] != NULL)
1918 return 0; /* Already created, nothing more to do. */
1920 /* Don't allow time recalculation while creating a new task. */
1921 mutex_lock(&boost_mutex);
1922 rcu_torture_disable_rt_throttle();
1923 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1924 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1925 cpu_to_node(cpu),
1926 "rcu_torture_boost");
1927 if (IS_ERR(boost_tasks[cpu])) {
1928 retval = PTR_ERR(boost_tasks[cpu]);
1929 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
1930 n_rcu_torture_boost_ktrerror++;
1931 boost_tasks[cpu] = NULL;
1932 mutex_unlock(&boost_mutex);
1933 return retval;
1934 }
1935 kthread_bind(boost_tasks[cpu], cpu);
1936 wake_up_process(boost_tasks[cpu]);
1937 mutex_unlock(&boost_mutex);
1938 return 0;
1939 }
1941 /*
1942 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
1943 * induces a CPU stall for the time specified by stall_cpu.
1944 */
1945 static int rcu_torture_stall(void *args)
1946 {
1947 int idx;
1948 unsigned long stop_at;
1950 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
1951 if (stall_cpu_holdoff > 0) {
1952 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
1953 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
1954 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
1955 }
1956 if (!kthread_should_stop() && stall_gp_kthread > 0) {
1957 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
1958 rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
1959 for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
1960 if (kthread_should_stop())
1961 break;
1962 schedule_timeout_uninterruptible(HZ);
1963 }
1964 }
1965 if (!kthread_should_stop() && stall_cpu > 0) {
1966 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
1967 stop_at = ktime_get_seconds() + stall_cpu;
1968 /* RCU CPU stall is expected behavior in following code. */
1969 idx = cur_ops->readlock();
1970 if (stall_cpu_irqsoff)
1971 local_irq_disable();
1972 else if (!stall_cpu_block)
1973 preempt_disable();
1974 pr_alert("rcu_torture_stall start on CPU %d.\n",
1975 raw_smp_processor_id());
1976 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1977 stop_at))
1978 if (stall_cpu_block)
1979 schedule_timeout_uninterruptible(HZ);
1980 if (stall_cpu_irqsoff)
1981 local_irq_enable();
1982 else if (!stall_cpu_block)
1983 preempt_enable();
1984 cur_ops->readunlock(idx);
1985 }
1986 pr_alert("rcu_torture_stall end.\n");
1987 torture_shutdown_absorb("rcu_torture_stall");
1988 while (!kthread_should_stop())
1989 schedule_timeout_interruptible(10 * HZ);
1990 return 0;
1991 }
1993 /* Spawn CPU-stall kthread, if stall_cpu specified. */
1994 static int __init rcu_torture_stall_init(void)
1995 {
1996 if (stall_cpu <= 0 && stall_gp_kthread <= 0)
1997 return 0;
1998 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
1999 }
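/*
 * Illustrative invocation (parameter values chosen only for example):
 *
 *	modprobe rcutorture stall_cpu=22 stall_cpu_holdoff=30 stall_cpu_irqsoff=1
 *
 * This waits 30 seconds after module load, then spins within an RCU
 * read-side critical section with interrupts disabled for 22 seconds,
 * which should provoke an RCU CPU stall warning.
 */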
2001 /* State structure for forward-progress self-propagating RCU callback. */
2002 struct fwd_cb_state {
2003 struct rcu_head rh;
2004 int stop;
2005 };
2007 /*
2008 * Forward-progress self-propagating RCU callback function. Because
2009 * callbacks run from softirq, this function is an implicit RCU read-side
2010 * critical section.
2011 */
2012 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2013 {
2014 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2016 if (READ_ONCE(fcsp->stop)) {
2017 WRITE_ONCE(fcsp->stop, 2);
2018 return;
2019 }
2020 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2021 }
2023 /* State for continuous-flood RCU callbacks. */
2024 struct rcu_fwd_cb {
2025 struct rcu_head rh;
2026 struct rcu_fwd_cb *rfc_next;
2027 struct rcu_fwd *rfc_rfp;
2028 int rfc_gps;
2029 };
2031 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
2032 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
2033 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
2034 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
2035 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
2037 struct rcu_launder_hist {
2038 long n_launders;
2039 unsigned long launder_gp_seq;
2040 };
2042 struct rcu_fwd {
2043 spinlock_t rcu_fwd_lock;
2044 struct rcu_fwd_cb *rcu_fwd_cb_head;
2045 struct rcu_fwd_cb **rcu_fwd_cb_tail;
2046 long n_launders_cb;
2047 unsigned long rcu_fwd_startat;
2048 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2049 unsigned long rcu_launder_gp_seq_start;
2050 };
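/*
 * Histogram sizing arithmetic: each bucket spans HZ / FWD_CBS_HIST_DIV
 * jiffies (one tenth of a second), so N_LAUNDERS_HIST works out to
 * 2 * 8 * HZ / (HZ / 10) = 160 buckets, covering twice MAX_FWD_CB_JIFFIES
 * so that callback activity trailing after the eight-second flood is
 * still binned rather than lost.
 */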
2052 static DEFINE_MUTEX(rcu_fwd_mutex);
2053 static struct rcu_fwd *rcu_fwds;
2054 static bool rcu_fwd_emergency_stop;
2056 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2057 {
2058 unsigned long gps;
2059 unsigned long gps_old;
2060 int i;
2061 int j;
2063 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2064 if (rfp->n_launders_hist[i].n_launders > 0)
2065 break;
2066 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
2067 __func__, jiffies - rfp->rcu_fwd_startat);
2068 gps_old = rfp->rcu_launder_gp_seq_start;
2069 for (j = 0; j <= i; j++) {
2070 gps = rfp->n_launders_hist[j].launder_gp_seq;
2071 pr_cont(" %ds/%d: %ld:%ld",
2072 j + 1, FWD_CBS_HIST_DIV,
2073 rfp->n_launders_hist[j].n_launders,
2074 rcutorture_seq_diff(gps, gps_old));
2075 gps_old = gps;
2076 }
2077 pr_cont("\n");
2078 }
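/*
 * The resulting console line looks something like the following, with
 * entirely illustrative numbers, where each entry gives callbacks
 * invoked and grace periods completed in that tenth-of-a-second bucket:
 *
 *	rcu_torture_fwd_cb_hist: Callback-invocation histogram
 *	(duration 8012 jiffies): 1s/10: 0:1 2s/10: 7256:2 ...
 */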
2080 /* Callback function for continuous-flood RCU callbacks. */
2081 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2082 {
2083 unsigned long flags;
2084 int i;
2085 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2086 struct rcu_fwd_cb **rfcpp;
2087 struct rcu_fwd *rfp = rfcp->rfc_rfp;
2089 rfcp->rfc_next = NULL;
2090 rfcp->rfc_gps++;
2091 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2092 rfcpp = rfp->rcu_fwd_cb_tail;
2093 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2094 WRITE_ONCE(*rfcpp, rfcp);
2095 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2096 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2097 if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2098 i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2099 rfp->n_launders_hist[i].n_launders++;
2100 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2101 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2102 }
2104 // Give the scheduler a chance, even on nohz_full CPUs.
2105 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2106 {
2107 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2108 // Real call_rcu() floods hit userspace, so emulate that.
2109 if (need_resched() || (iter & 0xfff))
2110 schedule();
2111 return;
2112 }
2113 // No userspace emulation: CB invocation throttles call_rcu().
2114 cond_resched();
2115 }
2117 /*
2118 * Free all callbacks on the rcu_fwd_cb_head list, either because the
2119 * test is over or because we hit an OOM event.
2120 */
2121 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2122 {
2123 unsigned long flags;
2124 unsigned long freed = 0;
2125 struct rcu_fwd_cb *rfcp;
2127 for (;;) {
2128 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2129 rfcp = rfp->rcu_fwd_cb_head;
2130 if (!rfcp) {
2131 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2132 break;
2133 }
2134 rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2135 if (!rfp->rcu_fwd_cb_head)
2136 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2137 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2138 kfree(rfcp);
2139 freed++;
2140 rcu_torture_fwd_prog_cond_resched(freed);
2141 if (tick_nohz_full_enabled()) {
2142 local_irq_save(flags);
2143 rcu_momentary_dyntick_idle();
2144 local_irq_restore(flags);
2145 }
2146 }
2147 return freed;
2148 }
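/*
 * The rcu_momentary_dyntick_idle() call above reports an explicit
 * quiescent state to RCU. On nohz_full CPUs the scheduling-clock tick
 * may be turned off, so without this a long kfree() loop here could
 * itself stall grace periods for the duration of the cleanup.
 */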
2150 /* Carry out need_resched()/cond_resched() forward-progress testing. */
2151 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2152 int *tested, int *tested_tries)
2153 {
2154 unsigned long cver;
2155 unsigned long dur;
2156 struct fwd_cb_state fcs;
2157 unsigned long gps;
2158 int idx;
2159 int sd;
2160 int sd4;
2161 bool selfpropcb = false;
2162 unsigned long stopat;
2163 static DEFINE_TORTURE_RANDOM(trs);
2165 if (!cur_ops->sync)
2166 return; // Cannot do need_resched() forward progress testing without ->sync.
2167 if (cur_ops->call && cur_ops->cb_barrier) {
2168 init_rcu_head_on_stack(&fcs.rh);
2169 selfpropcb = true;
2170 }
2172 /* Tight loop containing cond_resched(). */
2173 WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2174 cur_ops->sync(); /* Later readers see above write. */
2175 if (selfpropcb) {
2176 WRITE_ONCE(fcs.stop, 0);
2177 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2178 }
2179 cver = READ_ONCE(rcu_torture_current_version);
2180 gps = cur_ops->get_gp_seq();
2181 sd = cur_ops->stall_dur() + 1;
2182 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2183 dur = sd4 + torture_random(&trs) % (sd - sd4);
2184 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2185 stopat = rfp->rcu_fwd_startat + dur;
2186 while (time_before(jiffies, stopat) &&
2187 !shutdown_time_arrived() &&
2188 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2189 idx = cur_ops->readlock();
2190 udelay(10);
2191 cur_ops->readunlock(idx);
2192 if (!fwd_progress_need_resched || need_resched())
2193 cond_resched();
2194 }
2195 (*tested_tries)++;
2196 if (!time_before(jiffies, stopat) &&
2197 !shutdown_time_arrived() &&
2198 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2199 (*tested)++;
2200 cver = READ_ONCE(rcu_torture_current_version) - cver;
2201 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2202 WARN_ON(!cver && gps < 2);
2203 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
2204 }
2205 if (selfpropcb) {
2206 WRITE_ONCE(fcs.stop, 1);
2207 cur_ops->sync(); /* Wait for running CB to complete. */
2208 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2209 }
2211 if (selfpropcb) {
2212 WARN_ON(READ_ONCE(fcs.stop) != 2);
2213 destroy_rcu_head_on_stack(&fcs.rh);
2214 }
2215 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2216 WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2217 }
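/*
 * Pass criterion, in brief: the reader loop above runs for a random
 * duration between roughly 1/fwd_progress_div of the RCU CPU stall
 * timeout and the full timeout, and the WARN_ON() fires only if the
 * writer version failed to advance (!cver) and fewer than two grace
 * periods completed (gps < 2) within that window.
 */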
2219 /* Carry out call_rcu() forward-progress testing. */
2220 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2221 {
2222 unsigned long cver;
2223 unsigned long flags;
2224 unsigned long gps;
2225 int i;
2226 long n_launders;
2227 long n_launders_cb_snap;
2228 long n_launders_sa;
2229 long n_max_cbs;
2230 long n_max_gps;
2231 struct rcu_fwd_cb *rfcp;
2232 struct rcu_fwd_cb *rfcpn;
2233 unsigned long stopat;
2234 unsigned long stoppedat;
2236 if (READ_ONCE(rcu_fwd_emergency_stop))
2237 return; /* Get out of the way quickly, no GP wait! */
2238 if (!cur_ops->call)
2239 return; /* Can't do call_rcu() fwd prog without ->call. */
2241 /* Loop continuously posting RCU callbacks. */
2242 WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2243 cur_ops->sync(); /* Later readers see above write. */
2244 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2245 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2246 n_launders = 0;
2247 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2248 n_launders_sa = 0;
2249 n_max_cbs = 0;
2250 n_max_gps = 0;
2251 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2252 rfp->n_launders_hist[i].n_launders = 0;
2253 cver = READ_ONCE(rcu_torture_current_version);
2254 gps = cur_ops->get_gp_seq();
2255 rfp->rcu_launder_gp_seq_start = gps;
2256 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2257 while (time_before(jiffies, stopat) &&
2258 !shutdown_time_arrived() &&
2259 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2260 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2261 rfcpn = NULL;
2262 if (rfcp)
2263 rfcpn = READ_ONCE(rfcp->rfc_next);
2264 if (rfcpn) {
2265 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2266 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2267 break;
2268 rfp->rcu_fwd_cb_head = rfcpn;
2269 n_launders++;
2270 n_launders_sa++;
2271 } else {
2272 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2273 if (WARN_ON_ONCE(!rfcp)) {
2274 schedule_timeout_interruptible(1);
2275 continue;
2276 }
2277 n_max_cbs++;
2278 n_launders_sa = 0;
2279 rfcp->rfc_gps = 0;
2280 rfcp->rfc_rfp = rfp;
2281 }
2282 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2283 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2284 if (tick_nohz_full_enabled()) {
2285 local_irq_save(flags);
2286 rcu_momentary_dyntick_idle();
2287 local_irq_restore(flags);
2288 }
2289 }
2290 stoppedat = jiffies;
2291 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2292 cver = READ_ONCE(rcu_torture_current_version) - cver;
2293 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2294 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2295 (void)rcu_torture_fwd_prog_cbfree(rfp);
2297 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2298 !shutdown_time_arrived()) {
2299 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2300 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2302 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2303 n_launders + n_max_cbs - n_launders_cb_snap,
2304 n_launders, n_launders_sa,
2305 n_max_gps, n_max_cbs, cver, gps);
2306 rcu_torture_fwd_cb_hist(rfp);
2308 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2309 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2310 WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2315 * OOM notifier, but this only prints diagnostic information for the
2316 * current forward-progress test.
2318 static int rcutorture_oom_notify(struct notifier_block *self,
2319 unsigned long notused, void *nfreed)
2320 {
2321 struct rcu_fwd *rfp;
2323 mutex_lock(&rcu_fwd_mutex);
2324 rfp = rcu_fwds;
2325 if (!rfp) {
2326 mutex_unlock(&rcu_fwd_mutex);
2327 return NOTIFY_OK;
2328 }
2329 WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2330 __func__);
2331 rcu_torture_fwd_cb_hist(rfp);
2332 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
2333 WRITE_ONCE(rcu_fwd_emergency_stop, true);
2334 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2335 pr_info("%s: Freed %lu RCU callbacks.\n",
2336 __func__, rcu_torture_fwd_prog_cbfree(rfp));
2337 rcu_barrier();
2338 pr_info("%s: Freed %lu RCU callbacks.\n",
2339 __func__, rcu_torture_fwd_prog_cbfree(rfp));
2340 rcu_barrier();
2341 pr_info("%s: Freed %lu RCU callbacks.\n",
2342 __func__, rcu_torture_fwd_prog_cbfree(rfp));
2343 smp_mb(); /* Frees before return to avoid redoing OOM. */
2344 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2345 pr_info("%s returning after OOM processing.\n", __func__);
2346 mutex_unlock(&rcu_fwd_mutex);
2347 return NOTIFY_OK;
2348 }
2350 static struct notifier_block rcutorture_oom_nb = {
2351 .notifier_call = rcutorture_oom_notify
2352 };
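/*
 * The three rounds of rcu_torture_fwd_prog_cbfree() above are
 * deliberate: each intervening rcu_barrier() waits for in-flight
 * callbacks to be invoked and requeued onto the free list, so each
 * round can hand back additional memory before the OOM killer must
 * choose a victim.
 */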
2354 /* Carry out grace-period forward-progress testing. */
2355 static int rcu_torture_fwd_prog(void *args)
2356 {
2357 int oldnice = task_nice(current);
2358 struct rcu_fwd *rfp = args;
2359 int tested = 0;
2360 int tested_tries = 0;
2362 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2363 rcu_bind_current_to_nocb();
2364 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2365 set_user_nice(current, MAX_NICE);
2366 do {
2367 schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2368 WRITE_ONCE(rcu_fwd_emergency_stop, false);
2369 if (!IS_ENABLED(CONFIG_TINY_RCU) ||
2370 rcu_inkernel_boot_has_ended())
2371 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2372 if (rcu_inkernel_boot_has_ended())
2373 rcu_torture_fwd_prog_cr(rfp);
2375 /* Avoid slow periods, better to test when busy. */
2376 if (stutter_wait("rcu_torture_fwd_prog"))
2377 sched_set_normal(current, oldnice);
2378 } while (!torture_must_stop());
2379 /* Short runs might not contain a valid forward-progress attempt. */
2380 WARN_ON(!tested && tested_tries >= 5);
2381 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2382 torture_kthread_stopping("rcu_torture_fwd_prog");
2386 /* If forward-progress checking is requested and feasible, spawn the thread. */
2387 static int __init rcu_torture_fwd_prog_init(void)
2389 struct rcu_fwd *rfp;
2392 return 0; /* Not requested, so don't do it. */
2393 if ((!cur_ops->sync && !cur_ops->call) ||
2394 !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) {
2395 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2398 if (stall_cpu > 0) {
2399 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2400 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2401 return -EINVAL; /* In module, can fail back to user. */
2402 WARN_ON(1); /* Make sure rcutorture notices conflict. */
2403 return 0;
2404 }
2405 if (fwd_progress_holdoff <= 0)
2406 fwd_progress_holdoff = 1;
2407 if (fwd_progress_div <= 0)
2408 fwd_progress_div = 4;
2409 rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
2410 if (!rfp)
2411 return -ENOMEM;
2412 spin_lock_init(&rfp->rcu_fwd_lock);
2413 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2414 mutex_lock(&rcu_fwd_mutex);
2415 rcu_fwds = rfp;
2416 mutex_unlock(&rcu_fwd_mutex);
2417 register_oom_notifier(&rcutorture_oom_nb);
2418 return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
2419 }
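/*
 * Illustrative invocation, which simply spells out the defaults:
 *
 *	modprobe rcutorture fwd_progress=1 fwd_progress_holdoff=60 fwd_progress_div=4
 *
 * The holdoff spaces successive forward-progress tests apart and the
 * divisor bounds each test to a fraction of the RCU CPU stall timeout.
 */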
2421 static void rcu_torture_fwd_prog_cleanup(void)
2422 {
2423 struct rcu_fwd *rfp;
2425 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
2426 rfp = rcu_fwds;
2427 mutex_lock(&rcu_fwd_mutex);
2428 rcu_fwds = NULL;
2429 mutex_unlock(&rcu_fwd_mutex);
2430 unregister_oom_notifier(&rcutorture_oom_nb);
2431 kfree(rfp);
2432 }
2434 /* Callback function for RCU barrier testing. */
2435 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
2436 {
2437 atomic_inc(&barrier_cbs_invoked);
2438 }
2440 /* IPI handler to get callback posted on desired CPU, if online. */
2441 static void rcu_torture_barrier1cb(void *rcu_void)
2442 {
2443 struct rcu_head *rhp = rcu_void;
2445 cur_ops->call(rhp, rcu_torture_barrier_cbf);
2446 }
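/*
 * The IPI wrapper above queues the callback from the target CPU itself,
 * so that each barrier_cbs kthread contributes a callback resident on
 * its own CPU and rcu_barrier() is forced to handle callbacks queued on
 * every CPU rather than just the one running the test.
 */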
2448 /* kthread function to register callbacks used to test RCU barriers. */
2449 static int rcu_torture_barrier_cbs(void *arg)
2450 {
2451 long myid = (long)arg;
2452 bool lastphase = false;
2453 bool newphase;
2454 struct rcu_head rcu;
2456 init_rcu_head_on_stack(&rcu);
2457 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
2458 set_user_nice(current, MAX_NICE);
2459 do {
2460 wait_event(barrier_cbs_wq[myid],
2461 (newphase =
2462 smp_load_acquire(&barrier_phase)) != lastphase ||
2463 torture_must_stop());
2464 lastphase = newphase;
2465 if (torture_must_stop())
2466 break;
2467 /*
2468 * The above smp_load_acquire() ensures barrier_phase load
2469 * is ordered before the following ->call().
2470 */
2471 if (smp_call_function_single(myid, rcu_torture_barrier1cb,
2472 &rcu, 1)) {
2473 // IPI failed, so use direct call from current CPU.
2474 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
2475 }
2476 if (atomic_dec_and_test(&barrier_cbs_count))
2477 wake_up(&barrier_wq);
2478 } while (!torture_must_stop());
2479 if (cur_ops->cb_barrier != NULL)
2480 cur_ops->cb_barrier();
2481 destroy_rcu_head_on_stack(&rcu);
2482 torture_kthread_stopping("rcu_torture_barrier_cbs");
2486 /* kthread function to drive and coordinate RCU barrier testing. */
2487 static int rcu_torture_barrier(void *arg)
2491 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
2493 atomic_set(&barrier_cbs_invoked, 0);
2494 atomic_set(&barrier_cbs_count, n_barrier_cbs);
2495 /* Ensure barrier_phase ordered after prior assignments. */
2496 smp_store_release(&barrier_phase, !barrier_phase);
2497 for (i = 0; i < n_barrier_cbs; i++)
2498 wake_up(&barrier_cbs_wq[i]);
2499 wait_event(barrier_wq,
2500 atomic_read(&barrier_cbs_count) == 0 ||
2501 torture_must_stop());
2502 if (torture_must_stop())
2503 break;
2504 n_barrier_attempts++;
2505 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
2506 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2507 n_rcu_torture_barrier_error++;
2508 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2509 atomic_read(&barrier_cbs_invoked),
2510 n_barrier_cbs);
2511 WARN_ON(1);
2512 // Wait manually for the remaining callbacks
2513 i = 0;
2514 do {
2515 if (WARN_ON(i++ > HZ))
2516 i = INT_MIN;
2517 schedule_timeout_interruptible(1);
2518 cur_ops->cb_barrier();
2519 } while (atomic_read(&barrier_cbs_invoked) !=
2520 n_barrier_cbs &&
2521 !torture_must_stop());
2522 smp_mb(); // Can't trust ordering if broken.
2523 if (!torture_must_stop())
2524 pr_err("Recovered: barrier_cbs_invoked = %d\n",
2525 atomic_read(&barrier_cbs_invoked));
2526 } else {
2527 n_barrier_successes++;
2528 }
2529 schedule_timeout_interruptible(HZ / 10);
2530 } while (!torture_must_stop());
2531 torture_kthread_stopping("rcu_torture_barrier");
2535 /* Initialize RCU barrier testing. */
2536 static int rcu_torture_barrier_init(void)
2541 if (n_barrier_cbs <= 0)
2543 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
2544 pr_alert("%s" TORTURE_FLAG
2545 " Call or barrier ops missing for %s,\n",
2546 torture_type, cur_ops->name);
2547 pr_alert("%s" TORTURE_FLAG
2548 " RCU barrier testing omitted from run.\n",
2552 atomic_set(&barrier_cbs_count, 0);
2553 atomic_set(&barrier_cbs_invoked, 0);
2555 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
2558 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
2559 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
2561 for (i = 0; i < n_barrier_cbs; i++) {
2562 init_waitqueue_head(&barrier_cbs_wq[i]);
2563 ret = torture_create_kthread(rcu_torture_barrier_cbs,
2565 barrier_cbs_tasks[i]);
2569 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
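/*
 * Protocol sketch: the rcu_torture_barrier() driver flips barrier_phase,
 * each of the n_barrier_cbs kthreads responds by posting one callback
 * and decrementing barrier_cbs_count, and once the count reaches zero
 * the driver invokes cur_ops->cb_barrier() and verifies that exactly
 * n_barrier_cbs callbacks were invoked. Illustrative invocation:
 *
 *	modprobe rcutorture n_barrier_cbs=4
 */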
2572 /* Clean up after RCU barrier testing. */
2573 static void rcu_torture_barrier_cleanup(void)
2574 {
2575 int i;
2577 torture_stop_kthread(rcu_torture_barrier, barrier_task);
2578 if (barrier_cbs_tasks != NULL) {
2579 for (i = 0; i < n_barrier_cbs; i++)
2580 torture_stop_kthread(rcu_torture_barrier_cbs,
2581 barrier_cbs_tasks[i]);
2582 kfree(barrier_cbs_tasks);
2583 barrier_cbs_tasks = NULL;
2585 if (barrier_cbs_wq != NULL) {
2586 kfree(barrier_cbs_wq);
2587 barrier_cbs_wq = NULL;
2588 }
2589 }
2591 static bool rcu_torture_can_boost(void)
2592 {
2593 static int boost_warn_once;
2594 int prio;
2596 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2597 return false;
2599 prio = rcu_get_gp_kthreads_prio();
2600 if (!prio)
2601 return false;
2603 if (prio < 2) {
2604 if (boost_warn_once == 1)
2605 return false;
2607 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
2608 boost_warn_once = 1;
2609 return false;
2610 }
2612 return true;
2613 }
2615 static bool read_exit_child_stop;
2616 static bool read_exit_child_stopped;
2617 static wait_queue_head_t read_exit_wq;
2619 // Child kthread which just does an rcutorture reader and exits.
2620 static int rcu_torture_read_exit_child(void *trsp_in)
2621 {
2622 struct torture_random_state *trsp = trsp_in;
2624 set_user_nice(current, MAX_NICE);
2625 // Minimize time between reading and exiting.
2626 while (!kthread_should_stop())
2627 schedule_timeout_uninterruptible(1);
2628 (void)rcu_torture_one_read(trsp, -1);
2629 return 0;
2630 }
2632 // Parent kthread which creates and destroys read-exit child kthreads.
2633 static int rcu_torture_read_exit(void *unused)
2634 {
2635 int count = 0;
2636 bool errexit = false;
2637 int i;
2638 struct task_struct *tsp;
2639 DEFINE_TORTURE_RANDOM(trs);
2641 // Allocate and initialize.
2642 set_user_nice(current, MAX_NICE);
2643 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
2645 // Each pass through this loop does one read-exit episode.
2646 do {
2647 if (++count > read_exit_burst) {
2648 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
2649 rcu_barrier(); // Wait for task_struct free, avoid OOM.
2650 for (i = 0; i < read_exit_delay; i++) {
2651 schedule_timeout_uninterruptible(HZ);
2652 if (READ_ONCE(read_exit_child_stop))
2653 break;
2654 }
2655 if (!READ_ONCE(read_exit_child_stop))
2656 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
2657 count = 0;
2658 }
2659 if (READ_ONCE(read_exit_child_stop))
2660 break;
2661 // Spawn child.
2662 tsp = kthread_run(rcu_torture_read_exit_child,
2663 &trs, "%s",
2664 "rcu_torture_read_exit_child");
2665 if (IS_ERR(tsp)) {
2666 VERBOSE_TOROUT_ERRSTRING("out of memory");
2667 errexit = true;
2668 tsp = NULL;
2669 break;
2670 }
2671 cond_resched();
2672 kthread_stop(tsp);
2673 n_read_exits++;
2674 stutter_wait("rcu_torture_read_exit");
2675 } while (!errexit && !READ_ONCE(read_exit_child_stop));
2677 // Clean up and exit.
2678 smp_store_release(&read_exit_child_stopped, true); // After reaping.
2679 smp_mb(); // Store before wakeup.
2680 wake_up(&read_exit_wq);
2681 while (!torture_must_stop())
2682 schedule_timeout_uninterruptible(1);
2683 torture_kthread_stopping("rcu_torture_read_exit");
2687 static int rcu_torture_read_exit_init(void)
2689 if (read_exit_burst <= 0)
2691 init_waitqueue_head(&read_exit_wq);
2692 read_exit_child_stop = false;
2693 read_exit_child_stopped = false;
2694 return torture_create_kthread(rcu_torture_read_exit, NULL,
2698 static void rcu_torture_read_exit_cleanup(void)
2700 if (!read_exit_task)
2702 WRITE_ONCE(read_exit_child_stop, true);
2703 smp_mb(); // Above write before wait.
2704 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
2705 torture_stop_kthread(rcutorture_read_exit, read_exit_task);
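/*
 * The read-exit machinery above targets interactions between RCU
 * readers and the task-exit path: each child kthread performs a single
 * torture read and exits immediately, while the parent's rcu_barrier()
 * between episodes lets the resulting task_struct frees drain rather
 * than accumulate toward OOM.
 */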
2708 static enum cpuhp_state rcutor_hp;
2710 static void
2711 rcu_torture_cleanup(void)
2712 {
2713 int firsttime;
2714 int flags = 0;
2715 unsigned long gp_seq = 0;
2716 int i;
2718 if (torture_cleanup_begin()) {
2719 if (cur_ops->cb_barrier != NULL)
2720 cur_ops->cb_barrier();
2721 return;
2722 }
2723 if (!cur_ops) {
2724 torture_cleanup_end();
2725 return;
2726 }
2728 if (cur_ops->gp_kthread_dbg)
2729 cur_ops->gp_kthread_dbg();
2730 rcu_torture_read_exit_cleanup();
2731 rcu_torture_barrier_cleanup();
2732 rcu_torture_fwd_prog_cleanup();
2733 torture_stop_kthread(rcu_torture_stall, stall_task);
2734 torture_stop_kthread(rcu_torture_writer, writer_task);
2736 if (nocb_tasks) {
2737 for (i = 0; i < nrealnocbers; i++)
2738 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
2739 kfree(nocb_tasks);
2740 nocb_tasks = NULL;
2741 }
2743 if (reader_tasks) {
2744 for (i = 0; i < nrealreaders; i++)
2745 torture_stop_kthread(rcu_torture_reader,
2746 reader_tasks[i]);
2747 kfree(reader_tasks);
2748 reader_tasks = NULL;
2749 }
2750 kfree(rcu_torture_reader_mbchk);
2751 rcu_torture_reader_mbchk = NULL;
2753 if (fakewriter_tasks) {
2754 for (i = 0; i < nfakewriters; i++)
2755 torture_stop_kthread(rcu_torture_fakewriter,
2756 fakewriter_tasks[i]);
2757 kfree(fakewriter_tasks);
2758 fakewriter_tasks = NULL;
2759 }
2761 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2762 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2763 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
2764 cur_ops->name, (long)gp_seq, flags,
2765 rcutorture_seq_diff(gp_seq, start_gp_seq));
2766 torture_stop_kthread(rcu_torture_stats, stats_task);
2767 torture_stop_kthread(rcu_torture_fqs, fqs_task);
2768 if (rcu_torture_can_boost())
2769 cpuhp_remove_state(rcutor_hp);
2771 /*
2772 * Wait for all RCU callbacks to fire, then do torture-type-specific
2773 * cleanup operations.
2774 */
2775 if (cur_ops->cb_barrier != NULL)
2776 cur_ops->cb_barrier();
2777 if (cur_ops->cleanup != NULL)
2778 cur_ops->cleanup();
2780 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
2782 if (err_segs_recorded) {
2783 pr_alert("Failure/close-call rcutorture reader segments:\n");
2784 if (rt_read_nsegs == 0)
2785 pr_alert("\t: No segments recorded!!!\n");
2786 firsttime = 1;
2787 for (i = 0; i < rt_read_nsegs; i++) {
2788 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2789 if (err_segs[i].rt_delay_jiffies != 0) {
2790 pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2791 err_segs[i].rt_delay_jiffies);
2792 firsttime = 0;
2793 }
2794 if (err_segs[i].rt_delay_ms != 0) {
2795 pr_cont("%s%ldms", firsttime ? "" : "+",
2796 err_segs[i].rt_delay_ms);
2797 firsttime = 0;
2798 }
2799 if (err_segs[i].rt_delay_us != 0) {
2800 pr_cont("%s%ldus", firsttime ? "" : "+",
2801 err_segs[i].rt_delay_us);
2802 firsttime = 0;
2803 }
2804 pr_cont("%s\n",
2805 err_segs[i].rt_preempted ? "preempted" : "");
2807 }
2808 }
2809 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
2810 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
2811 else if (torture_onoff_failures())
2812 rcu_torture_print_module_parms(cur_ops,
2813 "End of test: RCU_HOTPLUG");
2814 else
2815 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
2816 torture_cleanup_end();
2817 }
2819 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2820 static void rcu_torture_leak_cb(struct rcu_head *rhp)
2821 {
2822 }
2824 static void rcu_torture_err_cb(struct rcu_head *rhp)
2825 {
2826 /*
2827 * This -might- happen due to race conditions, but is unlikely.
2828 * The scenario that leads to this happening is that the
2829 * first of the pair of duplicate callbacks is queued,
2830 * someone else starts a grace period that includes that
2831 * callback, then the second of the pair must wait for the
2832 * next grace period. Unlikely, but can happen. If it
2833 * does happen, the debug-objects subsystem won't have splatted.
2834 */
2835 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
2836 }
2837 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2839 /*
2840 * Verify that double-free causes debug-objects to complain, but only
2841 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
2842 * cannot be carried out.
2843 */
2844 static void rcu_test_debug_objects(void)
2845 {
2846 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2847 struct rcu_head rh1;
2848 struct rcu_head rh2;
2849 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2851 init_rcu_head_on_stack(&rh1);
2852 init_rcu_head_on_stack(&rh2);
2853 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
2855 /* Try to queue the rh2 pair of callbacks for the same grace period. */
2856 preempt_disable(); /* Prevent preemption from interrupting test. */
2857 rcu_read_lock(); /* Make it impossible to finish a grace period. */
2858 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
2859 local_irq_disable(); /* Make it harder to start a new grace period. */
2860 call_rcu(&rh2, rcu_torture_leak_cb);
2861 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2862 if (rhp) {
2863 call_rcu(rhp, rcu_torture_leak_cb);
2864 call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
2865 }
2866 local_irq_enable();
2867 rcu_read_unlock();
2868 preempt_enable();
2870 /* Wait for them all to get done so we can safely return. */
2871 rcu_barrier();
2872 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
2873 destroy_rcu_head_on_stack(&rh1);
2874 destroy_rcu_head_on_stack(&rh2);
2875 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2876 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
2877 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2878 }
2880 static void rcutorture_sync(void)
2881 {
2882 static unsigned long n;
2884 if (cur_ops->sync && !(++n & 0xfff))
2885 cur_ops->sync();
2886 }
2888 static int __init
2889 rcu_torture_init(void)
2890 {
2891 long i;
2892 int cpu;
2893 int firsterr = 0;
2894 int flags = 0;
2895 unsigned long gp_seq = 0;
2896 static struct rcu_torture_ops *torture_ops[] = {
2897 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
2898 &busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
2899 &tasks_tracing_ops, &trivial_ops,
2900 };
2902 if (!torture_init_begin(torture_type, verbose))
2903 return -EBUSY;
2905 /* Process args and tell the world that the torturer is on the job. */
2906 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
2907 cur_ops = torture_ops[i];
2908 if (strcmp(torture_type, cur_ops->name) == 0)
2909 break;
2910 }
2911 if (i == ARRAY_SIZE(torture_ops)) {
2912 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2913 torture_type);
2914 pr_alert("rcu-torture types:");
2915 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
2916 pr_cont(" %s", torture_ops[i]->name);
2917 pr_cont("\n");
2918 firsterr = -EINVAL;
2919 cur_ops = NULL;
2920 goto unwind;
2921 }
2922 if (cur_ops->fqs == NULL && fqs_duration != 0) {
2923 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
2929 if (nreaders >= 0) {
2930 nrealreaders = nreaders;
2932 nrealreaders = num_online_cpus() - 2 - nreaders;
2933 if (nrealreaders <= 0)
2936 rcu_torture_print_module_parms(cur_ops, "Start of test");
2937 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2938 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2939 start_gp_seq = gp_seq;
2940 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
2941 cur_ops->name, (long)gp_seq, flags);
2943 /* Set up the freelist. */
2945 INIT_LIST_HEAD(&rcu_torture_freelist);
2946 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
2947 rcu_tortures[i].rtort_mbtest = 0;
2948 list_add_tail(&rcu_tortures[i].rtort_free,
2949 &rcu_torture_freelist);
2950 }
2952 /* Initialize the statistics so that each run gets its own numbers. */
2954 rcu_torture_current = NULL;
2955 rcu_torture_current_version = 0;
2956 atomic_set(&n_rcu_torture_alloc, 0);
2957 atomic_set(&n_rcu_torture_alloc_fail, 0);
2958 atomic_set(&n_rcu_torture_free, 0);
2959 atomic_set(&n_rcu_torture_mberror, 0);
2960 atomic_set(&n_rcu_torture_mbchk_fail, 0);
2961 atomic_set(&n_rcu_torture_mbchk_tries, 0);
2962 atomic_set(&n_rcu_torture_error, 0);
2963 n_rcu_torture_barrier_error = 0;
2964 n_rcu_torture_boost_ktrerror = 0;
2965 n_rcu_torture_boost_rterror = 0;
2966 n_rcu_torture_boost_failure = 0;
2967 n_rcu_torture_boosts = 0;
2968 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2969 atomic_set(&rcu_torture_wcount[i], 0);
2970 for_each_possible_cpu(cpu) {
2971 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2972 per_cpu(rcu_torture_count, cpu)[i] = 0;
2973 per_cpu(rcu_torture_batch, cpu)[i] = 0;
2974 }
2975 }
2976 err_segs_recorded = 0;
2977 rt_read_nsegs = 0;
2979 /* Start up the kthreads. */
2981 rcu_torture_write_types();
2982 firsterr = torture_create_kthread(rcu_torture_writer, NULL,
2983 writer_task);
2984 if (firsterr)
2985 goto unwind;
2986 if (nfakewriters > 0) {
2987 fakewriter_tasks = kcalloc(nfakewriters,
2988 sizeof(fakewriter_tasks[0]),
2989 GFP_KERNEL);
2990 if (fakewriter_tasks == NULL) {
2991 VERBOSE_TOROUT_ERRSTRING("out of memory");
2992 firsterr = -ENOMEM;
2993 goto unwind;
2994 }
2995 }
2996 for (i = 0; i < nfakewriters; i++) {
2997 firsterr = torture_create_kthread(rcu_torture_fakewriter,
2998 NULL, fakewriter_tasks[i]);
2999 if (firsterr)
3000 goto unwind;
3001 }
3002 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
3003 GFP_KERNEL);
3004 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
3005 GFP_KERNEL);
3006 if (!reader_tasks || !rcu_torture_reader_mbchk) {
3007 VERBOSE_TOROUT_ERRSTRING("out of memory");
3008 firsterr = -ENOMEM;
3009 goto unwind;
3010 }
3011 for (i = 0; i < nrealreaders; i++) {
3012 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
3013 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
3014 reader_tasks[i]);
3015 if (firsterr)
3016 goto unwind;
3017 }
3018 nrealnocbers = nocbs_nthreads;
3019 if (WARN_ON(nrealnocbers < 0))
3020 nrealnocbers = 0;
3021 if (WARN_ON(nocbs_toggle < 0))
3022 nocbs_toggle = 0;
3023 if (nrealnocbers > 0) {
3024 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
3025 if (nocb_tasks == NULL) {
3026 VERBOSE_TOROUT_ERRSTRING("out of memory");
3027 firsterr = -ENOMEM;
3028 goto unwind;
3029 }
3030 } else {
3031 nocb_tasks = NULL;
3032 }
3033 for (i = 0; i < nrealnocbers; i++) {
3034 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
3035 if (firsterr)
3036 goto unwind;
3037 }
3038 if (stat_interval > 0) {
3039 firsterr = torture_create_kthread(rcu_torture_stats, NULL,
3040 stats_task);
3041 if (firsterr)
3042 goto unwind;
3043 }
3044 if (test_no_idle_hz && shuffle_interval > 0) {
3045 firsterr = torture_shuffle_init(shuffle_interval * HZ);
3046 if (firsterr)
3047 goto unwind;
3048 }
3049 if (stutter < 0)
3050 stutter = 0;
3051 if (stutter) {
3052 int t;
3054 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
3055 firsterr = torture_stutter_init(stutter * HZ, t);
3056 if (firsterr)
3057 goto unwind;
3058 }
3059 if (fqs_duration < 0)
3060 fqs_duration = 0;
3061 if (fqs_duration) {
3062 /* Create the fqs thread */
3063 firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
3064 fqs_task);
3065 if (firsterr)
3066 goto unwind;
3067 }
3068 if (test_boost_interval < 1)
3069 test_boost_interval = 1;
3070 if (test_boost_duration < 2)
3071 test_boost_duration = 2;
3072 if (rcu_torture_can_boost()) {
3074 boost_starttime = jiffies + test_boost_interval * HZ;
3076 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
3077 rcutorture_booster_init,
3078 rcutorture_booster_cleanup);
3079 if (firsterr < 0)
3080 goto unwind;
3081 rcutor_hp = firsterr;
3082 }
3083 shutdown_jiffies = jiffies + shutdown_secs * HZ;
3084 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
3085 if (firsterr)
3086 goto unwind;
3087 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
3088 rcutorture_sync);
3089 if (firsterr)
3090 goto unwind;
3091 firsterr = rcu_torture_stall_init();
3092 if (firsterr)
3093 goto unwind;
3094 firsterr = rcu_torture_fwd_prog_init();
3095 if (firsterr)
3096 goto unwind;
3097 firsterr = rcu_torture_barrier_init();
3098 if (firsterr)
3099 goto unwind;
3100 firsterr = rcu_torture_read_exit_init();
3101 if (firsterr)
3102 goto unwind;
3103 if (object_debug)
3104 rcu_test_debug_objects();
3105 torture_init_end();
3106 return 0;
3108 unwind:
3109 torture_init_end();
3110 rcu_torture_cleanup();
3111 if (shutdown_secs) {
3112 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
3113 kernel_power_off();
3114 }
3115 return firsterr;
3116 }
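/*
 * This module is normally driven by the scripts under
 * tools/testing/selftests/rcutorture, for example (flags illustrative):
 *
 *	tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcu
 *
 * which builds an appropriately configured kernel, boots it under qemu,
 * and checks the console output generated by the code above.
 */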
3118 module_init(rcu_torture_init);
3119 module_exit(rcu_torture_cleanup);