// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
#ifndef data_race
#define data_race(expr)							\
	({								\
		expr;							\
	})
#endif
#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif
#ifndef ASSERT_EXCLUSIVE_ACCESS
#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
#endif
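/*
 * Illustrative sketch (not part of the test logic): data_race() marks a
 * load or store that is intentionally racy so that KCSAN does not flag
 * it, as in this abridged fragment of the statistics printing below:
 *
 *	pr_cont("barrier: %ld/%ld\n",
 *		data_race(n_barrier_successes),
 *		data_race(n_barrier_attempts));
 */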
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
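/*
 * Illustrative sketch (an assumption drawn from the definitions above,
 * not code used by the test): a reader-state word packs the SRCU index
 * into the upper bits and the protection bits into the low byte, so an
 * SRCU-style reader holding index 1 with bh and irqs disabled would be
 * described by:
 *
 *	int readstate = (1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_RCU |
 *			RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ;
 */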
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
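/*
 * Reader-pipeline bookkeeping (summarizing checks made in
 * rcu_torture_one_read() and rcu_torture_stats_print() below): element
 * i of rcu_torture_count[] counts reads that found rcu_torture_current
 * with rtort_pipe_count == i.  Because an old structure's pipe count is
 * advanced only after a grace period, nonzero counts at index 2 or
 * above indicate a too-short grace period.
 */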
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};
/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}
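/*
 * For example, with a hypothetical shutdown_secs=120, the CPU-hog tests
 * stand down once fewer than 30 seconds (30 * HZ jiffies) remain before
 * the scheduled shutdown time.
 */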
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
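/*
 * Illustrative sketch (mirroring how cur_ops is used throughout this
 * file): each torture flavor is exercised only through this vector, so
 * a minimal read-side critical section looks like:
 *
 *	int idx = cur_ops->readlock();
 *	// ... dereference the torture-protected pointer ...
 *	cur_ops->readunlock(idx);
 */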
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}
static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * wait primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}
static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}
static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}
static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}
static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};
static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};
/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};
/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}
static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};
/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};
/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};
/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
}
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}
static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}
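/*
 * In other words, with the default test_boost_duration of 4 seconds, a
 * callback still pending after more than 4 * HZ - HZ / 2 jiffies (about
 * 3.5 seconds) of real-time boosting is taken as evidence that priority
 * boosting failed.
 */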
static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (time_before(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1;
		 * in that case the boost check would never happen in the
		 * above loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
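/*
 * Worked example of the pipeline (derived from the code below and from
 * rcu_torture_pipe_update_one() above): a structure has a
 * rtort_pipe_count of zero while pointed to by rcu_torture_current, is
 * incremented once when it is replaced, and once more after each
 * subsequent grace period, being freed only when the count reaches
 * RCU_TORTURE_PIPE_LEN.  A reader that ever observes a count greater
 * than 1 therefore witnessed a too-short grace period.
 */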
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer") &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    rcu_inkernel_boot_has_ended())
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}
/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was
 * actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU) {
		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);

		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}
/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}
/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}
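/*
 * Concrete example of the rule above (derived from the code, for
 * illustration): if the old mask held rcu_read_lock_bh() (RDR_RBH) and
 * the new mask sets RDR_IRQ without any bh bit, then dropping the old
 * bh protection would invoke rcu_read_unlock_bh() with interrupts
 * disabled; forcing both bh bits into the new mask keeps the bh
 * protection in place until interrupts are enabled again.
 */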
/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}
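/*
 * Worked example of the bias above: OR-ing the random value with a
 * three-bit right shift of itself makes each low-order bit more likely
 * to be set, so after masking with RCUTORTURE_RDR_MAX_LOOPS (0x7) and
 * adding one, loop counts near 8 are favored over counts near 1.
 */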
/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  rcu_read_lock_trace_held() ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = READ_ONCE(p->rtort_pipe_count);
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}
static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand) && !torture_must_stop())
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when the
 * rcu_torture_stats kthread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	struct rcu_torture *rtcp;
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	rtcp = rcu_access_pointer(rcu_torture_current);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rtcp,
		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		data_race(n_barrier_successes),
		data_race(n_barrier_attempts),
		data_race(n_rcu_torture_barrier_error));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) ||
	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
		WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
		WARN_ON_ONCE(i > 1); // Too-short grace period
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_access_pointer(rcu_torture_current) &&
	    !rcu_stall_is_suppressed()) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}
static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "stall_cpu_block=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 stall_cpu_block,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}
static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}
static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}
/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	int idx;
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop() && stall_gp_kthread > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
			if (kthread_should_stop())
				break;
			schedule_timeout_uninterruptible(HZ);
		}
	}
	if (!kthread_should_stop() && stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		idx = cur_ops->readlock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else if (!stall_cpu_block)
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 raw_smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			if (stall_cpu_block)
				schedule_timeout_uninterruptible(HZ);
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else if (!stall_cpu_block)
			preempt_enable();
		cur_ops->readunlock(idx);
	}
	pr_alert("rcu_torture_stall end.\n");
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}
/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
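/*
 * Illustrative usage sketch (hypothetical values): booting with
 * rcutorture.stall_cpu=22 rcutorture.stall_cpu_holdoff=30 makes the
 * stall kthread wait 30 seconds after boot and then spin in a reader
 * for 22 seconds, longer than the usual 21-second RCU CPU stall
 * timeout, so a stall warning should be emitted.
 */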
/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};
/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}
/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	struct rcu_fwd *rfc_rfp;
	int rfc_gps;
};

#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
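/*
 * Worked example of the bucket arithmetic above: MAX_FWD_CB_JIFFIES is
 * 8 * HZ and each histogram bucket covers HZ / FWD_CBS_HIST_DIV =
 * HZ / 10 jiffies (a tenth of a second), so N_LAUNDERS_HIST is
 * 2 * 8 * HZ / (HZ / 10) = 160 buckets, covering twice the eight-second
 * maximum callback-flood duration.
 */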
struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};

struct rcu_fwd {
	spinlock_t rcu_fwd_lock;
	struct rcu_fwd_cb *rcu_fwd_cb_head;
	struct rcu_fwd_cb **rcu_fwd_cb_tail;
	long n_launders_cb;
	unsigned long rcu_fwd_startat;
	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
	unsigned long rcu_launder_gp_seq_start;
};

static struct rcu_fwd *rcu_fwds;
static bool rcu_fwd_emergency_stop;
static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
		if (rfp->n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rfp->rcu_fwd_startat);
	gps_old = rfp->rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = rfp->n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV,
			rfp->n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}
/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;
	struct rcu_fwd *rfp = rfcp->rfc_rfp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
	rfcpp = rfp->rcu_fwd_cb_tail;
	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
	rfp->n_launders_hist[i].n_launders++;
	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
}
// Give the scheduler a chance, even on nohz_full CPUs.
static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
{
	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
		// Real call_rcu() floods hit userspace, so emulate that.
		if (need_resched() || (iter & 0xfff))
			udelay(10);
		return;
	}
	// No userspace emulation: CB invocation throttles call_rcu()
	cond_resched();
}
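/*
 * Note on the emulation above (derived from the code): iter & 0xfff is
 * nonzero for 4095 of every 4096 iterations, so the nohz_full path
 * almost always takes the 10-microsecond delay, mimicking a callback
 * flood that is throttled by returns to userspace.
 */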
/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
		rfcp = rfp->rcu_fwd_cb_head;
		if (!rfcp) {
			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
			break;
		}
		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rfp->rcu_fwd_cb_head)
			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
		rcu_torture_fwd_prog_cond_resched(freed);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	return freed;
}

/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
				    int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !shutdown_time_arrived() &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}
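
/*
 * Worked example of the duration computation above, using illustrative
 * values: given a 21-second RCU CPU stall timeout and the default
 * fwd_progress_div of 4, sd is 21*HZ+1 and sd4 is roughly sd/4, so
 * each attempt spins in the reader loop for a randomly chosen duration
 * between about one quarter and the whole of the stall timeout.
 * Completing such an attempt without a stall warning is the forward
 * progress being demonstrated.
 */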

/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
{
	unsigned long cver;
	unsigned long flags;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */
	if (!cur_ops->call)
		return; /* Can't do call_rcu() fwd prog without ->call. */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
		rfp->n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rfp->rcu_launder_gp_seq_start = gps;
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rfp->rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
			rfcp->rfc_rfp = rfp;
		}
		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree(rfp);

	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
	    !shutdown_time_arrived()) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		rcu_torture_fwd_cb_hist(rfp);
	}
	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}
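
/*
 * On the loop-exit criterion above: each invocation of
 * rcu_torture_fwd_cb_cr() increments its callback's ->rfc_gps, so a
 * callback with ->rfc_gps >= MIN_FWD_CB_LAUNDERS has been "laundered"
 * (invoked and re-posted) across that many grace periods.  Requiring
 * MIN_FWD_CBS_LAUNDERED such observations before stopping thus demands
 * that grace periods repeatedly complete despite the callback flood.
 */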

/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	struct rcu_fwd *rfp = rcu_fwds;

	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	rcu_torture_fwd_cb_hist(rfp);
	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};
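
/*
 * The three rcu_torture_fwd_prog_cbfree() calls above are intentional
 * rather than redundant: the first pass frees the callbacks already on
 * the flood list, and each intervening rcu_barrier() waits for
 * still-pending callbacks to be invoked, which re-links them onto the
 * list via rcu_torture_fwd_cb_cr() so that the next pass can free them
 * as well.
 */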

/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	struct rcu_fwd *rfp = args;
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		WRITE_ONCE(rcu_fwd_emergency_stop, false);
		register_oom_notifier(&rcutorture_oom_nb);
		if (!IS_ENABLED(CONFIG_TINY_RCU) ||
		    rcu_inkernel_boot_has_ended())
			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
		if (rcu_inkernel_boot_has_ended())
			rcu_torture_fwd_prog_cr(rfp);
		unregister_oom_notifier(&rcutorture_oom_nb);

		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}
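
/*
 * For example, forward-progress testing might be exercised with
 * something like the following (parameter values illustrative only):
 *
 *	modprobe rcutorture fwd_progress=1 fwd_progress_holdoff=60 \
 *		fwd_progress_div=4
 */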

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	struct rcu_fwd *rfp;

	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
	if (!rfp)
		return -ENOMEM;
	spin_lock_init(&rfp->rcu_fwd_lock);
	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* IPI handler to get callback posted on desired CPU, if online. */
static void rcu_torture_barrier1cb(void *rcu_void)
{
	struct rcu_head *rhp = rcu_void;

	cur_ops->call(rhp, rcu_torture_barrier_cbf);
}
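
/*
 * The IPI indirection above matters because rcu_barrier() must handle
 * callbacks queued on every CPU: invoking rcu_torture_barrier1cb() on
 * the target CPU via smp_call_function_single() (see below) posts each
 * test callback from its own CPU rather than piling them all onto the
 * CPU running the kthreads.
 */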

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
					     &rcu, 1)) {
			// IPI failed, so use direct call from current CPU.
			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		}
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
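
/*
 * Phase-protocol sketch: rcu_torture_barrier() resets the counters and
 * then flips barrier_phase with smp_store_release(), while each
 * rcu_torture_barrier_cbs() kthread reads it with the smp_load_acquire()
 * above.  This acquire/release pairing guarantees that a kthread
 * observing the new phase also observes the freshly reset counters
 * before posting its callback.
 */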

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON(1);
			// Wait manually for the remaining callbacks
			i = 0;
			do {
				if (WARN_ON(i++ > HZ))
					i = INT_MIN;
				schedule_timeout_interruptible(1);
				cur_ops->cb_barrier();
			} while (atomic_read(&barrier_cbs_invoked) !=
				 n_barrier_cbs &&
				 !torture_must_stop());
			smp_mb(); // Can't trust ordering if broken.
			if (!torture_must_stop())
				pr_err("Recovered: barrier_cbs_invoked = %d\n",
				       atomic_read(&barrier_cbs_invoked));
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
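
/*
 * The mismatch check above is the heart of the barrier test: because
 * cur_ops->cb_barrier() must not return until all previously posted
 * callbacks have been invoked, any shortfall in barrier_cbs_invoked
 * indicates a broken barrier implementation.  The manual-wait loop
 * then distinguishes a callback that was merely late from one that
 * was lost outright.
 */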

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}
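
/*
 * The number of callback-posting kthreads comes from the n_barrier_cbs
 * module parameter, for example (value illustrative only):
 *
 *	modprobe rcutorture n_barrier_cbs=4
 */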

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	show_rcu_gp_kthreads();
	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period. Unlikely, but can happen. If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}
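
/*
 * The "& 0xfff" above means that only one call in 4096 actually
 * invokes cur_ops->sync().  This function is handed to
 * torture_onoff_init() below, which can call it at high frequency
 * during CPU-hotplug testing, so the throttling keeps grace-period
 * waits from dominating the hotplug path.
 */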

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
		&tasks_tracing_ops, &trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
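
/*
 * A typical invocation selects the flavor under test and the reader
 * count, for example (values illustrative only):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4
 */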