// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based performance-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
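
/*
 * Illustrative output (example added for clarity, not from the original
 * source): with perf_type set to "rcu", PERFOUT_STRING("Test complete")
 * emits a line of the form
 *
 *	rcu-perf: Test complete
 *
 * and, assuming verbose is nonzero, VERBOSE_PERFOUT_ERRSTRING("out of
 * memory") emits
 *
 *	rcu-perf:!!! out of memory
 */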

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1. Specify only the nr_cpus kernel boot parameter.  This will
 *    set both nreaders and nwriters to the value specified by
 *    nr_cpus for a mixed reader/writer test.
 *
 * 2. Specify the nr_cpus kernel boot parameter, but set
 *    rcuperf.nreaders to zero.  This will set nwriters to the
 *    value specified by nr_cpus for an update-only test.
 *
 * 3. Specify the nr_cpus kernel boot parameter, but set
 *    rcuperf.nwriters to zero.  This will set nreaders to the
 *    value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader performance statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */
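
/*
 * Illustrative kernel-boot-parameter settings (examples added for
 * clarity, not from the original source); the parameter names follow
 * the torture_param() definitions below:
 *
 *	rcuperf.nreaders=0			# update-only test (case 2)
 *	rcuperf.gp_exp=1			# use expedited grace periods
 *	rcuperf.kfree_rcu_test=1		# run the kfree_rcu() test instead
 */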

#ifdef MODULE
# define RCUPERF_SHUTDOWN 0
#else
# define RCUPERF_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
	      "Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() perf test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100

/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_perf_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};

static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_perf_read_lock,
	.readunlock	= rcu_perf_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_perf_init,
	.cleanup	= srcu_sync_perf_cleanup,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcud"
};

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= tasks_perf_read_lock,
	.readunlock	= tasks_perf_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does empty RCU read-side critical
 * section, minimizing update-side interference.  However, the point of
 * this test is not to evaluate reader performance, but instead to serve
 * as a test load for update-side performance testing.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	struct sched_param sp;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sp.sched_priority = 1;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_perf_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(current,
						   SCHED_NORMAL, &sp);
			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
				 perf_type, PERF_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async)
		cur_ops->gp_barrier();
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}
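
/*
 * Illustrative output (example added for clarity, not from the original
 * source): with the default parameters on a hypothetical 8-CPU system,
 * the function above prints a line of the form
 *
 *	rcu-perf:--- Start of test: nreaders=8 nwriters=8 verbose=1 shutdown=1
 */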

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 rcuperf_seq_diff(b_rcu_gp_test_finished,
					  b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 perf_type, PERF_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
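
/*
 * Worked example (added for clarity): on a system with 8 CPUs online,
 * compute_real(4) returns 4, compute_real(-1) returns 8 + 1 + (-1) = 8
 * (one kthread per CPU), and compute_real(-10) would give 8 + 1 - 10 = -1,
 * which the "nr <= 0" check clamps to 1.
 */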

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	wait_event(shutdown_wq,
		   atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

/*
 * kfree_rcu() performance tests: Start a kfree_rcu() loop on all CPUs for number
 * of iterations and measure total time and number of GP for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
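
/*
 * Illustrative settings (example added for clarity, not from the original
 * source): booting with
 *
 *	rcuperf.kfree_rcu_test=1 rcuperf.kfree_nthreads=8 rcuperf.kfree_loops=20
 *
 * would run eight kthreads, each doing 20 loops of kfree_alloc_num
 * allocations, with each allocation passed to kfree_rcu().
 */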

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_perf_thread_started;
static atomic_t n_kfree_perf_thread_ended;

/* Object allocated and kfree_rcu()'d by the test; kfree_mult scales its size. */
struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};

static int
kfree_perf_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;

	VERBOSE_PERFOUT_STRING("kfree_perf_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_perf_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_perf_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
			 (unsigned long long)(end_time - start_time), kfree_loops,
			 rcuperf_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
			 (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_perf_thread");
	return 0;
}
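
/*
 * Note on the memory-footprint arithmetic above (explanatory comment
 * added for clarity): si_mem_available() returns a count of pages, and
 * shifting right by (20 - PAGE_SHIFT) converts pages to megabytes.  For
 * example, with 4 KiB pages (PAGE_SHIFT == 12) the shift is 8, i.e. a
 * divide by 256 pages per megabyte.
 */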

static void
kfree_perf_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_perf_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_perf_shutdown(void *arg)
{
	wait_event(shutdown_wq,
		   atomic_read(&n_kfree_perf_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
kfree_perf_init(void)
{
	long i;
	int firsterr = 0;

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
				     GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_perf_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}

	while (atomic_read(&n_kfree_perf_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_perf_cleanup();
	return firsterr;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_cont(" %s", perf_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (kfree_rcu_test)
		return kfree_perf_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);