// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based performance-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>		/* si_mem_available(), used by the kfree test. */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"	/* rcu_get_gp_seq(), rcu_seq_diff(), and friends. */

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
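
/*
 * For example, with the default perf_type of "rcu",
 * PERFOUT_STRING("Test complete") emits the console line:
 *
 *	rcu-perf: Test complete
 *
 * (Sample output shown for illustration only; the exact prefix tracks
 * the perf_type module parameter.)
 */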
/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 */
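
/*
 * For example (hypothetical boot command lines using only the
 * parameters above), use case 1 on a four-CPU system is simply:
 *
 *	nr_cpus=4
 *
 * and use case 3 (read-only) is:
 *
 *	nr_cpus=4 rcuperf.nwriters=0
 */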
#ifdef MODULE
# define RCUPERF_SHUTDOWN 0
#else
# define RCUPERF_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
	      "Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() perf test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100
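
/*
 * Each writer declares itself done once it has MIN_MEAS measurements,
 * and never records more than MAX_MEAS of them; see rcu_perf_writer().
 */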
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_perf_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};

static struct rcu_perf_ops *cur_ops;
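
/*
 * Note on dispatch: rcu_perf_init() points cur_ops at whichever entry
 * of its perf_ops[] array has a ->name matching the perf_type module
 * parameter, and all test code thereafter calls through cur_ops rather
 * than through any one RCU flavor directly.  Adding a flavor thus means
 * defining another rcu_perf_ops instance and listing it in perf_ops[].
 */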
/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_perf_read_lock,
	.readunlock	= rcu_perf_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_perf_init,
	.cleanup	= srcu_sync_perf_cleanup,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcud"
};

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= tasks_perf_read_lock,
	.readunlock	= tasks_perf_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does empty RCU read-side
 * critical section, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	struct sched_param sp;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sp.sched_priority = 1;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_perf_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp; /* Record this GP's duration in nanoseconds. */
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(current,
						   SCHED_NORMAL, &sp);
			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
				 perf_type, PERF_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async)
		cur_ops->gp_barrier();
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 rcuperf_seq_diff(b_rcu_gp_test_finished,
					  b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 perf_type, PERF_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
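
/*
 * Worked example: with eight CPUs online, compute_real(-1) returns
 * 8 (8 + 1 - 1) and compute_real(-2) returns 7, while compute_real(3)
 * returns 3 unchanged.
 */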
/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_rcu_perf_writer_finished) >=
			   nrealwriters);
	} while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

/*
 * kfree_rcu() performance tests: Start a kfree_rcu() loop on all CPUs for
 * a number of iterations and measure the total time and number of grace
 * periods needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
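
/*
 * A hypothetical invocation (parameter names as defined above) enabling
 * this test might boot with:
 *
 *	rcuperf.kfree_rcu_test=1 rcuperf.kfree_nthreads=8
 *	rcuperf.kfree_alloc_num=8000 rcuperf.kfree_loops=10
 */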
static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_perf_thread_started;
static atomic_t n_kfree_perf_thread_ended;

struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};

static int
kfree_perf_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;

	VERBOSE_PERFOUT_STRING("kfree_perf_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_perf_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_perf_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

		/* si_mem_available() counts pages; the shift converts pages to MB. */
		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
			 (unsigned long long)(end_time - start_time), kfree_loops,
			 rcuperf_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
			 (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_perf_thread");
	return 0;
}

static void
kfree_perf_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_perf_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_kfree_perf_thread_ended) >=
			   kfree_nrealthreads);
	} while (atomic_read(&n_kfree_perf_thread_ended) < kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
kfree_perf_init(void)
{
	long i;
	int firsterr = 0;

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
				     GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_perf_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}

	while (atomic_read(&n_kfree_perf_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_perf_cleanup();
	return firsterr;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_cont(" %s", perf_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (kfree_rcu_test)
		return kfree_perf_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);
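
/*
 * Hypothetical usage sketch: when built with CONFIG_RCU_PERF_TEST=m,
 * the test can be loaded as, e.g.:
 *
 *	modprobe rcuperf perf_type=srcu nwriters=4 shutdown=0
 *
 * with results appearing in the console log via the pr_alert() calls
 * above.
 */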