// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <joel@joelfernandes.org>

#define pr_fmt(fmt) fmt
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"
#define SCALE_FLAG "-ref-scale: "

#define SCALEOUT(s, x...) \
	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

#define VERBOSE_SCALEOUT(s, x...) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG s, scale_type, ## x); } while (0)

static atomic_t verbose_batch_ctr;
#define VERBOSE_SCALEOUT_BATCH(s, x...)						\
do {										\
	if (verbose &&								\
	    (verbose_batched <= 0 ||						\
	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) {	\
		schedule_timeout_uninterruptible(1);				\
		pr_alert("%s" SCALE_FLAG s, scale_type, ## x);			\
	}									\
} while (0)
#define VERBOSE_SCALEOUT_ERRSTRING(s, x...) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! " s, scale_type, ## x); } while (0)
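
// For example (illustrative, not captured from a real run), with the
// default scale_type of "rcu", SCALEOUT("Done with %d runs\n", 30)
// would print:
//
//	rcu-ref-scale: Done with 30 runs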
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");
static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock, ...)");
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");
// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of loops per experiment, all readers execute operations concurrently.
torture_param(long, loops, 10000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of runs.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");
#ifdef MODULE
# define REFSCALE_SHUTDOWN 0
#else
# define REFSCALE_SHUTDOWN 1
#endif
torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
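
// Example invocation (hypothetical parameter values; assumes the module
// is named "refscale" after this file):
//
//	modprobe refscale scale_type=srcu loops=100000 nreaders=8 nruns=10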
struct reader_task {
	struct task_struct *task;
	int start_reader;
	wait_queue_head_t wq;
	u64 last_duration_ns;
};
static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;
// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Used to wait for all threads to start.
static atomic_t n_init;
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;
// Track which experiment is currently running.
static int exp_idx;
// Operations vector for selecting different types of tests.
struct ref_scale_ops {
	void (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	void (*delaysection)(const int nloops, const int udl, const int ndl);
	const char *name;
};

static struct ref_scale_ops *cur_ops;
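
// A minimal sketch of how a new mechanism would be wired in (the "noop"
// type below is hypothetical, not part of this test): provide a
// readsection (and an analogous delaysection) plus an ops structure,
// then add "&noop_ops" to the scale_ops[] array in ref_scale_init().
//
//	static void noop_section(const int nloops)
//	{
//		int i;
//
//		for (i = nloops; i >= 0; i--)
//			barrier();
//	}
//
//	static struct ref_scale_ops noop_ops = {
//		.readsection	= noop_section,
//		.delaysection	= noop_delay_section,
//		.name		= "noop"
//	};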
static void un_delay(const int udl, const int ndl)
{
	if (udl)
		udelay(udl);
	if (ndl)
		ndelay(ndl);
}
static void ref_rcu_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}
static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		un_delay(udl, ndl);
		rcu_read_unlock();
	}
}
static void rcu_sync_scale_init(void)
{
}
static struct ref_scale_ops rcu_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= ref_rcu_read_section,
	.delaysection	= ref_rcu_delay_section,
	.name		= "rcu"
};
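
// Note that rcu_sync_scale_init() is an empty placeholder shared by the
// ops structures that need no setup; ref_scale_init() skips a NULL .init
// entirely, so either convention works.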
// Definitions for SRCU ref scale testing.
DEFINE_STATIC_SRCU(srcu_refctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;
static void srcu_ref_scale_read_section(const int nloops)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}
static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		un_delay(udl, ndl);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}
static struct ref_scale_ops srcu_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= srcu_ref_scale_read_section,
	.delaysection	= srcu_ref_scale_delay_section,
	.name		= "srcu"
};
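
// Unlike plain RCU, SRCU readers must pass the index returned by
// srcu_read_lock() back to srcu_read_unlock(), so each pass also carries
// the cost of maintaining that per-critical-section token.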
// Definitions for RCU Tasks ref scale testing: Empty read markers.
// These definitions also work for RCU Rude readers.
static void rcu_tasks_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--)
		continue;
}
static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--)
		un_delay(udl, ndl);
}
static struct ref_scale_ops rcu_tasks_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= rcu_tasks_ref_scale_read_section,
	.delaysection	= rcu_tasks_ref_scale_delay_section,
	.name		= "rcu-tasks"
};
// Definitions for RCU Tasks Trace ref scale testing.
static void rcu_trace_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		rcu_read_unlock_trace();
	}
}
static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		un_delay(udl, ndl);
		rcu_read_unlock_trace();
	}
}
static struct ref_scale_ops rcu_trace_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= rcu_trace_ref_scale_read_section,
	.delaysection	= rcu_trace_ref_scale_delay_section,
	.name		= "rcu-trace"
};
// Definitions for reference count
static atomic_t refcnt;

static void ref_refcnt_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		atomic_dec(&refcnt);
	}
}
static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		un_delay(udl, ndl);
		atomic_dec(&refcnt);
	}
}
static struct ref_scale_ops refcnt_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= ref_refcnt_section,
	.delaysection	= ref_refcnt_delay_section,
	.name		= "refcnt"
};
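
// Each pass models a get/put pair on a single shared reference counter,
// so with multiple readers this also measures the cache-line contention
// that a global refcount induces, not just the raw atomic-op cost.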
// Definitions for rwlock
static rwlock_t test_rwlock;

static void ref_rwlock_init(void)
{
	rwlock_init(&test_rwlock);
}
static void ref_rwlock_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		read_unlock(&test_rwlock);
	}
}
static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		un_delay(udl, ndl);
		read_unlock(&test_rwlock);
	}
}
static struct ref_scale_ops rwlock_ops = {
	.init		= ref_rwlock_init,
	.readsection	= ref_rwlock_section,
	.delaysection	= ref_rwlock_delay_section,
	.name		= "rwlock"
};
// Definitions for rwsem
static struct rw_semaphore test_rwsem;

static void ref_rwsem_init(void)
{
	init_rwsem(&test_rwsem);
}
static void ref_rwsem_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		up_read(&test_rwsem);
	}
}
static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		un_delay(udl, ndl);
		up_read(&test_rwsem);
	}
}
static struct ref_scale_ops rwsem_ops = {
	.init		= ref_rwsem_init,
	.readsection	= ref_rwsem_section,
	.delaysection	= ref_rwsem_delay_section,
	.name		= "rwsem"
};
// Definitions for global spinlock
static DEFINE_SPINLOCK(test_lock);

static void ref_lock_section(const int nloops)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock(&test_lock);
		spin_unlock(&test_lock);
	}
	preempt_enable();
}
static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock(&test_lock);
		un_delay(udl, ndl);
		spin_unlock(&test_lock);
	}
	preempt_enable();
}
static struct ref_scale_ops lock_ops = {
	.readsection	= ref_lock_section,
	.delaysection	= ref_lock_delay_section,
	.name		= "lock"
};
// Definitions for global irq-save spinlock

static void ref_lock_irq_section(const int nloops)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock_irqsave(&test_lock, flags);
		spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}
static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock_irqsave(&test_lock, flags);
		un_delay(udl, ndl);
		spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}
static struct ref_scale_ops lock_irq_ops = {
	.readsection	= ref_lock_irq_section,
	.delaysection	= ref_lock_irq_delay_section,
	.name		= "lock-irq"
};
// Definitions for acquire-release.
static DEFINE_PER_CPU(unsigned long, test_acqrel);

static void ref_acqrel_section(const int nloops)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}
static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		un_delay(udl, ndl);
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}
static struct ref_scale_ops acqrel_ops = {
	.readsection	= ref_acqrel_section,
	.delaysection	= ref_acqrel_delay_section,
	.name		= "acqrel"
};
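
// Each pass performs an acquire load and a release store of a strictly
// per-CPU counter, so this case isolates the cost of the ordered
// load/store pair itself, with no cross-CPU sharing at all.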
static volatile u64 stopopts;

static void ref_clock_section(const int nloops)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--)
		x += ktime_get_real_fast_ns();
	preempt_enable();
	stopopts = x;
}
static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x += ktime_get_real_fast_ns();
		un_delay(udl, ndl);
	}
	preempt_enable();
	stopopts = x;
}
static struct ref_scale_ops clock_ops = {
	.readsection	= ref_clock_section,
	.delaysection	= ref_clock_delay_section,
	.name		= "clock"
};
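
// The accumulated clock readings are flushed to the volatile stopopts
// variable so that the compiler cannot optimize away the
// ktime_get_real_fast_ns() calls whose cost is being measured.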
static void rcu_scale_one_reader(void)
{
	if (readdelay <= 0)
		cur_ops->readsection(loops);
	else
		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}
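
// Worked example: readdelay=1500 (nanoseconds) results in
// delaysection(loops, 1, 500), that is, udelay(1) followed by ndelay(500)
// via un_delay() in each critical section, for an in-section delay of
// 1.5 microseconds per pass.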
// Reader kthread. Repeatedly does empty RCU read-side critical
// sections, minimizing update-side interference.
static int
ref_scale_reader(void *arg)
{
	unsigned long flags;
	long me = (long)arg;
	struct reader_task *rt = &(reader_tasks[me]);
	u64 start;
	s64 duration;

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_init);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);
repeat:
	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());

	// Wait for signal that this reader can start.
	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
		   torture_must_stop());

	if (torture_must_stop())
		goto end;

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(raw_smp_processor_id() != me);

	WRITE_ONCE(rt->start_reader, 0);
	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started))
			cpu_relax();

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);

	// To reduce noise, do an initial cache-warming invocation, check
	// in, and then keep warming until everyone has checked in.
	rcu_scale_one_reader();
	if (!atomic_dec_return(&n_warmedup))
		while (atomic_read_acquire(&n_warmedup))
			rcu_scale_one_reader();
	// Also keep interrupts disabled. This also has the effect
	// of preventing entries into the slow path for rcu_read_unlock().
	local_irq_save(flags);
	start = ktime_get_mono_fast_ns();

	rcu_scale_one_reader();

	duration = ktime_get_mono_fast_ns() - start;
	local_irq_restore(flags);

	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
	// To reduce runtime-skew noise, do maintain-load invocations until
	// everyone is done.
	if (!atomic_dec_return(&n_cooleddown))
		while (atomic_read_acquire(&n_cooleddown))
			rcu_scale_one_reader();

	if (atomic_dec_and_test(&nreaders_exp))
		wake_up(&main_wq);

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
			       me, exp_idx, atomic_read(&nreaders_exp));

	if (!torture_must_stop())
		goto repeat;
end:
	torture_kthread_stopping("ref_scale_reader");
	return 0;
}
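
// Note that only the single rcu_scale_one_reader() call bracketed by the
// ktime_get_mono_fast_ns() reads is actually timed; the warmup and
// cooldown invocations exist solely to keep the number of concurrently
// executing readers constant across every reader's measurement window.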
static void reset_readers(void)
{
	int i;
	struct reader_task *rt;

	for (i = 0; i < nreaders; i++) {
		rt = &(reader_tasks[i]);

		rt->last_duration_ns = 0;
	}
}
// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
	int i;
	struct reader_task *rt;
	char buf1[64];
	char *buf;
	u64 sum = 0;

	buf = kmalloc(128 + nreaders * 32, GFP_KERNEL);
	if (!buf)
		return 0;
	buf[0] = 0;
	sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
		exp_idx);

	for (i = 0; i < n && !torture_must_stop(); i++) {
		rt = &(reader_tasks[i]);
		sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);

		if (i % 5 == 0)
			strcat(buf, "\n");
		strcat(buf, buf1);

		sum += rt->last_duration_ns;
	}

	SCALEOUT("%s\n", buf);

	kfree(buf);
	return sum;
}
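
// Illustrative output for nreaders=3 (the durations are invented):
//
//	rcu-ref-scale: Experiment #1 (Format: <THREAD-NUM>:<Total loop time in ns>)
//	0: 12000000	1: 11800000	2: 12100000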
// main_func() is the orchestrator: it runs a series of experiments.
// For each one, it tells all the readers involved to start, waits for
// them to finish, then collects their timestamps before starting the
// next experiment. After the final experiment, the per-loop averages
// are printed.
static int main_func(void *arg)
{
	bool errexit = false;
	int exp;
	int r;
	char buf1[64];
	char *buf;
	u64 *result_avg;

	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	VERBOSE_SCALEOUT("main_func task started");
	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
	buf = kzalloc(64 + nruns * 32, GFP_KERNEL);
	if (!result_avg || !buf) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		errexit = true;
	}
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	// Wait for all threads to start.
	atomic_inc(&n_init);
	while (atomic_read(&n_init) < nreaders + 1)
		schedule_timeout_uninterruptible(1);

	// Start all the readers for each experiment in turn.
	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
		if (errexit)
			break;
		if (torture_must_stop())
			goto end;

		reset_readers();
		atomic_set(&nreaders_exp, nreaders);
		atomic_set(&n_started, nreaders);
		atomic_set(&n_warmedup, nreaders);
		atomic_set(&n_cooleddown, nreaders);

		exp_idx = exp;

		for (r = 0; r < nreaders; r++) {
			smp_store_release(&reader_tasks[r].start_reader, 1);
			wake_up(&reader_tasks[r].wq);
		}

		VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
				 nreaders);

		wait_event(main_wq,
			   !atomic_read(&nreaders_exp) || torture_must_stop());

		VERBOSE_SCALEOUT("main_func: experiment ended");

		if (torture_must_stop())
			goto end;

		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
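		// The value just stored is the average time per loop in
		// nanoseconds, scaled up by 1000 so that three decimal
		// places survive the integer division; div_u64_rem() below
		// unscales it for printing as "<ns>.<frac>".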
	}

	// Print the average of all experiments.
	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

	if (!errexit) {
		buf[0] = 0;
		strcat(buf, "\n");
		strcat(buf, "Runs\tTime(ns)\n");
	}

	for (exp = 0; exp < nruns; exp++) {
		u64 avg;
		u32 rem;

		if (errexit)
			break;
		avg = div_u64_rem(result_avg[exp], 1000, &rem);
		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
		strcat(buf, buf1);
	}

	if (!errexit)
		SCALEOUT("%s", buf);

	// This will shut down everything, including us.
	if (shutdown) {
		shutdown_start = 1;
		wake_up(&shutdown_wq);
	}

	// Wait for torture to stop us.
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);

end:
	torture_kthread_stopping("main_func");
	kfree(buf);
	kfree(result_avg);
	return 0;
}
static void
ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: verbose=%d shutdown=%d holdoff=%d loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
		 verbose, shutdown, holdoff, loops, nreaders, nruns, readdelay);
}
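
// With the defaults, this prints a single line along the following lines
// (the values are illustrative only; wrapped here for readability):
//
//	rcu-ref-scale: --- Start of test: verbose=0 shutdown=1 holdoff=10
//	    loops=10000 nreaders=3 nruns=30 readdelay=0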
static void
ref_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nreaders; i++)
			torture_stop_kthread("ref_scale_reader",
					     reader_tasks[i].task);
	}
	kfree(reader_tasks);

	torture_stop_kthread("main_task", main_task);

	// Do scale-type-specific cleanup operations.
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}
// Shutdown kthread. Just waits to be awakened, then shuts down the system.
static int
ref_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq, shutdown_start);

	smp_mb(); // Wake before output.
	ref_scale_cleanup();
	kernel_power_off();

	return -EINVAL;
}
static int __init
ref_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct ref_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops, &refcnt_ops, &rwlock_ops,
		&rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops,
	};
	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	ref_scale_print_module_parms(cur_ops, "Start of test");
	// Shutdown task.
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	// Reader tasks (default to ~75% of online CPUs).
	if (nreaders < 0)
		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
	if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
		loops = 1;
	if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
		nreaders = 1;
	if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
		nruns = 1;
	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (!reader_tasks) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	VERBOSE_SCALEOUT("Starting %d reader threads\n", nreaders);
	for (i = 0; i < nreaders; i++) {
		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
						  reader_tasks[i].task);
		if (firsterr)
			goto unwind;

		init_waitqueue_head(&(reader_tasks[i].wq));
	}
	// Main task.
	init_waitqueue_head(&main_wq);
	firsterr = torture_create_kthread(main_func, NULL, main_task);
	if (firsterr)
		goto unwind;

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	ref_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}
module_init(ref_scale_init);
module_exit(ref_scale_cleanup);