// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
	pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define VERBOSE_SCALEOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s); } while (0)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */
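/*
 * For illustration only (hypothetical invocations, not taken from this
 * file): use case 2 above might be requested on the kernel command line
 * with "rcuscale.nreaders=0", and use case 3 with "rcuscale.nwriters=0".
 * When built as a module, the equivalent would be, for example,
 * "modprobe rcuscale nreaders=0".
 */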

#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

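/* Bounds on the number of writer-duration measurements recorded per writer. */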
#define MAX_MEAS 10000
#define MIN_MEAS 100

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_scale_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};
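/*
 * The flavor under test is chosen at init time by matching the scale_type
 * module parameter against each ops structure's ->name field; see
 * rcu_scale_init() below.
 */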

static struct rcu_scale_ops *cur_ops;

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= rcu_scale_read_lock,
	.readunlock	= rcu_scale_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_scale_init,
	.cleanup	= srcu_sync_scale_cleanup,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcud"
};

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
	return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}

static struct rcu_scale_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_scale_read_lock,
	.readunlock	= tasks_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU scalability reader kthread.  Repeatedly does an empty RCU read-side
 * critical section, minimizing update-side interference.  However, the
 * point of this test is not to evaluate reader scalability, but instead
 * to serve as a test load for update-side scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_scale_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_scale_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_scale_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sched_set_fifo_low(current);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
		t_rcu_scale_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
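			/*
			 * Throttle the number of outstanding callbacks:
			 * post a new one only while fewer than gp_async_max
			 * are in flight on this CPU, otherwise wait for the
			 * backlog to drain and retry.
			 */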
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_scale_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sched_set_normal(current, 0);
			pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
				 scale_type, SCALE_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				SCALEOUT_STRING("Test complete");
				t_rcu_scale_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async)
		cur_ops->gp_barrier();
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_scale_writer");
	return 0;
}

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_scale_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_scale_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_scale_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 scale_type, SCALE_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 scale_type, SCALE_FLAG,
			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
			 t_rcu_scale_writer_finished -
			 t_rcu_scale_writer_started,
			 ngps,
			 rcuscale_seq_diff(b_rcu_gp_test_finished,
					   b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					scale_type, SCALE_FLAG,
					i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations.  */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
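/*
 * Worked example (an illustration, not in the original source): with
 * 8 CPUs online, n = 5 yields 5, n = -1 yields num_online_cpus() = 8,
 * and n = -3 yields 8 + 1 - 3 = 6.
 */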
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq,
		   atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for a
 * given number of iterations, then measure the total time and the number of
 * grace periods needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
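/*
 * For illustration only (hypothetical invocation): this path might be
 * exercised with "modprobe rcuscale kfree_rcu_test=1 kfree_nthreads=16
 * kfree_loops=20".
 */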

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;

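/* Each allocation is kfree_mult times the size of this structure. */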
struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};

static int
kfree_scale_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;

	VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

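		/*
		 * si_mem_available() returns a page count, so shifting by
		 * (20 - PAGE_SHIFT) converts pages to mebibytes.
		 */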
		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
		       (unsigned long long)(end_time - start_time), kfree_loops,
		       rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
		       (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_scale_thread");
	return 0;
}

static void
kfree_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_scale_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq,
		   atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
kfree_scale_init(void)
{
	long i;
	int firsterr = 0;

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
			       GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}

	while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_scale_cleanup();
	return firsterr;
}

static int __init
rcu_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	/* Process args and announce that the scalability'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (kfree_rcu_test)
		return kfree_scale_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_scale_reader_started, 0);
	atomic_set(&n_rcu_scale_writer_started, 0);
	atomic_set(&n_rcu_scale_writer_finished, 0);
	rcu_scale_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_scale_cleanup();
	return firsterr;
}

module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);