sched,debug: Convert sysctl sched_domains to debugfs
kernel/sched/debug.c (from linux-2.6-microblaze.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/sched/debug.c
4  *
5  * Print the CFS rbtree and other debugging details
6  *
7  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
8  */
9 #include "sched.h"
10
11 static DEFINE_SPINLOCK(sched_debug_lock);
12
13 /*
14  * This allows printing both to /proc/sched_debug and to the console
15  * (callers such as sysrq_sched_debug_show() pass a NULL seq_file).
16  */
17 #define SEQ_printf(m, x...)                     \
18  do {                                           \
19         if (m)                                  \
20                 seq_printf(m, x);               \
21         else                                    \
22                 pr_cont(x);                     \
23  } while (0)
24
25 /*
26  * Ease the printing of nsec fields:
27  */
28 static long long nsec_high(unsigned long long nsec)
29 {
30         if ((long long)nsec < 0) {
31                 nsec = -nsec;
32                 do_div(nsec, 1000000);
33                 return -nsec;
34         }
35         do_div(nsec, 1000000);
36
37         return nsec;
38 }
39
40 static unsigned long nsec_low(unsigned long long nsec)
41 {
42         if ((long long)nsec < 0)
43                 nsec = -nsec;
44
45         return do_div(nsec, 1000000);
46 }
47
48 #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
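/*
 * For example, SPLIT_NS(3155500000ULL) evaluates to the pair (3155, 500000),
 * which the "%Ld.%06ld" formats used below print as "3155.500000" (ms).
 */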
49
50 #define SCHED_FEAT(name, enabled)       \
51         #name ,
52
53 static const char * const sched_feat_names[] = {
54 #include "features.h"
55 };
56
57 #undef SCHED_FEAT
58
59 static int sched_feat_show(struct seq_file *m, void *v)
60 {
61         int i;
62
63         for (i = 0; i < __SCHED_FEAT_NR; i++) {
64                 if (!(sysctl_sched_features & (1UL << i)))
65                         seq_puts(m, "NO_");
66                 seq_printf(m, "%s ", sched_feat_names[i]);
67         }
68         seq_puts(m, "\n");
69
70         return 0;
71 }
72
73 #ifdef CONFIG_JUMP_LABEL
74
75 #define jump_label_key__true  STATIC_KEY_INIT_TRUE
76 #define jump_label_key__false STATIC_KEY_INIT_FALSE
77
78 #define SCHED_FEAT(name, enabled)       \
79         jump_label_key__##enabled ,
80
81 struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
82 #include "features.h"
83 };
84
85 #undef SCHED_FEAT
86
87 static void sched_feat_disable(int i)
88 {
89         static_key_disable_cpuslocked(&sched_feat_keys[i]);
90 }
91
92 static void sched_feat_enable(int i)
93 {
94         static_key_enable_cpuslocked(&sched_feat_keys[i]);
95 }
96 #else
97 static void sched_feat_disable(int i) { }
98 static void sched_feat_enable(int i) { }
99 #endif /* CONFIG_JUMP_LABEL */
100
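/*
 * Toggle one feature bit by name ("FOO" / "NO_FOO"), keeping the
 * sysctl_sched_features bitmask (reported by sched_feat_show() above) and,
 * with CONFIG_JUMP_LABEL, the static keys behind the sched_feat() test in
 * sync.
 */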
101 static int sched_feat_set(char *cmp)
102 {
103         int i;
104         int neg = 0;
105
106         if (strncmp(cmp, "NO_", 3) == 0) {
107                 neg = 1;
108                 cmp += 3;
109         }
110
111         i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
112         if (i < 0)
113                 return i;
114
115         if (neg) {
116                 sysctl_sched_features &= ~(1UL << i);
117                 sched_feat_disable(i);
118         } else {
119                 sysctl_sched_features |= (1UL << i);
120                 sched_feat_enable(i);
121         }
122
123         return 0;
124 }
125
126 static ssize_t
127 sched_feat_write(struct file *filp, const char __user *ubuf,
128                 size_t cnt, loff_t *ppos)
129 {
130         char buf[64];
131         char *cmp;
132         int ret;
133         struct inode *inode;
134
135         if (cnt > 63)
136                 cnt = 63;
137
138         if (copy_from_user(&buf, ubuf, cnt))
139                 return -EFAULT;
140
141         buf[cnt] = 0;
142         cmp = strstrip(buf);
143
144         /* Ensure the static_key remains in a consistent state */
145         inode = file_inode(filp);
146         cpus_read_lock();
147         inode_lock(inode);
148         ret = sched_feat_set(cmp);
149         inode_unlock(inode);
150         cpus_read_unlock();
151         if (ret < 0)
152                 return ret;
153
154         *ppos += cnt;
155
156         return cnt;
157 }
158
159 static int sched_feat_open(struct inode *inode, struct file *filp)
160 {
161         return single_open(filp, sched_feat_show, NULL);
162 }
163
164 static const struct file_operations sched_feat_fops = {
165         .open           = sched_feat_open,
166         .write          = sched_feat_write,
167         .read           = seq_read,
168         .llseek         = seq_lseek,
169         .release        = single_release,
170 };
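/*
 * This backs <debugfs>/sched/features, created in sched_init_debug() below.
 * Example usage (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   # cat /sys/kernel/debug/sched/features
 *   # echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 *
 * Feature names come from features.h; prefixing a name with "NO_" clears it.
 */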
171
172 #ifdef CONFIG_SMP
173
174 static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
175                                    size_t cnt, loff_t *ppos)
176 {
177         char buf[16];
178
179         if (cnt > 15)
180                 cnt = 15;
181
182         if (copy_from_user(&buf, ubuf, cnt))
183                 return -EFAULT;
            buf[cnt] = '\0';        /* cnt <= 15, so this fits; kstrtouint() expects a NUL-terminated string */
184
185         if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
186                 return -EINVAL;
187
188         if (sched_update_scaling())
189                 return -EINVAL;
190
191         *ppos += cnt;
192         return cnt;
193 }
194
195 static int sched_scaling_show(struct seq_file *m, void *v)
196 {
197         seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
198         return 0;
199 }
200
201 static int sched_scaling_open(struct inode *inode, struct file *filp)
202 {
203         return single_open(filp, sched_scaling_show, NULL);
204 }
205
206 static const struct file_operations sched_scaling_fops = {
207         .open           = sched_scaling_open,
208         .write          = sched_scaling_write,
209         .read           = seq_read,
210         .llseek         = seq_lseek,
211         .release        = single_release,
212 };
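/*
 * This backs <debugfs>/sched/tunable_scaling. Values follow
 * sched_tunable_scaling_names[] further down: 0 = none, 1 = logarithmic,
 * 2 = linear. sched_update_scaling() re-derives the dependent values
 * (e.g. sched_nr_latency) after a successful write.
 */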
213
214 #endif /* SMP */
215
216 #ifdef CONFIG_PREEMPT_DYNAMIC
217
218 static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
219                                    size_t cnt, loff_t *ppos)
220 {
221         char buf[16];
222         int mode;
223
224         if (cnt > 15)
225                 cnt = 15;
226
227         if (copy_from_user(&buf, ubuf, cnt))
228                 return -EFAULT;
229
230         buf[cnt] = 0;
231         mode = sched_dynamic_mode(strstrip(buf));
232         if (mode < 0)
233                 return mode;
234
235         sched_dynamic_update(mode);
236
237         *ppos += cnt;
238
239         return cnt;
240 }
241
242 static int sched_dynamic_show(struct seq_file *m, void *v)
243 {
244         static const char * preempt_modes[] = {
245                 "none", "voluntary", "full"
246         };
247         int i;
248
249         for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
250                 if (preempt_dynamic_mode == i)
251                         seq_puts(m, "(");
252                 seq_puts(m, preempt_modes[i]);
253                 if (preempt_dynamic_mode == i)
254                         seq_puts(m, ")");
255
256                 seq_puts(m, " ");
257         }
258
259         seq_puts(m, "\n");
260         return 0;
261 }
262
263 static int sched_dynamic_open(struct inode *inode, struct file *filp)
264 {
265         return single_open(filp, sched_dynamic_show, NULL);
266 }
267
268 static const struct file_operations sched_dynamic_fops = {
269         .open           = sched_dynamic_open,
270         .write          = sched_dynamic_write,
271         .read           = seq_read,
272         .llseek         = seq_lseek,
273         .release        = single_release,
274 };
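/*
 * This backs <debugfs>/sched/preempt. Reading lists the available modes with
 * the current one in parentheses, e.g. "none voluntary (full)"; writing one of
 * the words switches the preemption model at runtime, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   # echo full > /sys/kernel/debug/sched/preempt
 */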
275
276 #endif /* CONFIG_PREEMPT_DYNAMIC */
277
278 __read_mostly bool sched_debug_enabled;
279
280 static struct dentry *debugfs_sched;
281
282 static __init int sched_init_debug(void)
283 {
284         struct dentry __maybe_unused *numa;
285
286         debugfs_sched = debugfs_create_dir("sched", NULL);
287
288         debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
289         debugfs_create_bool("debug_enabled", 0644, debugfs_sched, &sched_debug_enabled);
290 #ifdef CONFIG_PREEMPT_DYNAMIC
291         debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
292 #endif
293
294         debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
295         debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
296         debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
297
298 #ifdef CONFIG_SMP
299         debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
300         debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
301         debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
302
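        /*
         * update_sched_domain_debugfs() walks each CPU's sched_domain
         * hierarchy, so hold sched_domains_mutex against a concurrent
         * domain rebuild.
         */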
303         mutex_lock(&sched_domains_mutex);
304         update_sched_domain_debugfs();
305         mutex_unlock(&sched_domains_mutex);
306 #endif
307
308 #ifdef CONFIG_NUMA_BALANCING
309         numa = debugfs_create_dir("numa_balancing", debugfs_sched);
310
311         debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
312         debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
313         debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
314         debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
315 #endif
316
317         return 0;
318 }
319 late_initcall(sched_init_debug);
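/*
 * With debugfs mounted at /sys/kernel/debug (a common default, not mandated
 * here), the code above produces roughly this layout:
 *
 *   /sys/kernel/debug/sched/
 *     features  debug_enabled  latency_ns  min_granularity_ns
 *     wakeup_granularity_ns
 *     preempt                                      (CONFIG_PREEMPT_DYNAMIC)
 *     tunable_scaling  migration_cost_ns  nr_migrate  domains/  (CONFIG_SMP)
 *     numa_balancing/{scan_delay_ms,scan_period_min_ms,
 *                     scan_period_max_ms,scan_size_mb}  (CONFIG_NUMA_BALANCING)
 */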
320
321 #ifdef CONFIG_SMP
322
323 static cpumask_var_t            sd_sysctl_cpus;
324 static struct dentry            *sd_dentry;
325
326 static int sd_flags_show(struct seq_file *m, void *v)
327 {
328         unsigned long flags = *(unsigned int *)m->private;
329         int idx;
330
331         for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
332                 seq_puts(m, sd_flag_debug[idx].name);
333                 seq_puts(m, " ");
334         }
335         seq_puts(m, "\n");
336
337         return 0;
338 }
339
340 static int sd_flags_open(struct inode *inode, struct file *file)
341 {
342         return single_open(file, sd_flags_show, inode->i_private);
343 }
344
345 static const struct file_operations sd_flags_fops = {
346         .open           = sd_flags_open,
347         .read           = seq_read,
348         .llseek         = seq_lseek,
349         .release        = single_release,
350 };
351
352 static void register_sd(struct sched_domain *sd, struct dentry *parent)
353 {
354 #define SDM(type, mode, member) \
355         debugfs_create_##type(#member, mode, parent, &sd->member)
356
357         SDM(ulong, 0644, min_interval);
358         SDM(ulong, 0644, max_interval);
359         SDM(u64,   0644, max_newidle_lb_cost);
360         SDM(u32,   0644, busy_factor);
361         SDM(u32,   0644, imbalance_pct);
362         SDM(u32,   0644, cache_nice_tries);
363         SDM(str,   0444, name);
364
365 #undef SDM
366
367         debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
368 }
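/*
 * Each sched_domain thus gets a directory of the form
 * <debugfs>/sched/domains/cpuN/domainM/ containing min_interval,
 * max_interval, max_newidle_lb_cost, busy_factor, imbalance_pct,
 * cache_nice_tries, name and flags (see update_sched_domain_debugfs()
 * below for the cpuN/domainM naming).
 */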
369
370 void update_sched_domain_debugfs(void)
371 {
372         int cpu, i;
373
374         if (!cpumask_available(sd_sysctl_cpus)) {
375                 if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
376                         return;
377                 cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
378         }
379
380         if (!sd_dentry)
381                 sd_dentry = debugfs_create_dir("domains", debugfs_sched);
382
383         for_each_cpu(cpu, sd_sysctl_cpus) {
384                 struct sched_domain *sd;
385                 struct dentry *d_cpu;
386                 char buf[32];
387
388                 snprintf(buf, sizeof(buf), "cpu%d", cpu);
389                 debugfs_remove(debugfs_lookup(buf, sd_dentry));
390                 d_cpu = debugfs_create_dir(buf, sd_dentry);
391
392                 i = 0;
393                 for_each_domain(cpu, sd) {
394                         struct dentry *d_sd;
395
396                         snprintf(buf, sizeof(buf), "domain%d", i);
397                         d_sd = debugfs_create_dir(buf, d_cpu);
398
399                         register_sd(sd, d_sd);
400                         i++;
401                 }
402
403                 __cpumask_clear_cpu(cpu, sd_sysctl_cpus);
404         }
405 }
406
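/*
 * Mark a CPU so that update_sched_domain_debugfs() re-creates its
 * "domains/cpuN" directory on the next call; CPUs not marked in
 * sd_sysctl_cpus keep their existing entries.
 */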
407 void dirty_sched_domain_sysctl(int cpu)
408 {
409         if (cpumask_available(sd_sysctl_cpus))
410                 __cpumask_set_cpu(cpu, sd_sysctl_cpus);
411 }
412
413 #endif /* CONFIG_SMP */
414
415 #ifdef CONFIG_FAIR_GROUP_SCHED
416 static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
417 {
418         struct sched_entity *se = tg->se[cpu];
419
420 #define P(F)            SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)F)
421 #define P_SCHEDSTAT(F)  SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)schedstat_val(F))
422 #define PN(F)           SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
423 #define PN_SCHEDSTAT(F) SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
424
425         if (!se)
426                 return;
427
428         PN(se->exec_start);
429         PN(se->vruntime);
430         PN(se->sum_exec_runtime);
431
432         if (schedstat_enabled()) {
433                 PN_SCHEDSTAT(se->statistics.wait_start);
434                 PN_SCHEDSTAT(se->statistics.sleep_start);
435                 PN_SCHEDSTAT(se->statistics.block_start);
436                 PN_SCHEDSTAT(se->statistics.sleep_max);
437                 PN_SCHEDSTAT(se->statistics.block_max);
438                 PN_SCHEDSTAT(se->statistics.exec_max);
439                 PN_SCHEDSTAT(se->statistics.slice_max);
440                 PN_SCHEDSTAT(se->statistics.wait_max);
441                 PN_SCHEDSTAT(se->statistics.wait_sum);
442                 P_SCHEDSTAT(se->statistics.wait_count);
443         }
444
445         P(se->load.weight);
446 #ifdef CONFIG_SMP
447         P(se->avg.load_avg);
448         P(se->avg.util_avg);
449         P(se->avg.runnable_avg);
450 #endif
451
452 #undef PN_SCHEDSTAT
453 #undef PN
454 #undef P_SCHEDSTAT
455 #undef P
456 }
457 #endif
458
459 #ifdef CONFIG_CGROUP_SCHED
460 static char group_path[PATH_MAX];
461
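/*
 * group_path is a single shared buffer; the callers of task_group_path()
 * are serialized by sched_debug_lock, taken in print_cpu().
 */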
462 static char *task_group_path(struct task_group *tg)
463 {
464         if (autogroup_path(tg, group_path, PATH_MAX))
465                 return group_path;
466
467         cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
468
469         return group_path;
470 }
471 #endif
472
473 static void
474 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
475 {
476         if (task_current(rq, p))
477                 SEQ_printf(m, ">R");
478         else
479                 SEQ_printf(m, " %c", task_state_to_char(p));
480
481         SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
482                 p->comm, task_pid_nr(p),
483                 SPLIT_NS(p->se.vruntime),
484                 (long long)(p->nvcsw + p->nivcsw),
485                 p->prio);
486
487         SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
488                 SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
489                 SPLIT_NS(p->se.sum_exec_runtime),
490                 SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
491
492 #ifdef CONFIG_NUMA_BALANCING
493         SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
494 #endif
495 #ifdef CONFIG_CGROUP_SCHED
496         SEQ_printf(m, " %s", task_group_path(task_group(p)));
497 #endif
498
499         SEQ_printf(m, "\n");
500 }
501
502 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
503 {
504         struct task_struct *g, *p;
505
506         SEQ_printf(m, "\n");
507         SEQ_printf(m, "runnable tasks:\n");
508         SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
509                    "     wait-time             sum-exec        sum-sleep\n");
510         SEQ_printf(m, "-------------------------------------------------------"
511                    "------------------------------------------------------\n");
512
513         rcu_read_lock();
514         for_each_process_thread(g, p) {
515                 if (task_cpu(p) != rq_cpu)
516                         continue;
517
518                 print_task(m, rq, p);
519         }
520         rcu_read_unlock();
521 }
522
523 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
524 {
525         s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
526                 spread, rq0_min_vruntime, spread0;
527         struct rq *rq = cpu_rq(cpu);
528         struct sched_entity *last;
529         unsigned long flags;
530
531 #ifdef CONFIG_FAIR_GROUP_SCHED
532         SEQ_printf(m, "\n");
533         SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
534 #else
535         SEQ_printf(m, "\n");
536         SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
537 #endif
538         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
539                         SPLIT_NS(cfs_rq->exec_clock));
540
541         raw_spin_lock_irqsave(&rq->lock, flags);
542         if (rb_first_cached(&cfs_rq->tasks_timeline))
543                 MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
544         last = __pick_last_entity(cfs_rq);
545         if (last)
546                 max_vruntime = last->vruntime;
547         min_vruntime = cfs_rq->min_vruntime;
548         rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
549         raw_spin_unlock_irqrestore(&rq->lock, flags);
550         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
551                         SPLIT_NS(MIN_vruntime));
552         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
553                         SPLIT_NS(min_vruntime));
554         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
555                         SPLIT_NS(max_vruntime));
556         spread = max_vruntime - MIN_vruntime;
557         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
558                         SPLIT_NS(spread));
559         spread0 = min_vruntime - rq0_min_vruntime;
560         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
561                         SPLIT_NS(spread0));
562         SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
563                         cfs_rq->nr_spread_over);
564         SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
565         SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
566 #ifdef CONFIG_SMP
567         SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
568                         cfs_rq->avg.load_avg);
569         SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
570                         cfs_rq->avg.runnable_avg);
571         SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
572                         cfs_rq->avg.util_avg);
573         SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
574                         cfs_rq->avg.util_est.enqueued);
575         SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
576                         cfs_rq->removed.load_avg);
577         SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
578                         cfs_rq->removed.util_avg);
579         SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
580                         cfs_rq->removed.runnable_avg);
581 #ifdef CONFIG_FAIR_GROUP_SCHED
582         SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
583                         cfs_rq->tg_load_avg_contrib);
584         SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
585                         atomic_long_read(&cfs_rq->tg->load_avg));
586 #endif
587 #endif
588 #ifdef CONFIG_CFS_BANDWIDTH
589         SEQ_printf(m, "  .%-30s: %d\n", "throttled",
590                         cfs_rq->throttled);
591         SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
592                         cfs_rq->throttle_count);
593 #endif
594
595 #ifdef CONFIG_FAIR_GROUP_SCHED
596         print_cfs_group_stats(m, cpu, cfs_rq->tg);
597 #endif
598 }
599
600 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
601 {
602 #ifdef CONFIG_RT_GROUP_SCHED
603         SEQ_printf(m, "\n");
604         SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
605 #else
606         SEQ_printf(m, "\n");
607         SEQ_printf(m, "rt_rq[%d]:\n", cpu);
608 #endif
609
610 #define P(x) \
611         SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
612 #define PU(x) \
613         SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
614 #define PN(x) \
615         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
616
617         PU(rt_nr_running);
618 #ifdef CONFIG_SMP
619         PU(rt_nr_migratory);
620 #endif
621         P(rt_throttled);
622         PN(rt_time);
623         PN(rt_runtime);
624
625 #undef PN
626 #undef PU
627 #undef P
628 }
629
630 void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
631 {
632         struct dl_bw *dl_bw;
633
634         SEQ_printf(m, "\n");
635         SEQ_printf(m, "dl_rq[%d]:\n", cpu);
636
637 #define PU(x) \
638         SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
639
640         PU(dl_nr_running);
641 #ifdef CONFIG_SMP
642         PU(dl_nr_migratory);
643         dl_bw = &cpu_rq(cpu)->rd->dl_bw;
644 #else
645         dl_bw = &dl_rq->dl_bw;
646 #endif
647         SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
648         SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
649
650 #undef PU
651 }
652
653 static void print_cpu(struct seq_file *m, int cpu)
654 {
655         struct rq *rq = cpu_rq(cpu);
656         unsigned long flags;
657
658 #ifdef CONFIG_X86
659         {
660                 unsigned int freq = cpu_khz ? : 1;
661
662                 SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
663                            cpu, freq / 1000, (freq % 1000));
664         }
665 #else
666         SEQ_printf(m, "cpu#%d\n", cpu);
667 #endif
668
669 #define P(x)                                                            \
670 do {                                                                    \
671         if (sizeof(rq->x) == 4)                                         \
672                 SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));    \
673         else                                                            \
674                 SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
675 } while (0)
676
677 #define PN(x) \
678         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
679
680         P(nr_running);
681         P(nr_switches);
682         P(nr_uninterruptible);
683         PN(next_balance);
684         SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
685         PN(clock);
686         PN(clock_task);
687 #undef P
688 #undef PN
689
690 #ifdef CONFIG_SMP
691 #define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
692         P64(avg_idle);
693         P64(max_idle_balance_cost);
694 #undef P64
695 #endif
696
697 #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
698         if (schedstat_enabled()) {
699                 P(yld_count);
700                 P(sched_count);
701                 P(sched_goidle);
702                 P(ttwu_count);
703                 P(ttwu_local);
704         }
705 #undef P
706
707         spin_lock_irqsave(&sched_debug_lock, flags);
708         print_cfs_stats(m, cpu);
709         print_rt_stats(m, cpu);
710         print_dl_stats(m, cpu);
711
712         print_rq(m, rq, cpu);
713         spin_unlock_irqrestore(&sched_debug_lock, flags);
714         SEQ_printf(m, "\n");
715 }
716
717 static const char *sched_tunable_scaling_names[] = {
718         "none",
719         "logarithmic",
720         "linear"
721 };
722
723 static void sched_debug_header(struct seq_file *m)
724 {
725         u64 ktime, sched_clk, cpu_clk;
726         unsigned long flags;
727
728         local_irq_save(flags);
729         ktime = ktime_to_ns(ktime_get());
730         sched_clk = sched_clock();
731         cpu_clk = local_clock();
732         local_irq_restore(flags);
733
734         SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
735                 init_utsname()->release,
736                 (int)strcspn(init_utsname()->version, " "),
737                 init_utsname()->version);
738
739 #define P(x) \
740         SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
741 #define PN(x) \
742         SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
743         PN(ktime);
744         PN(sched_clk);
745         PN(cpu_clk);
746         P(jiffies);
747 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
748         P(sched_clock_stable());
749 #endif
750 #undef PN
751 #undef P
752
753         SEQ_printf(m, "\n");
754         SEQ_printf(m, "sysctl_sched\n");
755
756 #define P(x) \
757         SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
758 #define PN(x) \
759         SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
760         PN(sysctl_sched_latency);
761         PN(sysctl_sched_min_granularity);
762         PN(sysctl_sched_wakeup_granularity);
763         P(sysctl_sched_child_runs_first);
764         P(sysctl_sched_features);
765 #undef PN
766 #undef P
767
768         SEQ_printf(m, "  .%-40s: %d (%s)\n",
769                 "sysctl_sched_tunable_scaling",
770                 sysctl_sched_tunable_scaling,
771                 sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
772         SEQ_printf(m, "\n");
773 }
774
775 static int sched_debug_show(struct seq_file *m, void *v)
776 {
777         int cpu = (unsigned long)(v - 2);
778
779         if (cpu != -1)
780                 print_cpu(m, cpu);
781         else
782                 sched_debug_header(m);
783
784         return 0;
785 }
786
787 void sysrq_sched_debug_show(void)
788 {
789         int cpu;
790
791         sched_debug_header(NULL);
792         for_each_online_cpu(cpu) {
793                 /*
794                  * Need to reset softlockup watchdogs on all CPUs, because
795                  * another CPU might be blocked waiting for us to process
796                  * an IPI or stop_machine.
797                  */
798                 touch_nmi_watchdog();
799                 touch_all_softlockup_watchdogs();
800                 print_cpu(NULL, cpu);
801         }
802 }
803
804 /*
805  * This iterator needs some explanation.
806  * It returns 1 for the header position.
807  * This means 2 is CPU 0.
808  * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
809  * to use cpumask_* to iterate over the CPUs.
810  */
811 static void *sched_debug_start(struct seq_file *file, loff_t *offset)
812 {
813         unsigned long n = *offset;
814
815         if (n == 0)
816                 return (void *) 1;
817
818         n--;
819
820         if (n > 0)
821                 n = cpumask_next(n - 1, cpu_online_mask);
822         else
823                 n = cpumask_first(cpu_online_mask);
824
825         *offset = n + 1;
826
827         if (n < nr_cpu_ids)
828                 return (void *)(unsigned long)(n + 2);
829
830         return NULL;
831 }
832
833 static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
834 {
835         (*offset)++;
836         return sched_debug_start(file, offset);
837 }
838
839 static void sched_debug_stop(struct seq_file *file, void *data)
840 {
841 }
842
843 static const struct seq_operations sched_debug_sops = {
844         .start          = sched_debug_start,
845         .next           = sched_debug_next,
846         .stop           = sched_debug_stop,
847         .show           = sched_debug_show,
848 };
849
850 static int __init init_sched_debug_procfs(void)
851 {
852         if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
853                 return -ENOMEM;
854         return 0;
855 }
856
857 __initcall(init_sched_debug_procfs);
858
859 #define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
860 #define __P(F) __PS(#F, F)
861 #define   P(F) __PS(#F, p->F)
862 #define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
863 #define __PN(F) __PSN(#F, F)
864 #define   PN(F) __PSN(#F, p->F)
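/*
 * For example, P(se.load.weight) expands to
 *   SEQ_printf(m, "%-45s:%21Ld\n", "se.load.weight",
 *              (long long)(p->se.load.weight));
 * and PN() does the same through SPLIT_NS() for nanosecond-valued fields.
 */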
865
866
867 #ifdef CONFIG_NUMA_BALANCING
868 void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
869                 unsigned long tpf, unsigned long gsf, unsigned long gpf)
870 {
871         SEQ_printf(m, "numa_faults node=%d ", node);
872         SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
873         SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
874 }
875 #endif
876
877
878 static void sched_show_numa(struct task_struct *p, struct seq_file *m)
879 {
880 #ifdef CONFIG_NUMA_BALANCING
881         struct mempolicy *pol;
882
883         if (p->mm)
884                 P(mm->numa_scan_seq);
885
886         task_lock(p);
887         pol = p->mempolicy;
888         if (pol && !(pol->flags & MPOL_F_MORON))
889                 pol = NULL;
890         mpol_get(pol);
891         task_unlock(p);
892
893         P(numa_pages_migrated);
894         P(numa_preferred_nid);
895         P(total_numa_faults);
896         SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
897                         task_node(p), task_numa_group_id(p));
898         show_numa_stats(p, m);
899         mpol_put(pol);
900 #endif
901 }
902
903 void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
904                                                   struct seq_file *m)
905 {
906         unsigned long nr_switches;
907
908         SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
909                                                 get_nr_threads(p));
910         SEQ_printf(m,
911                 "---------------------------------------------------------"
912                 "----------\n");
913
914 #define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
915 #define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))
916
917         PN(se.exec_start);
918         PN(se.vruntime);
919         PN(se.sum_exec_runtime);
920
921         nr_switches = p->nvcsw + p->nivcsw;
922
923         P(se.nr_migrations);
924
925         if (schedstat_enabled()) {
926                 u64 avg_atom, avg_per_cpu;
927
928                 PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
929                 PN_SCHEDSTAT(se.statistics.wait_start);
930                 PN_SCHEDSTAT(se.statistics.sleep_start);
931                 PN_SCHEDSTAT(se.statistics.block_start);
932                 PN_SCHEDSTAT(se.statistics.sleep_max);
933                 PN_SCHEDSTAT(se.statistics.block_max);
934                 PN_SCHEDSTAT(se.statistics.exec_max);
935                 PN_SCHEDSTAT(se.statistics.slice_max);
936                 PN_SCHEDSTAT(se.statistics.wait_max);
937                 PN_SCHEDSTAT(se.statistics.wait_sum);
938                 P_SCHEDSTAT(se.statistics.wait_count);
939                 PN_SCHEDSTAT(se.statistics.iowait_sum);
940                 P_SCHEDSTAT(se.statistics.iowait_count);
941                 P_SCHEDSTAT(se.statistics.nr_migrations_cold);
942                 P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
943                 P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
944                 P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
945                 P_SCHEDSTAT(se.statistics.nr_forced_migrations);
946                 P_SCHEDSTAT(se.statistics.nr_wakeups);
947                 P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
948                 P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
949                 P_SCHEDSTAT(se.statistics.nr_wakeups_local);
950                 P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
951                 P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
952                 P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
953                 P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
954                 P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
955
956                 avg_atom = p->se.sum_exec_runtime;
957                 if (nr_switches)
958                         avg_atom = div64_ul(avg_atom, nr_switches);
959                 else
960                         avg_atom = -1LL;
961
962                 avg_per_cpu = p->se.sum_exec_runtime;
963                 if (p->se.nr_migrations) {
964                         avg_per_cpu = div64_u64(avg_per_cpu,
965                                                 p->se.nr_migrations);
966                 } else {
967                         avg_per_cpu = -1LL;
968                 }
969
970                 __PN(avg_atom);
971                 __PN(avg_per_cpu);
972         }
973
974         __P(nr_switches);
975         __PS("nr_voluntary_switches", p->nvcsw);
976         __PS("nr_involuntary_switches", p->nivcsw);
977
978         P(se.load.weight);
979 #ifdef CONFIG_SMP
980         P(se.avg.load_sum);
981         P(se.avg.runnable_sum);
982         P(se.avg.util_sum);
983         P(se.avg.load_avg);
984         P(se.avg.runnable_avg);
985         P(se.avg.util_avg);
986         P(se.avg.last_update_time);
987         P(se.avg.util_est.ewma);
988         P(se.avg.util_est.enqueued);
989 #endif
990 #ifdef CONFIG_UCLAMP_TASK
991         __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
992         __PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
993         __PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
994         __PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
995 #endif
996         P(policy);
997         P(prio);
998         if (task_has_dl_policy(p)) {
999                 P(dl.runtime);
1000                 P(dl.deadline);
1001         }
1002 #undef PN_SCHEDSTAT
1003 #undef P_SCHEDSTAT
1004
1005         {
1006                 unsigned int this_cpu = raw_smp_processor_id();
1007                 u64 t0, t1;
1008
1009                 t0 = cpu_clock(this_cpu);
1010                 t1 = cpu_clock(this_cpu);
1011                 __PS("clock-delta", t1-t0);
1012         }
1013
1014         sched_show_numa(p, m);
1015 }
1016
1017 void proc_sched_set_task(struct task_struct *p)
1018 {
1019 #ifdef CONFIG_SCHEDSTATS
1020         memset(&p->se.statistics, 0, sizeof(p->se.statistics));
1021 #endif
1022 }