// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
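
/*
 * Illustrative use: SEQ_printf(m, "cpu#%d\n", cpu) lands in the seq_file
 * when /proc/sched_debug is being read, and falls back to pr_cont() on
 * the console when m is NULL, as in sysrq_sched_debug_show() below.
 */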

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
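
/*
 * Worked example (illustrative): for nsec = 1234567890, nsec_high()
 * returns 1234 and nsec_low() returns 567890, so a "%Ld.%06ld" format
 * fed with SPLIT_NS(nsec) prints "1234.567890", i.e. nanoseconds
 * rendered as milliseconds with six fractional digits.
 */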

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
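
/*
 * For instance, a features.h line such as
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) contributes the string
 * "GENTLE_FAIR_SLEEPERS" to the table above; the names stay in bit
 * order with the features enum, which is what sched_feat_show() and
 * sched_feat_set() rely on.
 */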

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
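
/*
 * Example (illustrative feature name): writing "NO_GENTLE_FAIR_SLEEPERS"
 * strips the "NO_" prefix, matches the rest against sched_feat_names[],
 * clears that bit in sysctl_sched_features and disables the backing
 * static key; without the prefix the same input enables the feature.
 */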

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
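
/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   cat /sys/kernel/debug/sched_features
 *   echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * The first command lists every feature (disabled ones carry a "NO_"
 * prefix); the second disables one via sched_feat_write() above.
 */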

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}
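
/*
 * Note: kcalloc() zero-fills the table, so the final entry (procname ==
 * NULL, mode == 0) doubles as the sentinel that terminates both the
 * sysctl registration walk and the loop in sd_free_ctl_entry() below.
 */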

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(9);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval",        &sd->min_interval,        sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval",        &sd->max_interval,        sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_factor",         &sd->busy_factor,         sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "imbalance_pct",       &sd->imbalance_pct,       sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "cache_nice_tries",    &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "flags",               &sd->flags,               sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[7], "name",                sd->name,            CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[8] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
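
/*
 * The tables built above hang off sd_ctl_root, yielding a hierarchy of
 * the form (illustrative paths):
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *   /proc/sys/kernel/sched_domain/cpu0/domain1/imbalance_pct
 */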

static cpumask_var_t		sd_sysctl_cpus;
static struct ctl_table_header	*sd_sysctl_header;

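/*
 * Lifecycle sketch: domain rebuilds call dirty_sched_domain_sysctl() to
 * mark CPUs whose domains changed; the next register_sched_domain_sysctl()
 * then rebuilds the sub-tables only for the CPUs set in sd_sysctl_cpus,
 * while unregister_sched_domain_sysctl() drops the header in between.
 */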
void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* Start from the possible mask so @cpu_entries has no holes */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* May be called multiple times per registration */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

	return group_path;
}
#endif
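
/*
 * task_group_path() yields "/" for the root group, a cgroup path such as
 * "/foo" for a cgroup-backed group, or "/autogroup-NN" when autogrouping
 * owns the task group (illustrative values).
 */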

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S           task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

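/*
 * In the dump below, MIN_vruntime and max_vruntime are the vruntimes of
 * the leftmost and rightmost queued entities, "spread" is their
 * difference, and "spread0" compares this runqueue's min_vruntime with
 * CPU 0's, a rough indicator of cross-CPU vruntime skew.
 */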
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
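
/*
 * Offset walk-through (illustrative, CPUs 0 and 2 online): *offset == 0
 * returns the header token 1; *offset == 1 returns 2 (CPU 0); *offset == 2
 * takes cpumask_next(0, ...) == 2 and returns 4 (CPU 2); the next call
 * runs off the mask and returns NULL, ending the sequence.
 */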

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)
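
/*
 * Illustrative expansion: P(prio) becomes
 * SEQ_printf(m, "%-45s:%21Ld\n", "prio", (long long)(p->prio)),
 * i.e. a left-aligned field name and a right-aligned 64-bit value.
 */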


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	P(se.avg.util_est.enqueued);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}