// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
 do {                                           \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                pr_cont(x);                     \
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);

        return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;

        return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
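
/*
 * Example: SPLIT_NS(1234567890ULL) expands to 1234, 567890 and prints
 * as "1234.567890" with the "%Ld.%06ld" format strings used below.
 */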

#define SCHED_FEAT(name, enabled)       \
        #name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)       \
        jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
        static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
        static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* CONFIG_JUMP_LABEL */
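
/*
 * Note: sysctl_sched_features (the bitmask toggled below) is kept in
 * sync with these static keys; with CONFIG_JUMP_LABEL the sched_feat()
 * tests in the scheduler are expected to compile down to patched jump
 * labels instead of a bitmask test.
 */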

static int sched_feat_set(char *cmp)
{
        int i;
        int neg = 0;

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
        if (i < 0)
                return i;

        if (neg) {
                sysctl_sched_features &= ~(1UL << i);
                sched_feat_disable(i);
        } else {
                sysctl_sched_features |= (1UL << i);
                sched_feat_enable(i);
        }

        return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int ret;
        struct inode *inode;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
        cpus_read_lock();
        inode_lock(inode);
        ret = sched_feat_set(cmp);
        inode_unlock(inode);
        cpus_read_unlock();
        if (ret < 0)
                return ret;

        *ppos += cnt;

        return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
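
/*
 * Assuming debugfs is mounted at /sys/kernel/debug (see
 * sched_init_debug() below, which creates the file), features can be
 * inspected and toggled at run time, e.g.:
 *
 *   # cat /sys/kernel/debug/sched_features
 *   # echo NO_NEXT_BUDDY > /sys/kernel/debug/sched_features
 *
 * where NEXT_BUDDY is one of the names generated from features.h.
 */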

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        debugfs_create_bool("sched_debug", 0644, NULL,
                        &sched_debug_enabled);

        return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
        {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
        {}
};

static struct ctl_table sd_ctl_root[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
        {}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
        struct ctl_table *entry =
                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

        return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
        struct ctl_table *entry;

        /*
         * In the intermediate directories, both the child directory and
         * procname are dynamically allocated and could fail but the mode
         * will always be set. In the lowest directory the names are
         * static strings and all have proc handlers.
         */
        for (entry = *tablep; entry->mode; entry++) {
                if (entry->child)
                        sd_free_ctl_entry(&entry->child);
                if (entry->proc_handler == NULL)
                        kfree(entry->procname);
        }

        kfree(*tablep);
        *tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
                const char *procname, void *data, int maxlen,
                umode_t mode, proc_handler *proc_handler)
{
        entry->procname = procname;
        entry->data = data;
        entry->maxlen = maxlen;
        entry->mode = mode;
        entry->proc_handler = proc_handler;
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
        struct ctl_table *table = sd_alloc_ctl_entry(9);

        if (table == NULL)
                return NULL;

        set_table_entry(&table[0], "min_interval",        &sd->min_interval,        sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[1], "max_interval",        &sd->max_interval,        sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[2], "busy_factor",         &sd->busy_factor,         sizeof(int),  0644, proc_dointvec_minmax);
        set_table_entry(&table[3], "imbalance_pct",       &sd->imbalance_pct,       sizeof(int),  0644, proc_dointvec_minmax);
        set_table_entry(&table[4], "cache_nice_tries",    &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
        set_table_entry(&table[5], "flags",               &sd->flags,               sizeof(int),  0644, proc_dointvec_minmax);
        set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[7], "name",                sd->name,            CORENAME_MAX_SIZE, 0444, proc_dostring);
        /* &table[8] is terminator */

        return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
        struct ctl_table *entry, *table;
        struct sched_domain *sd;
        int domain_num = 0, i;
        char buf[32];

        for_each_domain(cpu, sd)
                domain_num++;
        entry = table = sd_alloc_ctl_entry(domain_num + 1);
        if (table == NULL)
                return NULL;

        i = 0;
        for_each_domain(cpu, sd) {
                snprintf(buf, 32, "domain%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_domain_table(sd);
                entry++;
                i++;
        }
        return table;
}
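
/*
 * The tables built above show up under /proc/sys/kernel/sched_domain/,
 * one directory per CPU and per domain level, e.g.:
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct
 */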

static cpumask_var_t            sd_sysctl_cpus;
static struct ctl_table_header  *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
        static struct ctl_table *cpu_entries;
        static struct ctl_table **cpu_idx;
        static bool init_done = false;
        char buf[32];
        int i;

        if (!cpu_entries) {
                cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
                if (!cpu_entries)
                        return;

                WARN_ON(sd_ctl_dir[0].child);
                sd_ctl_dir[0].child = cpu_entries;
        }

        if (!cpu_idx) {
                struct ctl_table *e = cpu_entries;

                cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
                if (!cpu_idx)
                        return;

                /* deal with sparse possible map */
                for_each_possible_cpu(i) {
                        cpu_idx[i] = e;
                        e++;
                }
        }

        if (!cpumask_available(sd_sysctl_cpus)) {
                if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
                        return;
        }

        if (!init_done) {
                init_done = true;
                /* init to possible to not have holes in @cpu_entries */
                cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
        }

        for_each_cpu(i, sd_sysctl_cpus) {
                struct ctl_table *e = cpu_idx[i];

                if (e->child)
                        sd_free_ctl_entry(&e->child);

                if (!e->procname) {
                        snprintf(buf, 32, "cpu%d", i);
                        e->procname = kstrdup(buf, GFP_KERNEL);
                }
                e->mode = 0555;
                e->child = sd_alloc_ctl_cpu_table(i);

                __cpumask_clear_cpu(i, sd_sysctl_cpus);
        }

        WARN_ON(sd_sysctl_header);
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
        if (cpumask_available(sd_sysctl_cpus))
                __cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
        unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];

#define P(F)            SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)F)
#define P_SCHEDSTAT(F)  SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)schedstat_val(F))
#define PN(F)           SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

        if (!se)
                return;

        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);

        if (schedstat_enabled()) {
                PN_SCHEDSTAT(se->statistics.wait_start);
                PN_SCHEDSTAT(se->statistics.sleep_start);
                PN_SCHEDSTAT(se->statistics.block_start);
                PN_SCHEDSTAT(se->statistics.sleep_max);
                PN_SCHEDSTAT(se->statistics.block_max);
                PN_SCHEDSTAT(se->statistics.exec_max);
                PN_SCHEDSTAT(se->statistics.slice_max);
                PN_SCHEDSTAT(se->statistics.wait_max);
                PN_SCHEDSTAT(se->statistics.wait_sum);
                P_SCHEDSTAT(se->statistics.wait_count);
        }

        P(se->load.weight);
        P(se->runnable_weight);
#ifdef CONFIG_SMP
        P(se->avg.load_avg);
        P(se->avg.util_avg);
        P(se->avg.runnable_load_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;

        cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

        return group_path;
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (rq->curr == p)
                SEQ_printf(m, ">R");
        else
                SEQ_printf(m, " %c", task_state_to_char(p));

        SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);

        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

        SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;

        SEQ_printf(m, "\n");
        SEQ_printf(m, "runnable tasks:\n");
        SEQ_printf(m, " S           task   PID         tree-key  switches  prio"
                   "     wait-time             sum-exec        sum-sleep\n");
        SEQ_printf(m, "-------------------------------------------------------"
                   "----------------------------------------------------\n");

        rcu_read_lock();
        for_each_process_thread(g, p) {
                if (task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        }
        rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "\n");
        SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
        SEQ_printf(m, "\n");
        SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (rb_first_cached(&cfs_rq->tasks_timeline))
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
                        cfs_rq->avg.load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
                        cfs_rq->avg.runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
                        cfs_rq->avg.util_avg);
        SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
                        cfs_rq->avg.util_est.enqueued);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
                        cfs_rq->removed.load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
                        cfs_rq->removed.util_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
                        cfs_rq->removed.runnable_sum);
#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
                        cfs_rq->tg_load_avg_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
                        cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
        SEQ_printf(m, "\n");
        SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
        SEQ_printf(m, "\n");
        SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

        PU(rt_nr_running);
#ifdef CONFIG_SMP
        PU(rt_nr_migratory);
#endif
        P(rt_throttled);
        PN(rt_time);
        PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
        struct dl_bw *dl_bw;

        SEQ_printf(m, "\n");
        SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

        PU(dl_nr_running);
#ifdef CONFIG_SMP
        PU(dl_nr_migratory);
        dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
        dl_bw = &dl_rq->dl_bw;
#endif
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)                                                            \
do {                                                                    \
        if (sizeof(rq->x) == 4)                                         \
                SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));    \
        else                                                            \
                SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
        PN(next_balance);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
        PN(clock);
        PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
        P64(avg_idle);
        P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
        if (schedstat_enabled()) {
                P(yld_count);
                P(sched_count);
                P(sched_goidle);
                P(ttwu_count);
                P(ttwu_local);
        }
#undef P

        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
        print_dl_stats(m, cpu);

        print_rq(m, rq, cpu);
        spin_unlock_irqrestore(&sched_debug_lock, flags);
        SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};

static void sched_debug_header(struct seq_file *m)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
        P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable());
#endif
#undef PN
#undef P

        SEQ_printf(m, "\n");
        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n",
                "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
        SEQ_printf(m, "\n");
}

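/*
 * The iterator cookie produced by sched_debug_start() below is 1 for
 * the header and cpu + 2 for a CPU, so (v - 2) recovers either -1
 * (print the header) or the CPU number.
 */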
static int sched_debug_show(struct seq_file *m, void *v)
{
        int cpu = (unsigned long)(v - 2);

        if (cpu != -1)
                print_cpu(m, cpu);
        else
                sched_debug_header(m);

        return 0;
}

void sysrq_sched_debug_show(void)
{
        int cpu;

        sched_debug_header(NULL);
        for_each_online_cpu(cpu) {
                /*
                 * Need to reset softlockup watchdogs on all CPUs, because
                 * another CPU might be blocked waiting for us to process
                 * an IPI or stop_machine.
                 */
                touch_nmi_watchdog();
                touch_all_softlockup_watchdogs();
                print_cpu(NULL, cpu);
        }
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
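/*
 * Cookie values produced by this iterator:
 *
 *   (void *)1          - the header line
 *   (void *)(cpu + 2)  - the entry for online CPU @cpu
 *   NULL               - end of the sequence
 */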
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);

        return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
        .start          = sched_debug_start,
        .next           = sched_debug_next,
        .stop           = sched_debug_stop,
        .show           = sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
        if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
                return -ENOMEM;
        return 0;
}

__initcall(init_sched_debug_procfs);
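
/*
 * The full dump is then available via, e.g.:
 *
 *   # cat /proc/sched_debug
 */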

#define __P(F)  SEQ_printf(m, "%-45s:%21Ld\n",       #F, (long long)F)
#define   P(F)  SEQ_printf(m, "%-45s:%21Ld\n",       #F, (long long)p->F)
#define __PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define   PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
                unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
        SEQ_printf(m, "numa_faults node=%d ", node);
        SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
        SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;

        if (p->mm)
                P(mm->numa_scan_seq);

        task_lock(p);
        pol = p->mempolicy;
        if (pol && !(pol->flags & MPOL_F_MORON))
                pol = NULL;
        mpol_get(pol);
        task_unlock(p);

        P(numa_pages_migrated);
        P(numa_preferred_nid);
        P(total_numa_faults);
        SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
                        task_node(p), task_numa_group_id(p));
        show_numa_stats(p, m);
        mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
                                                  struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------"
                "----------\n");
#define __P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);

        nr_switches = p->nvcsw + p->nivcsw;

        P(se.nr_migrations);

        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;

                PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
                PN_SCHEDSTAT(se.statistics.wait_start);
                PN_SCHEDSTAT(se.statistics.sleep_start);
                PN_SCHEDSTAT(se.statistics.block_start);
                PN_SCHEDSTAT(se.statistics.sleep_max);
                PN_SCHEDSTAT(se.statistics.block_max);
                PN_SCHEDSTAT(se.statistics.exec_max);
                PN_SCHEDSTAT(se.statistics.slice_max);
                PN_SCHEDSTAT(se.statistics.wait_max);
                PN_SCHEDSTAT(se.statistics.wait_sum);
                P_SCHEDSTAT(se.statistics.wait_count);
                PN_SCHEDSTAT(se.statistics.iowait_sum);
                P_SCHEDSTAT(se.statistics.iowait_count);
                P_SCHEDSTAT(se.statistics.nr_migrations_cold);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
                P_SCHEDSTAT(se.statistics.nr_forced_migrations);
                P_SCHEDSTAT(se.statistics.nr_wakeups);
                P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
                P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
                P_SCHEDSTAT(se.statistics.nr_wakeups_local);
                P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
                P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
                P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);
        }

        __P(nr_switches);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_voluntary_switches", (long long)p->nvcsw);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_involuntary_switches", (long long)p->nivcsw);

        P(se.load.weight);
        P(se.runnable_weight);
#ifdef CONFIG_SMP
        P(se.avg.load_sum);
        P(se.avg.runnable_load_sum);
        P(se.avg.util_sum);
        P(se.avg.load_avg);
        P(se.avg.runnable_load_avg);
        P(se.avg.util_avg);
        P(se.avg.last_update_time);
        P(se.avg.util_est.ewma);
        P(se.avg.util_est.enqueued);
#endif
        P(policy);
        P(prio);
        if (task_has_dl_policy(p)) {
                P(dl.runtime);
                P(dl.deadline);
        }
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                SEQ_printf(m, "%-45s:%21Ld\n",
                           "clock-delta", (long long)(t1-t0));
        }

        sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}