// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"
#include "perf-sys.h"

#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <inttypes.h>

#include <errno.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <linux/time64.h>

#include <linux/ctype.h>

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096
#define COMM_LEN		20
#define SYM_LEN			129
#define MAX_PID			1024000
struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};
#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

/* task state bitmask, copied from include/linux/sched.h */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
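/*
 * The characters in TASK_STATE_TO_CHAR_STR line up with the bit
 * positions above: state 0 prints as 'R', TASK_INTERRUPTIBLE (1) as
 * 'S', TASK_UNINTERRUPTIBLE (2) as 'D'; for the multi-bit values the
 * index of the lowest set bit is used, so __TASK_STOPPED (4, bit 3)
 * maps to 'T' (see task_state_char() further down).
 */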
enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
	int			num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;
struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	int			*comp_cpus;
	bool			 comp;
	struct perf_thread_map	*color_pids;
	const char		*color_pids_str;
	struct perf_cpu_map	*color_cpus;
	const char		*color_cpus_str;
	struct perf_cpu_map	*cpus;
	const char		*cpus_str;
};
struct perf_sched {
	struct perf_tool tool;
	const char	 *sort_order;
	unsigned long	 nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t	 start_work_mutex;
	pthread_mutex_t	 work_done_wait_mutex;
	int		 profile_cpu;
/*
 * Track the current task - that way we can detect weird events, such
 * as a task being switched away that is not current.
 */
	int		 max_cpu;
	u32		 curr_pid[MAX_CPUS];
	struct thread	 *curr_thread[MAX_CPUS];
	char		 next_shortname1;
	char		 next_shortname2;
	unsigned int	 replay_repeat;
	unsigned long	 nr_run_events;
	unsigned long	 nr_sleep_events;
	unsigned long	 nr_wakeup_events;
	unsigned long	 nr_sleep_corrections;
	unsigned long	 nr_run_events_optimized;
	unsigned long	 targetless_wakeups;
	unsigned long	 multitarget_wakeups;
	unsigned long	 nr_runs;
	unsigned long	 nr_timestamps;
	unsigned long	 nr_unordered_timestamps;
	unsigned long	 nr_context_switch_bugs;
	unsigned long	 nr_events;
	unsigned long	 nr_lost_chunks;
	unsigned long	 nr_lost_events;
	u64		 run_measurement_overhead;
	u64		 sleep_measurement_overhead;
	u64		 start_time;
	u64		 cpu_usage;
	u64		 runavg_cpu_usage;
	u64		 parent_cpu_usage;
	u64		 runavg_parent_cpu_usage;
	u64		 sum_runtime;
	u64		 sum_fluct;
	u64		 run_avg;
	u64		 all_runtime;
	u64		 all_count;
	u64		 cpu_last_switched[MAX_CPUS];
	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	bool force;
	bool skip_merge;
	struct perf_sched_map map;

	/* options for timehist command */
	bool		summary;
	bool		summary_only;
	bool		idle_hist;
	unsigned int	max_stack;
	bool		show_cpu_visual;
	bool		show_wakeups;
	bool		show_next;
	bool		show_migrations;
	bool		show_state;
	u64		skipped_samples;
	const char	*time_str;
	struct perf_time_interval ptime;
	struct perf_time_interval hist_time;
};
/* per thread run time data */
struct thread_runtime {
	u64 last_time;      /* time of previous sched in/out event */
	u64 dt_run;         /* run time */
	u64 dt_sleep;       /* time between CPU access by sleep (off cpu) */
	u64 dt_iowait;      /* time between CPU access by iowait (off cpu) */
	u64 dt_preempt;     /* time between CPU access by preempt (off cpu) */
	u64 dt_delay;       /* time between wakeup and sched-in */
	u64 ready_to_run;   /* time of wakeup */

	struct stats run_stats;
	u64 total_run_time;
	u64 total_sleep_time;
	u64 total_iowait_time;
	u64 total_preempt_time;
	u64 total_delay_time;

	int last_state;

	char shortname[3];
	bool comm_changed;

	u64 migrations;
};

/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */
};

/* per cpu idle time data */
struct idle_thread_runtime {
	struct thread_runtime	tr;
	struct thread		*last_thread;
	struct rb_root_cached	sorted_root;
	struct callchain_root	callchain;
	struct callchain_cursor	cursor;
};

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose > 0)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}
static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(sched, atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_MIGRATION:
			break;
		default:
			BUG_ON(1);
	}
}
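/*
 * In short: a RUN atom burns CPU for its recorded duration, a SLEEP
 * atom blocks on its wait_sem until the matching WAKEUP atom posts
 * it, and MIGRATION atoms are replayed as no-ops.
 */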
static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

	return sum;
}
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Try again with the -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}
static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;
	int fd;
};
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}
static void create_tasks(struct perf_sched *sched)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}

static void wait_for_tasks(struct perf_sched *sched)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	pthread_mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage) / sched->replay_repeat;

	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}
static void run_one_test(struct perf_sched *sched)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}
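/*
 * With the format strings above, each replay iteration prints a line
 * that looks roughly like (values are illustrative only):
 *
 *   #1  : 0.558, ravg: 0.55, cpu: 0.13 / 0.13
 *
 * i.e. run number, wall time in msec, running average of wall time,
 * and measured vs. running-average CPU usage in msec.
 */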
static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
		    struct evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = perf_evsel__strval(evsel, sample, "comm");
	const u32 pid	 = perf_evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose > 0) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}

static int replay_switch_event(struct perf_sched *sched,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm  = perf_evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose > 0)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
			 child, parent);
		goto out_put;
	}

	if (verbose > 0) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
		printf("...  child: %s/%d\n", thread__comm_str(child), child->tid);
	}

	register_pid(sched, parent->tid, thread__comm_str(parent));
	register_pid(sched, child->tid, thread__comm_str(child));
out_put:
	thread__put(child);
	thread__put(parent);
	return 0;
}
struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));
	if (!r)
		return NULL;

	init_stats(&r->run_stats);
	thread__set_priv(thread, r);

	return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
	struct thread_runtime *tr;

	tr = thread__priv(thread);
	if (tr == NULL) {
		tr = thread__init_runtime(thread);
		if (tr == NULL)
			pr_debug("Failed to malloc memory for runtime data.\n");
	}

	return tr;
}

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	bool leftmost = true;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static char sched_out_state(u64 prev_state)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
}
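/*
 * sched_out_state() indexes the state string directly, so a
 * prev_state of 1 (TASK_INTERRUPTIBLE) yields 'S' and 2
 * (TASK_UNINTERRUPTIBLE) yields 'D'.  Note this direct lookup only
 * lines up with the bitmask for the low values: 4 (__TASK_STOPPED)
 * indexes 't' rather than 'T', and large values would read past the
 * string.  task_state_char() below uses ffs() to decode the bitmask
 * properly.
 */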
static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}
static int latency_switch_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)
		goto out_put;

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			goto out_put;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			goto out_put;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		return -1;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			goto out_put;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		/*
		 * A task we have not heard about yet came in;
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			goto out_put;
	}
	add_sched_in_event(in_events, timestamp);
	err = 0;
out_put:
	thread__put(sched_out);
	thread__put(sched_in);
	return err;
}
static int latency_runtime_event(struct perf_sched *sched,
				 struct evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid");
	const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	if (thread == NULL)
		return err;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	add_runtime_event(atoms, runtime, timestamp);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static int latency_wakeup_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;
	int err = -1;

	wakee = machine__findnew_thread(machine, -1, pid);
	if (wakee == NULL)
		return err;
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * A wakeup event is not guaranteed to arrive while the task is
	 * off the run queue: it can also fire while the task is already
	 * on the run queue, merely flipping ->state to TASK_RUNNING.  In
	 * that case do not set ->wake_up_time.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are looking at only one, so don't
	 * skip in this case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		goto out_ok;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		goto out_ok;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
out_ok:
	err = 0;
out_put:
	thread__put(wakee);
	return err;
}
static int latency_migrate_task_event(struct perf_sched *sched,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;
	int err = -1;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
		return err;
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			goto out_put;
		register_pid(sched, migrant->tid, thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;
	err = 0;
out_put:
	thread__put(migrant);
	return err;
}

static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;
	char max_lat_at[32];

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count   += work_list->nb_atoms;

	if (work_list->num_merged > 1)
		ret = printf("  %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
	else
		ret = printf("  %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;
	timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at));

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n",
	      (double)work_list->total_runtime / NSEC_PER_MSEC,
		 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
		 (double)work_list->max_lat / NSEC_PER_MSEC,
		 max_lat_at);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread == r->thread)
		return 0;
	if (l->thread->tid < r->thread->tid)
		return -1;
	if (l->thread->tid > r->thread->tid)
		return 1;
	return (int)(l->thread - r->thread);
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp  = avg_cmp,
	};
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_cmp,
	};
	static struct sort_dimension pid_sort_dimension = {
		.name = "pid",
		.cmp  = pid_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension switch_sort_dimension = {
		.name = "switch",
		.cmp  = switch_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,
	};

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}
static void perf_sched__sort_lat(struct perf_sched *sched)
{
	struct rb_node *node;
	struct rb_root_cached *root = &sched->atom_root;
again:
	for (;;) {
		struct work_atoms *data;
		node = rb_first_cached(root);
		if (!node)
			break;

		rb_erase_cached(node, root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	}
	if (root == &sched->atom_root) {
		root = &sched->merged_atom_root;
		goto again;
	}
}
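/*
 * The sort runs twice: first over atom_root and then, via the goto,
 * over merged_atom_root, so entries from both trees end up in
 * sorted_atom_root ordered by the user-selected sort keys.
 */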
static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

	return 0;
}

union map_priv {
	void	*ptr;
	bool	 color;
};

static bool thread__has_color(struct thread *thread)
{
	union map_priv priv = {
		.ptr = thread__priv(thread),
	};

	return priv.color;
}

static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__findnew_thread(machine, pid, tid);
	union map_priv priv = {
		.color = false,
	};

	if (!sched->map.color_pids || !thread || thread__priv(thread))
		return thread;

	if (thread_map__has(sched->map.color_pids, tid))
		priv.color = true;

	thread__set_priv(thread, priv.ptr);
	return thread;
}
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	struct thread_runtime *tr;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int i, this_cpu = sample->cpu;
	int cpus_nr;
	bool new_cpu = false;
	const char *color = PERF_COLOR_NORMAL;
	char stimestamp[32];

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	if (sched->map.comp) {
		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
		if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
			sched->map.comp_cpus[cpus_nr++] = this_cpu;
			new_cpu = true;
		}
	} else
		cpus_nr = sched->max_cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu];
	sched->cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
	if (sched_in == NULL)
		return -1;

	tr = thread__get_runtime(sched_in);
	if (tr == NULL) {
		thread__put(sched_in);
		return -1;
	}

	sched->curr_thread[this_cpu] = thread__get(sched_in);

	new_shortname = 0;
	if (!tr->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			tr->shortname[0] = '.';
			tr->shortname[1] = ' ';
		} else {
			tr->shortname[0] = sched->next_shortname1;
			tr->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (i = 0; i < cpus_nr; i++) {
		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
		struct thread *curr_thread = sched->curr_thread[cpu];
		struct thread_runtime *curr_tr;
		const char *pid_color = color;
		const char *cpu_color = color;

		if (curr_thread && thread__has_color(curr_thread))
			pid_color = COLOR_PIDS;

		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
			continue;

		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
			cpu_color = COLOR_CPUS;

		if (cpu != this_cpu)
			color_fprintf(stdout, color, " ");
		else
			color_fprintf(stdout, cpu_color, "*");

		if (sched->curr_thread[cpu]) {
			curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
			if (curr_tr == NULL) {
				thread__put(sched_in);
				return -1;
			}
			color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
		} else
			color_fprintf(stdout, color, "   ");
	}

	if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
		goto out;

	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
	color_fprintf(stdout, color, "  %12s secs ", stimestamp);
	if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
		const char *pid_color = color;

		if (thread__has_color(sched_in))
			pid_color = COLOR_PIDS;

		color_fprintf(stdout, pid_color, "%s => %s:%d",
			      tr->shortname, thread__comm_str(sched_in), sched_in->tid);
		tr->comm_changed = false;
	}

	if (sched->map.comp && new_cpu)
		color_fprintf(stdout, color, " (CPU %d)", this_cpu);

out:
	color_fprintf(stdout, color, "\n");

	thread__put(sched_in);

	return 0;
}
static int process_sched_switch_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;
	}

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;
	return err;
}

static int process_sched_runtime_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

	return 0;
}

static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);

	return 0;
}

static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);
	}

	return err;
}

static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	struct thread *thread;
	struct thread_runtime *tr;
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	if (err)
		return err;

	thread = machine__find_thread(machine, sample->pid, sample->tid);
	if (!thread) {
		pr_err("Internal error: can't find thread\n");
		return -1;
	}

	tr = thread__get_runtime(thread);
	if (tr == NULL) {
		thread__put(thread);
		return -1;
	}

	tr->comm_changed = true;
	thread__put(thread);

	return 0;
}
static int perf_sched__read_events(struct perf_sched *sched)
{
	const struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = sched->force,
	};
	int rc = -1;

	session = perf_session__new(&data, false, &sched->tool);
	if (session == NULL) {
		pr_debug("No Memory for session\n");
		return -1;
	}

	symbol__init(&session->header.env);

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}

		sched->nr_events      = session->evlist->stats.nr_events[0];
		sched->nr_lost_events = session->evlist->stats.total_lost;
		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
	}

	rc = 0;
out_delete:
	perf_session__delete(session);
	return rc;
}
/*
 * scheduling times are printed as msec.usec
 */
static inline void print_sched_time(unsigned long long nsecs, int width)
{
	unsigned long msecs;
	unsigned long usecs;

	msecs  = nsecs / NSEC_PER_MSEC;
	nsecs -= msecs * NSEC_PER_MSEC;
	usecs  = nsecs / NSEC_PER_USEC;
	printf("%*lu.%03lu ", width, msecs, usecs);
}
/*
 * returns runtime data for event, allocating memory for it the
 * first time it is used.
 */
static struct evsel_runtime *perf_evsel__get_runtime(struct evsel *evsel)
{
	struct evsel_runtime *r = evsel->priv;

	if (r == NULL) {
		r = zalloc(sizeof(struct evsel_runtime));
		evsel->priv = r;
	}

	return r;
}

/*
 * save last time event was seen per cpu
 */
static void perf_evsel__save_time(struct evsel *evsel,
				  u64 timestamp, u32 cpu)
{
	struct evsel_runtime *r = perf_evsel__get_runtime(evsel);

	if (r == NULL)
		return;

	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
		int i, n = __roundup_pow_of_two(cpu+1);
		void *p = r->last_time;

		p = realloc(r->last_time, n * sizeof(u64));
		if (!p)
			return;

		r->last_time = p;
		for (i = r->ncpu; i < n; ++i)
			r->last_time[i] = (u64) 0;

		r->ncpu = n;
	}

	r->last_time[cpu] = timestamp;
}

/* returns last time this event was seen on the given cpu */
static u64 perf_evsel__get_time(struct evsel *evsel, u32 cpu)
{
	struct evsel_runtime *r = perf_evsel__get_runtime(evsel);

	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
		return 0;

	return r->last_time[cpu];
}

static int comm_width = 30;

static char *timehist_get_commstr(struct thread *thread)
{
	static char str[32];
	const char *comm = thread__comm_str(thread);
	pid_t tid = thread->tid;
	pid_t pid = thread->pid_;
	int n;

	if (pid == 0)
		n = scnprintf(str, sizeof(str), "%s", comm);

	else if (tid != pid)
		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);

	else
		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);

	if (n > comm_width)
		comm_width = n;

	return str;
}
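/*
 * Examples: the main thread of "gcc" with tid == pid == 1234 prints
 * as "gcc[1234]", one of its worker threads with tid 1235 as
 * "gcc[1235/1234]", and the idle task (pid 0) as just its comm.
 * comm_width grows to fit the longest string seen so far.
 */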
static void timehist_header(struct perf_sched *sched)
{
	u32 ncpus = sched->max_cpu + 1;
	u32 i, j;

	printf("%15s %6s ", "time", "cpu");

	if (sched->show_cpu_visual) {
		printf(" ");
		for (i = 0, j = 0; i < ncpus; ++i) {
			printf("%x", j++);
			if (j > 15)
				j = 0;
		}
		printf(" ");
	}

	printf(" %-*s  %9s  %9s  %9s", comm_width,
		"task name", "wait time", "sch delay", "run time");

	if (sched->show_state)
		printf("  %s", "state");

	printf("\n");

	/*
	 * units row
	 */
	printf("%15s %-6s ", "", "");

	if (sched->show_cpu_visual)
		printf(" %*s ", ncpus, "");

	printf(" %-*s  %9s  %9s  %9s", comm_width,
	       "[tid/pid]", "(msec)", "(msec)", "(msec)");

	if (sched->show_state)
		printf("  %5s", "");

	printf("\n");

	/*
	 * separator
	 */
	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);

	if (sched->show_cpu_visual)
		printf(" %.*s ", ncpus, graph_dotted_line);

	printf(" %.*s  %.9s  %.9s  %.9s", comm_width,
		graph_dotted_line, graph_dotted_line, graph_dotted_line,
		graph_dotted_line);

	if (sched->show_state)
		printf("  %.5s", graph_dotted_line);

	printf("\n");
}
static char task_state_char(struct thread *thread, int state)
{
	static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
	unsigned bit = state ? ffs(state) : 0;

	/* 'I' for idle */
	if (thread->tid == 0)
		return 'I';

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
static void timehist_print_sample(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct addr_location *al,
				  struct thread *thread,
				  u64 t, int state)
{
	struct thread_runtime *tr = thread__priv(thread);
	const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	u32 max_cpus = sched->max_cpu + 1;
	char tstr[64];
	char nstr[30];
	u64 wait_time;

	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {
		u32 i;
		char c;

		printf(" ");
		for (i = 0; i < max_cpus; ++i) {
			/* flag idle times with 'i'; others are sched events */
			if (i == sample->cpu)
				c = (thread->tid == 0) ? 'i' : 's';
			else
				c = ' ';
			printf("%c", c);
		}
		printf(" ");
	}

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
	print_sched_time(wait_time, 6);

	print_sched_time(tr->dt_delay, 6);
	print_sched_time(tr->dt_run, 6);

	if (sched->show_state)
		printf(" %5c ", task_state_char(thread, state));

	if (sched->show_next) {
		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
		printf(" %-*s", comm_width, nstr);
	}

	if (sched->show_wakeups && !sched->show_next)
		printf("  %-*s", comm_width, "");

	if (thread->tid == 0)
		goto out;

	if (sched->show_callchain)
		printf("  ");

	sample__fprintf_sym(sample, al, 0,
			    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
			    EVSEL__PRINT_CALLCHAIN_ARROW |
			    EVSEL__PRINT_SKIP_IGNORED,
			    &callchain_cursor, stdout);

out:
	printf("\n");
}
/*
 * Explanation of delta-time stats:
 *
 *            t = time of current schedule out event
 *        tprev = time of previous sched out event
 *                also time of schedule-in event for current task
 *    last_time = time of last sched change event for current task
 *                (i.e, time process was last scheduled out)
 * ready_to_run = time of wakeup for current task
 *
 * -----|------------|------------|------------|------
 *    last         ready        tprev          t
 *    time         to run
 *
 *      |-------- dt_wait --------|
 *                   |- dt_delay -|-- dt_run --|
 *
 *   dt_run = run time of current task
 *  dt_wait = time between last schedule out event for task and tprev
 *            represents time spent off the cpu
 * dt_delay = time between wakeup and schedule-in of task
 */
static void timehist_update_runtime_stats(struct thread_runtime *r,
					  u64 t, u64 tprev)
{
	r->dt_delay   = 0;
	r->dt_sleep   = 0;
	r->dt_iowait  = 0;
	r->dt_preempt = 0;
	r->dt_run     = 0;

	if (tprev) {
		r->dt_run = t - tprev;
		if (r->ready_to_run) {
			if (r->ready_to_run > tprev)
				pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
			else
				r->dt_delay = tprev - r->ready_to_run;
		}

		if (r->last_time > tprev)
			pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
		else if (r->last_time) {
			u64 dt_wait = tprev - r->last_time;

			if (r->last_state == TASK_RUNNING)
				r->dt_preempt = dt_wait;
			else if (r->last_state == TASK_UNINTERRUPTIBLE)
				r->dt_iowait = dt_wait;
			else
				r->dt_sleep = dt_wait;
		}
	}

	update_stats(&r->run_stats, r->dt_run);

	r->total_run_time     += r->dt_run;
	r->total_delay_time   += r->dt_delay;
	r->total_sleep_time   += r->dt_sleep;
	r->total_iowait_time  += r->dt_iowait;
	r->total_preempt_time += r->dt_preempt;
}

static bool is_idle_sample(struct perf_sample *sample,
			   struct evsel *evsel)
{
	/* pid 0 == swapper == idle task */
	if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0)
		return perf_evsel__intval(evsel, sample, "prev_pid") == 0;

	return sample->pid == 0;
}
static void save_task_callchain(struct perf_sched *sched,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct callchain_cursor *cursor = &callchain_cursor;
	struct thread *thread;

	/* want main thread for process - has maps */
	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
	if (thread == NULL) {
		pr_debug("Failed to get thread for pid %d.\n", sample->pid);
		return;
	}

	if (!sched->show_callchain || sample->callchain == NULL)
		return;

	if (thread__resolve_callchain(thread, cursor, evsel, sample,
				      NULL, NULL, sched->max_stack + 2) != 0) {
		if (verbose > 0)
			pr_err("Failed to resolve callchain. Skipping\n");

		return;
	}

	callchain_cursor_commit(cursor);

	while (true) {
		struct callchain_cursor_node *node;
		struct symbol *sym;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		sym = node->ms.sym;
		if (sym) {
			if (!strcmp(sym->name, "schedule") ||
			    !strcmp(sym->name, "__schedule") ||
			    !strcmp(sym->name, "preempt_schedule"))
				sym->ignore = 1;
		}

		callchain_cursor_advance(cursor);
	}
}
static int init_idle_thread(struct thread *thread)
{
	struct idle_thread_runtime *itr;

	thread__set_comm(thread, idle_comm, 0);

	itr = zalloc(sizeof(*itr));
	if (itr == NULL)
		return -ENOMEM;

	init_stats(&itr->tr.run_stats);
	callchain_init(&itr->callchain);
	callchain_cursor_reset(&itr->cursor);
	thread__set_priv(thread, itr);

	return 0;
}

/*
 * Track idle stats per cpu by maintaining a local thread
 * struct for the idle task on each cpu.
 */
static int init_idle_threads(int ncpu)
{
	int i, ret;

	idle_threads = zalloc(ncpu * sizeof(struct thread *));
	if (!idle_threads)
		return -ENOMEM;

	idle_max_cpu = ncpu;

	/* allocate the actual thread struct if needed */
	for (i = 0; i < ncpu; ++i) {
		idle_threads[i] = thread__new(0, 0);
		if (idle_threads[i] == NULL)
			return -ENOMEM;

		ret = init_idle_thread(idle_threads[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void free_idle_threads(void)
{
	int i;

	if (idle_threads == NULL)
		return;

	for (i = 0; i < idle_max_cpu; ++i) {
		if ((idle_threads[i]))
			thread__delete(idle_threads[i]);
	}

	free(idle_threads);
}

static struct thread *get_idle_thread(int cpu)
{
	/*
	 * expand/allocate array of pointers to local thread
	 * structs if needed
	 */
	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
		int i, j = __roundup_pow_of_two(cpu+1);
		void *p;

		p = realloc(idle_threads, j * sizeof(struct thread *));
		if (!p)
			return NULL;

		idle_threads = (struct thread **) p;
		for (i = idle_max_cpu; i < j; ++i)
			idle_threads[i] = NULL;

		idle_max_cpu = j;
	}

	/* allocate a new thread struct if needed */
	if (idle_threads[cpu] == NULL) {
		idle_threads[cpu] = thread__new(0, 0);

		if (idle_threads[cpu]) {
			if (init_idle_thread(idle_threads[cpu]) < 0)
				return NULL;
		}
	}

	return idle_threads[cpu];
}

static void save_idle_callchain(struct perf_sched *sched,
				struct idle_thread_runtime *itr,
				struct perf_sample *sample)
{
	if (!sched->show_callchain || sample->callchain == NULL)
		return;

	callchain_cursor__copy(&itr->cursor, &callchain_cursor);
}
static struct thread *timehist_get_thread(struct perf_sched *sched,
					  struct perf_sample *sample,
					  struct machine *machine,
					  struct evsel *evsel)
{
	struct thread *thread;

	if (is_idle_sample(sample, evsel)) {
		thread = get_idle_thread(sample->cpu);
		if (thread == NULL)
			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);

	} else {
		/* there were samples with tid 0 but non-zero pid */
		thread = machine__findnew_thread(machine, sample->pid,
						 sample->tid ?: sample->pid);
		if (thread == NULL) {
			pr_debug("Failed to get thread for tid %d. skipping sample.\n",
				 sample->tid);
		}

		save_task_callchain(sched, sample, evsel, machine);
		if (sched->idle_hist) {
			struct thread *idle;
			struct idle_thread_runtime *itr;

			idle = get_idle_thread(sample->cpu);
			if (idle == NULL) {
				pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
				return NULL;
			}

			itr = thread__priv(idle);
			if (itr == NULL)
				return NULL;

			itr->last_thread = thread;

			/* copy task callchain when entering to idle */
			if (perf_evsel__intval(evsel, sample, "next_pid") == 0)
				save_idle_callchain(sched, itr, sample);
		}
	}

	return thread;
}
static bool timehist_skip_sample(struct perf_sched *sched,
				 struct thread *thread,
				 struct evsel *evsel,
				 struct perf_sample *sample)
{
	bool rc = false;

	if (thread__is_filtered(thread)) {
		rc = true;
		sched->skipped_samples++;
	}

	if (sched->idle_hist) {
		if (strcmp(perf_evsel__name(evsel), "sched:sched_switch"))
			rc = true;
		else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 &&
			 perf_evsel__intval(evsel, sample, "next_pid") != 0)
			rc = true;
	}

	return rc;
}

static void timehist_print_wakeup_event(struct perf_sched *sched,
					struct evsel *evsel,
					struct perf_sample *sample,
					struct machine *machine,
					struct thread *awakened)
{
	struct thread *thread;
	char tstr[64];

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL)
		return;

	/* show wakeup unless both awakee and awaker are filtered */
	if (timehist_skip_sample(sched, thread, evsel, sample) &&
	    timehist_skip_sample(sched, awakened, evsel, sample)) {
		return;
	}

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);
	if (sched->show_cpu_visual)
		printf(" %*s ", sched->max_cpu + 1, "");

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	/* dt spacer */
	printf("  %9s  %9s  %9s ", "", "", "");

	printf("awakened: %s", timehist_get_commstr(awakened));

	printf("\n");
}

static int timehist_sched_wakeup_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of awakened task not pid in sample */
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);
	if (thread == NULL)
		return -1;

	tr = thread__get_runtime(thread);
	if (tr == NULL)
		return -1;

	if (tr->ready_to_run == 0)
		tr->ready_to_run = sample->time;

	/* show wakeups if requested */
	if (sched->show_wakeups &&
	    !perf_time__skip_sample(&sched->ptime, sample->time))
		timehist_print_wakeup_event(sched, evsel, sample, machine, thread);

	return 0;
}
static void timehist_print_migration_event(struct perf_sched *sched,
					   struct evsel *evsel,
					   struct perf_sample *sample,
					   struct machine *machine,
					   struct thread *migrated)
{
	struct thread *thread;
	char tstr[64];
	u32 max_cpus;
	u32 ocpu, dcpu;

	if (sched->summary_only)
		return;

	max_cpus = sched->max_cpu + 1;
	ocpu = perf_evsel__intval(evsel, sample, "orig_cpu");
	dcpu = perf_evsel__intval(evsel, sample, "dest_cpu");

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL)
		return;

	if (timehist_skip_sample(sched, thread, evsel, sample) &&
	    timehist_skip_sample(sched, migrated, evsel, sample)) {
		return;
	}

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {
		u32 i;
		char c;

		printf("  ");
		for (i = 0; i < max_cpus; ++i) {
			c = (i == sample->cpu) ? 'm' : ' ';
			printf("%c", c);
		}
		printf("  ");
	}

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	/* dt spacer */
	printf("  %9s  %9s  %9s ", "", "", "");

	printf("migrated: %s", timehist_get_commstr(migrated));
	printf(" cpu %d => %d", ocpu, dcpu);

	printf("\n");
}
static int timehist_migrate_task_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of migrated task not pid in sample */
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);
	if (thread == NULL)
		return -1;

	tr = thread__get_runtime(thread);
	if (tr == NULL)
		return -1;

	tr->migrations++;

	/* show migrations if requested */
	timehist_print_migration_event(sched, evsel, sample, machine, thread);

	return 0;
}
static int timehist_sched_change_event(struct perf_tool *tool,
				       union perf_event *event,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct perf_time_interval *ptime = &sched->ptime;
	struct addr_location al;
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	u64 tprev, t = sample->time;
	int rc = 0;
	int state = perf_evsel__intval(evsel, sample, "prev_state");

	if (machine__resolve(machine, &al, sample) < 0) {
		pr_err("problem processing %d event. skipping it\n",
		       event->header.type);
		rc = -1;
		goto out;
	}

	thread = timehist_get_thread(sched, sample, machine, evsel);
	if (thread == NULL) {
		rc = -1;
		goto out;
	}

	if (timehist_skip_sample(sched, thread, evsel, sample))
		goto out;

	tr = thread__get_runtime(thread);
	if (tr == NULL) {
		rc = -1;
		goto out;
	}

	tprev = perf_evsel__get_time(evsel, sample->cpu);

	/*
	 * If start time given:
	 * - sample time is under window user cares about - skip sample
	 * - tprev is under window user cares about - reset to start of window
	 */
	if (ptime->start && ptime->start > t)
		goto out;

	if (tprev && ptime->start > tprev)
		tprev = ptime->start;

	/*
	 * If end time given:
	 * - previous sched event is out of window - we are done
	 * - sample time is beyond window user cares about - reset it
	 *   to close out stats for time window interest
	 */
	if (ptime->end) {
		if (tprev > ptime->end)
			goto out;

		if (t > ptime->end)
			t = ptime->end;
	}

	if (!sched->idle_hist || thread->tid == 0) {
		timehist_update_runtime_stats(tr, t, tprev);

		if (sched->idle_hist) {
			struct idle_thread_runtime *itr = (void *)tr;
			struct thread_runtime *last_tr;

			BUG_ON(thread->tid != 0);

			if (itr->last_thread == NULL)
				goto out;

			/* add current idle time as last thread's runtime */
			last_tr = thread__get_runtime(itr->last_thread);
			if (last_tr == NULL)
				goto out;

			timehist_update_runtime_stats(last_tr, t, tprev);
			/*
			 * remove delta time of last thread as it's not updated
			 * and otherwise it will show an invalid value next
			 * time.  we only care about total run time and run stats.
			 */
			last_tr->dt_run = 0;
			last_tr->dt_delay = 0;
			last_tr->dt_sleep = 0;
			last_tr->dt_iowait = 0;
			last_tr->dt_preempt = 0;

			callchain_append(&itr->callchain, &itr->cursor, t - tprev);

			itr->last_thread = NULL;
		}
	}

	if (!sched->summary_only)
		timehist_print_sample(sched, evsel, sample, &al, thread, t, state);

out:
	if (sched->hist_time.start == 0 && t >= ptime->start)
		sched->hist_time.start = t;
	if (ptime->end == 0 || t <= ptime->end)
		sched->hist_time.end = t;

	if (tr) {
		/* time of this sched_switch event becomes last time task seen */
		tr->last_time = sample->time;

		/* last state is used to determine where to account wait time */
		tr->last_state = state;

		/* sched out event for task so reset ready to run time */
		tr->ready_to_run = 0;
	}

	perf_evsel__save_time(evsel, sample->time, sample->cpu);

	return rc;
}
2629 static int timehist_sched_switch_event(struct perf_tool *tool,
2630 union perf_event *event,
2631 struct evsel *evsel,
2632 struct perf_sample *sample,
2633 struct machine *machine __maybe_unused)
2635 return timehist_sched_change_event(tool, event, evsel, sample, machine);
2638 static int process_lost(struct perf_tool *tool __maybe_unused,
2639 union perf_event *event,
2640 struct perf_sample *sample,
2641 struct machine *machine __maybe_unused)
2643 char tstr[64];
2645 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2646 printf("%15s ", tstr);
2647 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2653 static void print_thread_runtime(struct thread *t,
2654 struct thread_runtime *r)
2656 double mean = avg_stats(&r->run_stats);
2657 float stddev;
2659 printf("%*s %5d %9" PRIu64 " ",
2660 comm_width, timehist_get_commstr(t), t->ppid,
2661 (u64) r->run_stats.n);
2663 print_sched_time(r->total_run_time, 8);
2664 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2665 print_sched_time(r->run_stats.min, 6);
2667 print_sched_time((u64) mean, 6);
2669 print_sched_time(r->run_stats.max, 6);
2671 printf("%5.2f", stddev);
2672 printf(" %5" PRIu64, r->migrations);
2676 static void print_thread_waittime(struct thread *t,
2677 struct thread_runtime *r)
2679 printf("%*s %5d %9" PRIu64 " ",
2680 comm_width, timehist_get_commstr(t), t->ppid,
2681 (u64) r->run_stats.n);
2683 print_sched_time(r->total_run_time, 8);
2684 print_sched_time(r->total_sleep_time, 6);
2686 print_sched_time(r->total_iowait_time, 6);
2688 print_sched_time(r->total_preempt_time, 6);
2690 print_sched_time(r->total_delay_time, 6);
2694 struct total_run_stats {
2695 struct perf_sched *sched;
2696 u64 task_count;
2697 u64 sched_count;
2698 u64 total_run_time;
2701 static int __show_thread_runtime(struct thread *t, void *priv)
2703 struct total_run_stats *stats = priv;
2704 struct thread_runtime *r;
2706 if (thread__is_filtered(t))
2707 return 0;
2709 r = thread__priv(t);
2710 if (r && r->run_stats.n) {
2711 stats->task_count++;
2712 stats->sched_count += r->run_stats.n;
2713 stats->total_run_time += r->total_run_time;
2715 if (stats->sched->show_state)
2716 print_thread_waittime(t, r);
2717 else
2718 print_thread_runtime(t, r);
2724 static int show_thread_runtime(struct thread *t, void *priv)
2726 if (t->dead)
2727 return 0;
2729 return __show_thread_runtime(t, priv);
2732 static int show_deadthread_runtime(struct thread *t, void *priv)
2734 if (!t->dead)
2735 return 0;
2737 return __show_thread_runtime(t, priv);
2740 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2742 const char *sep = " <- ";
2743 struct callchain_list *chain;
2744 size_t ret = 0;
2745 char bf[1024];
2746 bool first = true;
2748 if (node == NULL)
2749 return 0;
2751 ret = callchain__fprintf_folded(fp, node->parent);
2752 if (!ret)
2753 first = false;
2754 list_for_each_entry(chain, &node->val, list) {
2755 if (chain->ip >= PERF_CONTEXT_MAX)
2756 continue;
2757 if (chain->ms.sym && chain->ms.sym->ignore)
2758 continue;
2759 ret += fprintf(fp, "%s%s", first ? "" : sep,
2760 callchain_list__sym_name(chain, bf, sizeof(bf),
2761 false));
2762 first = false;
2768 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
2770 size_t ret = 0;
2771 FILE *fp = stdout;
2772 struct callchain_node *chain;
2773 struct rb_node *rb_node = rb_first_cached(root);
2775 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
2776 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
2780 chain = rb_entry(rb_node, struct callchain_node, rb_node);
2781 rb_node = rb_next(rb_node);
2783 ret += fprintf(fp, " ");
2784 print_sched_time(chain->hit, 12);
2785 ret += 16; /* print_sched_time prints 2nd arg + 4 chars */
2786 ret += fprintf(fp, " %8d ", chain->count);
2787 ret += callchain__fprintf_folded(fp, chain);
2788 ret += fprintf(fp, "\n");
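/*
 * Folded callchain lines come out roughly as below (symbols and values
 * illustrative), one chain per line with the innermost frame first:
 *
 *          1234.567       42  schedule <- schedule_hrtimeout_range <- do_select
 */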
2794 static void timehist_print_summary(struct perf_sched *sched,
2795 struct perf_session *session)
2797 struct machine *m = &session->machines.host;
2798 struct total_run_stats totals;
2799 u64 task_count;
2800 struct thread *t;
2801 struct thread_runtime *r;
2802 int i;
2803 u64 hist_time = sched->hist_time.end - sched->hist_time.start;
2805 memset(&totals, 0, sizeof(totals));
2806 totals.sched = sched;
2808 if (sched->idle_hist) {
2809 printf("\nIdle-time summary\n");
2810 printf("%*s parent sched-out ", comm_width, "comm");
2811 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
2812 } else if (sched->show_state) {
2813 printf("\nWait-time summary\n");
2814 printf("%*s parent sched-in ", comm_width, "comm");
2815 printf(" run-time sleep iowait preempt delay\n");
2817 printf("\nRuntime summary\n");
2818 printf("%*s parent sched-in ", comm_width, "comm");
2819 printf(" run-time min-run avg-run max-run stddev migrations\n");
2821 printf("%*s (count) ", comm_width, "");
2822 printf(" (msec) (msec) (msec) (msec) %s\n",
2823 sched->show_state ? "(msec)" : "%");
2824 printf("%.117s\n", graph_dotted_line);
2826 machine__for_each_thread(m, show_thread_runtime, &totals);
2827 task_count = totals.task_count;
2829 printf("<no still running tasks>\n");
2831 printf("\nTerminated tasks:\n");
2832 machine__for_each_thread(m, show_deadthread_runtime, &totals);
2833 if (task_count == totals.task_count)
2834 printf("<no terminated tasks>\n");
2836 /* CPU idle stats not tracked when samples were skipped */
2837 if (sched->skipped_samples && !sched->idle_hist)
2838 return;
2840 printf("\nIdle stats:\n");
2841 for (i = 0; i < idle_max_cpu; ++i) {
2842 t = idle_threads[i];
2846 r = thread__priv(t);
2847 if (r && r->run_stats.n) {
2848 totals.sched_count += r->run_stats.n;
2849 printf(" CPU %2d idle for ", i);
2850 print_sched_time(r->total_run_time, 6);
2851 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
2853 printf(" CPU %2d idle entire time window\n", i);
2856 if (sched->idle_hist && sched->show_callchain) {
2857 callchain_param.mode = CHAIN_FOLDED;
2858 callchain_param.value = CCVAL_PERIOD;
2860 callchain_register_param(&callchain_param);
2862 printf("\nIdle stats by callchain:\n");
2863 for (i = 0; i < idle_max_cpu; ++i) {
2864 struct idle_thread_runtime *itr;
2866 t = idle_threads[i];
2870 itr = thread__priv(t);
2874 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
2875 0, &callchain_param);
2877 printf(" CPU %2d:", i);
2878 print_sched_time(itr->tr.total_run_time, 6);
2880 timehist_print_idlehist_callchain(&itr->sorted_root);
2886 " Total number of unique tasks: %" PRIu64 "\n"
2887 "Total number of context switches: %" PRIu64 "\n",
2888 totals.task_count, totals.sched_count);
2890 printf(" Total run time (msec): ");
2891 print_sched_time(totals.total_run_time, 2);
2894 printf(" Total scheduling time (msec): ");
2895 print_sched_time(hist_time, 2);
2896 printf(" (x %d)\n", sched->max_cpu);
2899 typedef int (*sched_handler)(struct perf_tool *tool,
2900 union perf_event *event,
2901 struct evsel *evsel,
2902 struct perf_sample *sample,
2903 struct machine *machine);
2905 static int perf_timehist__process_sample(struct perf_tool *tool,
2906 union perf_event *event,
2907 struct perf_sample *sample,
2908 struct evsel *evsel,
2909 struct machine *machine)
2911 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2912 int err = 0;
2913 int this_cpu = sample->cpu;
2915 if (this_cpu > sched->max_cpu)
2916 sched->max_cpu = this_cpu;
2918 if (evsel->handler != NULL) {
2919 sched_handler f = evsel->handler;
2921 err = f(tool, event, evsel, sample, machine);
2922 }
2924 return err;
2927 static int timehist_check_attr(struct perf_sched *sched,
2928 struct evlist *evlist)
2930 struct evsel *evsel;
2931 struct evsel_runtime *er;
2933 list_for_each_entry(evsel, &evlist->core.entries, core.node) {
2934 er = perf_evsel__get_runtime(evsel);
2935 if (er == NULL) {
2936 pr_err("Failed to allocate memory for evsel runtime data\n");
2937 return -1;
2938 }
2940 if (sched->show_callchain && !evsel__has_callchain(evsel)) {
2941 pr_info("Samples do not have callchains.\n");
2942 sched->show_callchain = 0;
2943 symbol_conf.use_callchain = 0;
2950 static int perf_sched__timehist(struct perf_sched *sched)
2952 const struct evsel_str_handler handlers[] = {
2953 { "sched:sched_switch", timehist_sched_switch_event, },
2954 { "sched:sched_wakeup", timehist_sched_wakeup_event, },
2955 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
2957 const struct evsel_str_handler migrate_handlers[] = {
2958 { "sched:sched_migrate_task", timehist_migrate_task_event, },
2960 struct perf_data data = {
2961 .path = input_name,
2962 .mode = PERF_DATA_MODE_READ,
2963 .force = sched->force,
2966 struct perf_session *session;
2967 struct evlist *evlist;
2968 int err = -1;
2971 * event handlers for timehist option
2973 sched->tool.sample = perf_timehist__process_sample;
2974 sched->tool.mmap = perf_event__process_mmap;
2975 sched->tool.comm = perf_event__process_comm;
2976 sched->tool.exit = perf_event__process_exit;
2977 sched->tool.fork = perf_event__process_fork;
2978 sched->tool.lost = process_lost;
2979 sched->tool.attr = perf_event__process_attr;
2980 sched->tool.tracing_data = perf_event__process_tracing_data;
2981 sched->tool.build_id = perf_event__process_build_id;
2983 sched->tool.ordered_events = true;
2984 sched->tool.ordering_requires_timestamps = true;
2986 symbol_conf.use_callchain = sched->show_callchain;
2988 session = perf_session__new(&data, false, &sched->tool);
2989 if (session == NULL)
2990 return -1;
2992 evlist = session->evlist;
2994 symbol__init(&session->header.env);
2996 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
2997 pr_err("Invalid time string\n");
3001 if (timehist_check_attr(sched, evlist) != 0)
3002 goto out;
3006 /* setup per-evsel handlers */
3007 if (perf_session__set_tracepoints_handlers(session, handlers))
3008 goto out;
3010 /* sched_switch event at a minimum needs to exist */
3011 if (!perf_evlist__find_tracepoint_by_name(session->evlist,
3012 "sched:sched_switch")) {
3013 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3017 if (sched->show_migrations &&
3018 perf_session__set_tracepoints_handlers(session, migrate_handlers))
3019 goto out;
3021 /* pre-allocate struct for per-CPU idle stats */
3022 sched->max_cpu = session->header.env.nr_cpus_online;
3023 if (sched->max_cpu == 0)
3024 sched->max_cpu = 4;
3025 if (init_idle_threads(sched->max_cpu))
3026 goto out;
3028 /* summary_only implies summary option, but don't overwrite summary if set */
3029 if (sched->summary_only)
3030 sched->summary = sched->summary_only;
3032 if (!sched->summary_only)
3033 timehist_header(sched);
3035 err = perf_session__process_events(session);
3037 pr_err("Failed to process events, error %d", err);
3041 sched->nr_events = evlist->stats.nr_events[0];
3042 sched->nr_lost_events = evlist->stats.total_lost;
3043 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3045 if (sched->summary)
3046 timehist_print_summary(sched, session);
3048 out:
3049 free_idle_threads();
3050 perf_session__delete(session);
3052 return err;
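/*
 * Typical flow driving the handlers wired up above (commands
 * illustrative):
 *
 *   perf sched record -- sleep 1
 *   perf sched timehist
 *   perf sched timehist -s        # summary only
 *   perf sched timehist -MVw      # add migrations, CPU visual, wakeups
 */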
3056 static void print_bad_events(struct perf_sched *sched)
3058 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3059 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3060 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3061 sched->nr_unordered_timestamps, sched->nr_timestamps);
3063 if (sched->nr_lost_events && sched->nr_events) {
3064 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3065 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3066 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3068 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3069 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
3070 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3071 sched->nr_context_switch_bugs, sched->nr_timestamps);
3072 if (sched->nr_lost_events)
3073 printf(" (due to lost events?)");
3078 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3080 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3081 struct work_atoms *this;
3082 const char *comm = thread__comm_str(data->thread), *this_comm;
3083 bool leftmost = true;
3085 while (*new) {
3086 int cmp;
3088 this = container_of(*new, struct work_atoms, node);
3089 parent = *new;
3091 this_comm = thread__comm_str(this->thread);
3092 cmp = strcmp(comm, this_comm);
3093 if (cmp > 0) {
3094 new = &((*new)->rb_left);
3095 } else if (cmp < 0) {
3096 new = &((*new)->rb_right);
3097 leftmost = false;
3098 } else {
3100 this->total_runtime += data->total_runtime;
3101 this->nb_atoms += data->nb_atoms;
3102 this->total_lat += data->total_lat;
3103 list_splice(&data->work_list, &this->work_list);
3104 if (this->max_lat < data->max_lat) {
3105 this->max_lat = data->max_lat;
3106 this->max_lat_at = data->max_lat_at;
3114 rb_link_node(&data->node, parent, new);
3115 rb_insert_color_cached(&data->node, root, leftmost);
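/*
 * Sketch of the merge above: two work_atoms entries whose threads are
 * both named "gcc" (different pids) collapse into one node keyed by
 * comm -- runtimes, latencies and atom lists are summed or spliced, so
 * 'perf sched latency' reports per-comm rows unless -p/--pids is given
 * (which sets skip_merge and bypasses this entirely).
 */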
3118 static void perf_sched__merge_lat(struct perf_sched *sched)
3120 struct work_atoms *data;
3121 struct rb_node *node;
3123 if (sched->skip_merge)
3124 return;
3126 while ((node = rb_first_cached(&sched->atom_root))) {
3127 rb_erase_cached(node, &sched->atom_root);
3128 data = rb_entry(node, struct work_atoms, node);
3129 __merge_work_atoms(&sched->merged_atom_root, data);
3133 static int perf_sched__lat(struct perf_sched *sched)
3135 struct rb_node *next;
3137 setup_pager();
3139 if (perf_sched__read_events(sched))
3140 return -1;
3142 perf_sched__merge_lat(sched);
3143 perf_sched__sort_lat(sched);
3145 printf("\n -----------------------------------------------------------------------------------------------------------------\n");
3146 printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
3147 printf(" -----------------------------------------------------------------------------------------------------------------\n");
3149 next = rb_first_cached(&sched->sorted_atom_root);
3151 while (next) {
3152 struct work_atoms *work_list;
3154 work_list = rb_entry(next, struct work_atoms, node);
3155 output_lat_thread(sched, work_list);
3156 next = rb_next(next);
3157 thread__zput(work_list->thread);
3160 printf(" -----------------------------------------------------------------------------------------------------------------\n");
3161 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
3162 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3164 printf(" ---------------------------------------------------\n");
3166 print_bad_events(sched);
3168 return 0;
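/*
 * Example row from the table printed above (values illustrative,
 * layout approximate):
 *
 *   gcc:12345   |    120.342 ms |      312 | avg:  0.512 ms | max:  9.021 ms | max at:  123.456789 s
 */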
3172 static int setup_map_cpus(struct perf_sched *sched)
3174 struct perf_cpu_map *map;
3176 sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
3178 if (sched->map.comp) {
3179 sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
3180 if (!sched->map.comp_cpus)
3181 return -1;
3182 }
3184 if (!sched->map.cpus_str)
3185 return 0;
3187 map = perf_cpu_map__new(sched->map.cpus_str);
3189 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3193 sched->map.cpus = map;
3194 return 0;
3197 static int setup_color_pids(struct perf_sched *sched)
3199 struct perf_thread_map *map;
3201 if (!sched->map.color_pids_str)
3202 return 0;
3204 map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3206 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3210 sched->map.color_pids = map;
3211 return 0;
3214 static int setup_color_cpus(struct perf_sched *sched)
3216 struct perf_cpu_map *map;
3218 if (!sched->map.color_cpus_str)
3219 return 0;
3221 map = perf_cpu_map__new(sched->map.color_cpus_str);
3223 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
3227 sched->map.color_cpus = map;
3228 return 0;
3231 static int perf_sched__map(struct perf_sched *sched)
3233 if (setup_map_cpus(sched))
3234 return -1;
3236 if (setup_color_pids(sched))
3237 return -1;
3239 if (setup_color_cpus(sched))
3240 return -1;
3242 setup_pager();
3243 if (perf_sched__read_events(sched))
3244 return -1;
3245 print_bad_events(sched);
3246 return 0;
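/*
 * Example invocations (flags as defined in map_options further down):
 *
 *   perf sched map
 *   perf sched map --compact --cpus 0-3 --color-pids 1234,1235
 */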
3249 static int perf_sched__replay(struct perf_sched *sched)
3251 unsigned long i;
3253 calibrate_run_measurement_overhead(sched);
3254 calibrate_sleep_measurement_overhead(sched);
3256 test_calibrations(sched);
3258 if (perf_sched__read_events(sched))
3259 return -1;
3261 printf("nr_run_events: %ld\n", sched->nr_run_events);
3262 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
3263 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
3265 if (sched->targetless_wakeups)
3266 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
3267 if (sched->multitarget_wakeups)
3268 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3269 if (sched->nr_run_events_optimized)
3270 printf("run atoms optimized: %ld\n",
3271 sched->nr_run_events_optimized);
3273 print_task_traces(sched);
3274 add_cross_task_wakeups(sched);
3276 create_tasks(sched);
3277 printf("------------------------------------------------------------\n");
3278 for (i = 0; i < sched->replay_repeat; i++)
3279 run_one_test(sched);
3281 return 0;
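/*
 * Example (flag as defined in replay_options further down):
 *
 *   perf sched replay -r 100    # replay the recorded workload 100 times
 */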
3284 static void setup_sorting(struct perf_sched *sched, const struct option *options,
3285 const char * const usage_msg[])
3287 char *tmp, *tok, *str = strdup(sched->sort_order);
3289 for (tok = strtok_r(str, ", ", &tmp);
3290 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3291 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3292 usage_with_options_msg(usage_msg, options,
3293 "Unknown --sort key: `%s'", tok);
3299 sort_dimension__add("pid", &sched->cmp_pid);
3302 static int __cmd_record(int argc, const char **argv)
3304 unsigned int rec_argc, i, j;
3305 const char **rec_argv;
3306 const char * const record_args[] = {
3312 "-e", "sched:sched_switch",
3313 "-e", "sched:sched_stat_wait",
3314 "-e", "sched:sched_stat_sleep",
3315 "-e", "sched:sched_stat_iowait",
3316 "-e", "sched:sched_stat_runtime",
3317 "-e", "sched:sched_process_fork",
3318 "-e", "sched:sched_wakeup",
3319 "-e", "sched:sched_wakeup_new",
3320 "-e", "sched:sched_migrate_task",
3323 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
3324 rec_argv = calloc(rec_argc + 1, sizeof(char *));
3326 if (rec_argv == NULL)
3327 return -ENOMEM;
3329 for (i = 0; i < ARRAY_SIZE(record_args); i++)
3330 rec_argv[i] = strdup(record_args[i]);
3332 for (j = 1; j < (unsigned int)argc; j++, i++)
3333 rec_argv[i] = argv[j];
3335 BUG_ON(i != rec_argc);
3337 return cmd_record(i, rec_argv);
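/*
 * The argv assembled above amounts to the following command, with any
 * extra user arguments appended (the elided head of record_args holds
 * the "record" verb and its buffering/system-wide flags):
 *
 *   perf record ... -e sched:sched_switch -e sched:sched_stat_wait \
 *       -e sched:sched_stat_sleep -e sched:sched_stat_iowait \
 *       -e sched:sched_stat_runtime -e sched:sched_process_fork \
 *       -e sched:sched_wakeup -e sched:sched_wakeup_new \
 *       -e sched:sched_migrate_task
 */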
3340 int cmd_sched(int argc, const char **argv)
3342 static const char default_sort_order[] = "avg, max, switch, runtime";
3343 struct perf_sched sched = {
3344 .tool = {
3345 .sample = perf_sched__process_tracepoint_sample,
3346 .comm = perf_sched__process_comm,
3347 .namespaces = perf_event__process_namespaces,
3348 .lost = perf_event__process_lost,
3349 .fork = perf_sched__process_fork_event,
3350 .ordered_events = true,
3351 },
3352 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
3353 .sort_list = LIST_HEAD_INIT(sched.sort_list),
3354 .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
3355 .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
3356 .sort_order = default_sort_order,
3357 .replay_repeat = 10,
3359 .next_shortname1 = 'A',
3360 .next_shortname2 = '0',
3362 .show_callchain = 1,
3365 const struct option sched_options[] = {
3366 OPT_STRING('i', "input", &input_name, "file",
3368 OPT_INCR('v', "verbose", &verbose,
3369 "be more verbose (show symbol address, etc)"),
3370 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3371 "dump raw trace in ASCII"),
3372 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
3375 const struct option latency_options[] = {
3376 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3377 "sort by key(s): runtime, switch, avg, max"),
3378 OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3379 "CPU to profile on"),
3380 OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3381 "latency stats per pid instead of per comm"),
3382 OPT_PARENT(sched_options)
3384 const struct option replay_options[] = {
3385 OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3386 "repeat the workload replay N times (-1: infinite)"),
3387 OPT_PARENT(sched_options)
3389 const struct option map_options[] = {
3390 OPT_BOOLEAN(0, "compact", &sched.map.comp,
3391 "map output in compact mode"),
3392 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3393 "highlight given pids in map"),
3394 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3395 "highlight given CPUs in map"),
3396 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3397 "display given CPUs in map"),
3398 OPT_PARENT(sched_options)
3400 const struct option timehist_options[] = {
3401 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3402 "file", "vmlinux pathname"),
3403 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3404 "file", "kallsyms pathname"),
3405 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3406 "Display call chains if present (default on)"),
3407 OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3408 "Maximum number of functions to display backtrace."),
3409 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3410 "Look for files with symbols relative to this directory"),
3411 OPT_BOOLEAN('s', "summary", &sched.summary_only,
3412 "Show only syscall summary with statistics"),
3413 OPT_BOOLEAN('S', "with-summary", &sched.summary,
3414 "Show all syscalls and summary with statistics"),
3415 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
3416 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
3417 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
3418 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
3419 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
3420 OPT_STRING(0, "time", &sched.time_str, "str",
3421 "Time span for analysis (start,stop)"),
3422 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
3423 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3424 "analyze events only for given process id(s)"),
3425 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3426 "analyze events only for given thread id(s)"),
3427 OPT_PARENT(sched_options)
3430 const char * const latency_usage[] = {
3431 "perf sched latency [<options>]",
3434 const char * const replay_usage[] = {
3435 "perf sched replay [<options>]",
3438 const char * const map_usage[] = {
3439 "perf sched map [<options>]",
3442 const char * const timehist_usage[] = {
3443 "perf sched timehist [<options>]",
3446 const char *const sched_subcommands[] = { "record", "latency", "map",
3447 "replay", "script",
3448 "timehist", NULL };
3449 const char *sched_usage[] = {
3453 struct trace_sched_handler lat_ops = {
3454 .wakeup_event = latency_wakeup_event,
3455 .switch_event = latency_switch_event,
3456 .runtime_event = latency_runtime_event,
3457 .migrate_task_event = latency_migrate_task_event,
3459 struct trace_sched_handler map_ops = {
3460 .switch_event = map_switch_event,
3462 struct trace_sched_handler replay_ops = {
3463 .wakeup_event = replay_wakeup_event,
3464 .switch_event = replay_switch_event,
3465 .fork_event = replay_fork_event,
3469 for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
3470 sched.curr_pid[i] = -1;
3472 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
3473 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3474 if (!argc)
3475 usage_with_options(sched_usage, sched_options);
3478 * Aliased to 'perf script' for now:
3480 if (!strcmp(argv[0], "script"))
3481 return cmd_script(argc, argv);
3483 if (!strncmp(argv[0], "rec", 3)) {
3484 return __cmd_record(argc, argv);
3485 } else if (!strncmp(argv[0], "lat", 3)) {
3486 sched.tp_handler = &lat_ops;
3488 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
3489 if (argc)
3490 usage_with_options(latency_usage, latency_options);
3492 setup_sorting(&sched, latency_options, latency_usage);
3493 return perf_sched__lat(&sched);
3494 } else if (!strcmp(argv[0], "map")) {
3496 argc = parse_options(argc, argv, map_options, map_usage, 0);
3497 if (argc)
3498 usage_with_options(map_usage, map_options);
3500 sched.tp_handler = &map_ops;
3501 setup_sorting(&sched, latency_options, latency_usage);
3502 return perf_sched__map(&sched);
3503 } else if (!strncmp(argv[0], "rep", 3)) {
3504 sched.tp_handler = &replay_ops;
3506 argc = parse_options(argc, argv, replay_options, replay_usage, 0);
3507 if (argc)
3508 usage_with_options(replay_usage, replay_options);
3510 return perf_sched__replay(&sched);
3511 } else if (!strcmp(argv[0], "timehist")) {
3513 argc = parse_options(argc, argv, timehist_options,
3514 timehist_usage, 0);
3515 if (argc)
3516 usage_with_options(timehist_usage, timehist_options);
3518 if ((sched.show_wakeups || sched.show_next) &&
3519 sched.summary_only) {
3520 pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
3521 parse_options_usage(timehist_usage, timehist_options, "s", true);
3522 if (sched.show_wakeups)
3523 parse_options_usage(NULL, timehist_options, "w", true);
3524 if (sched.show_next)
3525 parse_options_usage(NULL, timehist_options, "n", true);
3529 return perf_sched__timehist(&sched);
3530 } else {
3531 usage_with_options(sched_usage, sched_options);
3532 }
3534 return 0;