// SPDX-License-Identifier: GPL-2.0
#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/util.h"
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <semaphore.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>
#include <linux/ctype.h>
#define PR_SET_NAME	15	/* Set process name */

#define MAX_PID	1024000

static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

enum sched_event_type {
	SCHED_EVENT_MIGRATION,

	enum sched_event_type	type;

	struct task_desc	*wakee;

	struct list_head	list;
	enum thread_state	state;

	struct list_head	work_list;
	struct thread		*thread;

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct perf_sample *sample,
				  struct machine *machine);

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	struct perf_cpu		*comp_cpus;
	struct perf_thread_map	*color_pids;
	const char		*color_pids_str;
	struct perf_cpu_map	*color_cpus;
	const char		*color_cpus_str;
	struct perf_cpu_map	*cpus;
	const char		*cpus_str;

	struct perf_tool tool;
	const char	*sort_order;
	unsigned long	nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	struct mutex	start_work_mutex;
	struct mutex	work_done_wait_mutex;
	/*
	 * Track the current task - that way we can know whether there are any
	 * weird events, such as a task being switched away that is not current.
	 */
	struct perf_cpu	max_cpu;
	struct thread	**curr_thread;
	char		next_shortname1;
	char		next_shortname2;
	unsigned int	replay_repeat;
	unsigned long	nr_run_events;
	unsigned long	nr_sleep_events;
	unsigned long	nr_wakeup_events;
	unsigned long	nr_sleep_corrections;
	unsigned long	nr_run_events_optimized;
	unsigned long	targetless_wakeups;
	unsigned long	multitarget_wakeups;
	unsigned long	nr_runs;
	unsigned long	nr_timestamps;
	unsigned long	nr_unordered_timestamps;
	unsigned long	nr_context_switch_bugs;
	unsigned long	nr_events;
	unsigned long	nr_lost_chunks;
	unsigned long	nr_lost_events;
	u64		run_measurement_overhead;
	u64		sleep_measurement_overhead;
	u64		runavg_cpu_usage;
	u64		parent_cpu_usage;
	u64		runavg_parent_cpu_usage;
	u64		*cpu_last_switched;
	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	struct perf_sched_map map;

	/* options for timehist command */
	unsigned int	max_stack;
	bool		show_cpu_visual;
	bool		show_migrations;
	const char	*time_str;
	struct perf_time_interval ptime;
	struct perf_time_interval hist_time;
	volatile bool	thread_funcs_exit;

/* per thread run time data */
struct thread_runtime {
	u64 last_time;      /* time of previous sched in/out event */
	u64 dt_run;         /* run time */
	u64 dt_sleep;       /* time spent sleeping (off cpu) */
	u64 dt_iowait;      /* time spent waiting on iowait (off cpu) */
	u64 dt_preempt;     /* time spent preempted (off cpu) */
	u64 dt_delay;       /* time between wakeup and sched-in */
	u64 ready_to_run;   /* time of wakeup */

	struct stats run_stats;
	u64 total_sleep_time;
	u64 total_iowait_time;
	u64 total_preempt_time;
	u64 total_delay_time;

/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */

/* per cpu idle time data */
struct idle_thread_runtime {
	struct thread_runtime	tr;
	struct thread		*last_thread;
	struct rb_root_cached	sorted_root;
	struct callchain_root	callchain;
	struct callchain_cursor	cursor;

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

static u64 get_nsecs(void)

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;

static void burn_nsecs(struct perf_sched *sched, u64 nsecs)

	u64 T0 = get_nsecs(), T1;

	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
static void sleep_nsecs(u64 nsecs)

	ts.tv_nsec = nsecs % NSEC_PER_SEC;
	ts.tv_sec = nsecs / NSEC_PER_SEC;

	nanosleep(&ts, NULL);
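
/*
 * The calibrate_*() helpers below estimate the fixed cost of the
 * measurement primitives themselves by taking the minimum of ten
 * zero-length trials; burn_nsecs() subtracts this overhead so that
 * replayed run times are not inflated by measurement cost.
 */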
static void calibrate_run_measurement_overhead(struct perf_sched *sched)

	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;

	for (i = 0; i < 10; i++) {
		burn_nsecs(sched, 0);
		min_delta = min(min_delta, delta);
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)

	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;

	for (i = 0; i < 10; i++) {
		min_delta = min(min_delta, delta);
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)

	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;

	event->timestamp = timestamp;

	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

static struct sched_atom *last_event(struct task_desc *task)

	if (!task->nr_events)

	return task->atoms[task->nr_events - 1];

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)

	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)

	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;

	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, const char task_state __maybe_unused)

	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;

static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)

	struct task_desc *task;
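
	/*
	 * pid_to_task is allocated lazily, sized from kernel/pid_max, and
	 * grown on demand when a recorded pid exceeds the current bound.
	 */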
	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;

	task = sched->pid_to_task[pid];

	task = zalloc(sizeof(*task));

	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);

	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;

	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

static void print_task_traces(struct perf_sched *sched)

	struct task_desc *task;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
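
/*
 * Chain the recorded tasks together: each task is given a wakeup event
 * targeting the next one (the index computation is elided here, but the
 * j == nr_tasks check suggests a wraparound), so a replay can hand work
 * along even if the trace contained no cross-task wakeups.
 */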
static void add_cross_task_wakeups(struct perf_sched *sched)

	struct task_desc *task1, *task2;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		if (j == sched->nr_tasks)
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(sched, atom->duration);
	case SCHED_EVENT_SLEEP:
		ret = sem_wait(atom->wait_sem);
	case SCHED_EVENT_WAKEUP:
		ret = sem_post(atom->wait_sem);
	case SCHED_EVENT_MIGRATION:

static u64 get_cpu_usage_nsec_parent(void)

	err = getrusage(RUSAGE_SELF, &ru);

	sum  = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;
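
/*
 * Each replay thread opens its own software task-clock counter so that
 * get_cpu_usage_nsec_self() can read precise per-thread CPU time, while
 * the parent falls back to the coarser getrusage() numbers above.
 */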
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)

	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (errno == EMFILE) {
		BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
		limit.rlim_cur += sched->nr_tasks - cur_task;
		if (limit.rlim_cur > limit.rlim_max) {
			limit.rlim_max = limit.rlim_cur;
			need_privilege = true;
		if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
			if (need_privilege && errno == EPERM)
				strcpy(info, "Need privilege\n");
			strcpy(info, "Try again with the -f option\n");
	pr_err("Error: sys_perf_event_open() syscall returned "
	       "with %d (%s)\n%s", fd,
	       str_error_r(errno, sbuf, sizeof(sbuf)), info);

static u64 get_cpu_usage_nsec_self(int fd)

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;

static void *thread_func(void *ctx)

	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

	while (!sched->thread_funcs_exit) {
		ret = sem_post(&this_task->ready_for_work);
		mutex_lock(&sched->start_work_mutex);
		mutex_unlock(&sched->start_work_mutex);

		cpu_usage_0 = get_cpu_usage_nsec_self(fd);

		for (i = 0; i < this_task->nr_events; i++) {
			this_task->curr_event = i;
			perf_sched__process_event(sched, this_task->atoms[i]);

		cpu_usage_1 = get_cpu_usage_nsec_self(fd);
		this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
		ret = sem_post(&this_task->work_done_sem);

		mutex_lock(&sched->work_done_wait_mutex);
		mutex_unlock(&sched->work_done_wait_mutex);

static void create_tasks(struct perf_sched *sched)
	EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
	EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)

	struct task_desc *task;

	err = pthread_attr_init(&attr);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
	mutex_lock(&sched->start_work_mutex);
	mutex_lock(&sched->work_done_wait_mutex);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);

static void destroy_tasks(struct perf_sched *sched)
	UNLOCK_FUNCTION(sched->start_work_mutex)
	UNLOCK_FUNCTION(sched->work_done_wait_mutex)

	struct task_desc *task;

	mutex_unlock(&sched->start_work_mutex);
	mutex_unlock(&sched->work_done_wait_mutex);
	/* Get rid of threads so they won't be upset by mutex destruction */
	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		err = pthread_join(task->thread, NULL);
		sem_destroy(&task->sleep_sem);
		sem_destroy(&task->ready_for_work);
		sem_destroy(&task->work_done_sem);
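
/*
 * The two mutexes act as barriers for a replay run: the parent holds
 * both, workers announce themselves via ready_for_work and then block
 * on start_work_mutex. Releasing it starts a run; work_done_sem collects
 * the results before the parent re-arms the mutexes for the next run.
 */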
static void wait_for_tasks(struct perf_sched *sched)
	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)

	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		sem_init(&task->ready_for_work, 0, 0);
	mutex_lock(&sched->work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage) / sched->replay_repeat;
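
	/*
	 * The running averages above use the recurrence
	 *
	 *	avg = (avg * (n - 1) + sample) / n,	n = replay_repeat,
	 *
	 * i.e. an exponentially weighted average giving the newest sample
	 * weight 1/n. E.g. with n == 10, avg == 900 and sample == 1000:
	 * (900 * 9 + 1000) / 10 = 910.
	 */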
	mutex_lock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;

static void run_one_test(struct perf_sched *sched)
	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)

	u64 T0, T1, delta, avg_delta, fluct;

	wait_for_tasks(sched);

	sched->sum_runtime += delta;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;

		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
	/*
	 * rusage statistics are done by the parent; these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;

static void test_calibrations(struct perf_sched *sched)

	burn_nsecs(sched, NSEC_PER_MSEC);

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	sleep_nsecs(NSEC_PER_MSEC);

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);

replay_wakeup_event(struct perf_sched *sched,
		    struct evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)

	const char *comm = evsel__strval(evsel, sample, "comm");
	const u32 pid = evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);

static int replay_switch_event(struct perf_sched *sched,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)

	const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm = evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;

		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)

	timestamp0 = sched->cpu_last_switched[cpu];
		delta = timestamp - timestamp0;

		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)

	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
	parent = machine__findnew_thread(machine, event->fork.ppid,

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",

		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent));
		printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child));

	register_pid(sched, thread__tid(parent), thread__comm_str(parent));
	register_pid(sched, thread__tid(child), thread__comm_str(child));

struct sort_dimension {
	struct list_head	list;

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)

	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));

	init_stats(&r->run_stats);
	thread__set_priv(thread, r);

static struct thread_runtime *thread__get_runtime(struct thread *thread)

	struct thread_runtime *tr;

	tr = thread__priv(thread);
		tr = thread__init_runtime(thread);
			pr_debug("Failed to malloc memory for runtime data.\n");

thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)

	struct sort_dimension *sort;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);

static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
		    struct list_head *sort_list)

	struct rb_node *node = root->rb_root.rb_node;
	struct work_atoms key = { .thread = thread };

		struct work_atoms *atoms;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
			node = node->rb_left;
			node = node->rb_right;
			BUG_ON(thread != atoms->thread);
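
/*
 * Insert into a cached rbtree keyed by the active sort order; the
 * leftmost flag keeps rb_first_cached() cheap for the sorted walk.
 */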
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
			struct list_head *sort_list)

	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	bool leftmost = true;

		struct work_atoms *this;

		this = container_of(*new, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, data, this);

			new = &((*new)->rb_left);
			new = &((*new)->rb_right);

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)

	struct work_atoms *atoms = zalloc(sizeof(*atoms));
		pr_err("No memory at %s\n", __func__);

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);

add_sched_out_event(struct work_atoms *atoms,

	struct work_atom *atom = zalloc(sizeof(*atom));
		pr_err("No memory at %s", __func__);
	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;

	list_add_tail(&atom->list, &atoms->work_list);

add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)

	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;

add_sched_in_event(struct work_atoms *atoms, u64 timestamp)

	struct work_atom *atom;

	if (list_empty(&atoms->work_list))

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_start = atom->wake_up_time;
		atoms->max_lat_end = timestamp;

static int latency_switch_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)

	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
		delta = timestamp - timestamp0;

		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (thread_atoms_insert(sched, sched_out))
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
			pr_err("out-event: Internal tree error");
	if (add_sched_out_event(out_events, prev_state, timestamp))

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (thread_atoms_insert(sched, sched_in))
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
			pr_err("in-event: Internal tree error");
		/*
		 * Task came in that we have not heard about yet;
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))

	add_sched_in_event(in_events, timestamp);

	thread__put(sched_out);
	thread__put(sched_in);

static int latency_runtime_event(struct perf_sched *sched,
				 struct evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)

	const u32 pid = evsel__intval(evsel, sample, "pid");
	const u64 runtime = evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
		if (thread_atoms_insert(sched, thread))
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
			pr_err("in-event: Internal tree error");
		if (add_sched_out_event(atoms, 'R', timestamp))

	add_runtime_event(atoms, runtime, timestamp);

	thread__put(thread);

static int latency_wakeup_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)

	const u32 pid = evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;

	wakee = machine__findnew_thread(machine, -1, pid);
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (thread_atoms_insert(sched, wakee))
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
			pr_err("wakeup-event: Internal tree error");
		if (add_sched_out_event(atoms, 'S', timestamp))

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	/*
	 * A wakeup event is not guaranteed to arrive while the task is off
	 * the run queue; it can also hit a task that is already on the run
	 * queue, merely flipping ->state to TASK_RUNNING. In that case do
	 * not set ->wake_up_time.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are looking at only one, so don't
	 * skip in this case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;

static int latency_migrate_task_event(struct perf_sched *sched,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)

	const u32 pid = evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (thread_atoms_insert(sched, migrant))
		register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
			pr_err("migration-event: Internal tree error");
		if (add_sched_out_event(atoms, 'R', timestamp))

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;

	thread__put(migrant);

static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)

	char max_lat_start[32], max_lat_end[32];

	if (!work_list->nb_atoms)

	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))

	sched->all_runtime += work_list->total_runtime;
	sched->all_count += work_list->nb_atoms;

	if (work_list->num_merged > 1) {
		ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread),
			     work_list->num_merged);
	} else {
		ret = printf(" %s:%d ", thread__comm_str(work_list->thread),
			     thread__tid(work_list->thread));
	}

	for (i = 0; i < 24 - ret; i++)

	avg = work_list->total_lat / work_list->nb_atoms;
	timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
	timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));

	printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
	       (double)work_list->total_runtime / NSEC_PER_MSEC,
	       work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
	       (double)work_list->max_lat / NSEC_PER_MSEC,
	       max_lat_start, max_lat_end);

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)

	if (RC_CHK_EQUAL(l->thread, r->thread))
	l_tid = thread__tid(l->thread);
	r_tid = thread__tid(r->thread);
	return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread));

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

static int max_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->max_lat < r->max_lat)
	if (l->max_lat > r->max_lat)

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->nb_atoms < r->nb_atoms)
	if (l->nb_atoms > r->nb_atoms)

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)

	if (l->total_runtime < r->total_runtime)
	if (l->total_runtime > r->total_runtime)

static int sort_dimension__add(const char *tok, struct list_head *list)

	static struct sort_dimension avg_sort_dimension = {
	static struct sort_dimension max_sort_dimension = {
	static struct sort_dimension pid_sort_dimension = {
	static struct sort_dimension runtime_sort_dimension = {
	static struct sort_dimension switch_sort_dimension = {

	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

static void perf_sched__sort_lat(struct perf_sched *sched)

	struct rb_node *node;
	struct rb_root_cached *root = &sched->atom_root;

		struct work_atoms *data;
		node = rb_first_cached(root);

		rb_erase_cached(node, root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	if (root == &sched->atom_root) {
		root = &sched->merged_atom_root;

static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

static int process_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
				       struct evsel *evsel __maybe_unused,
				       struct perf_sample *sample __maybe_unused,
				       struct machine *machine __maybe_unused)

static bool thread__has_color(struct thread *thread)

	union map_priv priv = {
		.ptr = thread__priv(thread),

static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)

	struct thread *thread = machine__findnew_thread(machine, pid, tid);
	union map_priv priv = {

	if (!sched->map.color_pids || !thread || thread__priv(thread))

	if (thread_map__has(sched->map.color_pids, tid))

	thread__set_priv(thread, priv.ptr);

static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)

	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	struct thread_runtime *tr;
	u64 timestamp0, timestamp = sample->time;
	struct perf_cpu this_cpu = {
	bool new_cpu = false;
	const char *color = PERF_COLOR_NORMAL;
	char stimestamp[32];

	BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);

	if (this_cpu.cpu > sched->max_cpu.cpu)
		sched->max_cpu = this_cpu;
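
	/*
	 * In compact mode only CPUs that have seen a sched event get a
	 * column: comp_cpus_mask tracks which CPUs have appeared and
	 * comp_cpus maps each column back to its CPU.
	 */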
	if (sched->map.comp) {
		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
		if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
			sched->map.comp_cpus[cpus_nr++] = this_cpu;
	} else
		cpus_nr = sched->max_cpu.cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
	sched->cpu_last_switched[this_cpu.cpu] = timestamp;
		delta = timestamp - timestamp0;

		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);

	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
	if (sched_in == NULL)

	tr = thread__get_runtime(sched_in);
		thread__put(sched_in);

	sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);

	if (!tr->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			tr->shortname[0] = '.';
			tr->shortname[1] = ' ';
		} else {
			tr->shortname[0] = sched->next_shortname1;
			tr->shortname[1] = sched->next_shortname2;
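
			/*
			 * Shortnames advance A0, B0, ..., Z0, A1, ..., so up
			 * to 260 tasks (26 letters x 10 digits) get a unique
			 * two-character label in the map output.
			 */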
			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
	for (i = 0; i < cpus_nr; i++) {
		struct perf_cpu cpu = {
			.cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
		struct thread *curr_thread = sched->curr_thread[cpu.cpu];
		struct thread_runtime *curr_tr;
		const char *pid_color = color;
		const char *cpu_color = color;

		if (curr_thread && thread__has_color(curr_thread))
			pid_color = COLOR_PIDS;

		if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, cpu))

		if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
			cpu_color = COLOR_CPUS;

		if (cpu.cpu != this_cpu.cpu)
			color_fprintf(stdout, color, " ");
		else
			color_fprintf(stdout, cpu_color, "*");

		if (sched->curr_thread[cpu.cpu]) {
			curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
			if (curr_tr == NULL) {
				thread__put(sched_in);
			color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
		} else
			color_fprintf(stdout, color, " ");

	if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))

	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
	color_fprintf(stdout, color, " %12s secs ", stimestamp);
	if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) {
		const char *pid_color = color;

		if (thread__has_color(sched_in))
			pid_color = COLOR_PIDS;

		color_fprintf(stdout, pid_color, "%s => %s:%d",
			      tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in));
		tr->comm_changed = false;
	if (sched->map.comp && new_cpu)
		color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu);
	color_fprintf(stdout, color, "\n");

	thread__put(sched_in);

static int process_sched_switch_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;

static int process_sched_runtime_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);

static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct evsel *evsel,
						 struct machine *machine)

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);

static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)

	struct thread *thread;
	struct thread_runtime *tr;

	err = perf_event__process_comm(tool, event, sample, machine);

	thread = machine__find_thread(machine, sample->pid, sample->tid);
		pr_err("Internal error: can't find thread\n");

	tr = thread__get_runtime(thread);
		thread__put(thread);

	tr->comm_changed = true;
	thread__put(thread);

static int perf_sched__read_events(struct perf_sched *sched)

	struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_waking",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	struct perf_session *session;
	struct perf_data data = {
		.mode	= PERF_DATA_MODE_READ,
		.force	= sched->force,

	session = perf_session__new(&data, &sched->tool);
	if (IS_ERR(session)) {
		pr_debug("Error creating perf session");
		return PTR_ERR(session);

	symbol__init(&session->header.env);

	/* prefer sched_waking if it is captured */
	if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
		handlers[2].handler = process_sched_wakeup_ignore;

	if (perf_session__set_tracepoints_handlers(session, handlers))

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session);
			pr_err("Failed to process events, error %d", err);

		sched->nr_events      = session->evlist->stats.nr_events[0];
		sched->nr_lost_events = session->evlist->stats.total_lost;
		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];

	perf_session__delete(session);

/*
 * scheduling times are printed as msec.usec
 */
static inline void print_sched_time(unsigned long long nsecs, int width)

	unsigned long msecs;
	unsigned long usecs;

	msecs  = nsecs / NSEC_PER_MSEC;
	nsecs -= msecs * NSEC_PER_MSEC;
	usecs  = nsecs / NSEC_PER_USEC;
	printf("%*lu.%03lu ", width, msecs, usecs);
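	/* e.g. print_sched_time(12345678, 6) prints "    12.345 " (msec.usec) */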
/*
 * returns runtime data for event, allocating memory for it the
 * first time it is used.
 */
static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)

	struct evsel_runtime *r = evsel->priv;

		r = zalloc(sizeof(struct evsel_runtime));
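
/*
 * The per-cpu last_time array below grows in powers of two
 * (__roundup_pow_of_two(cpu + 1)): seeing cpu 5 first allocates 8
 * slots, and newly added slots are zeroed before use.
 */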
/*
 * save last time event was seen per cpu
 */
static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)

	struct evsel_runtime *r = evsel__get_runtime(evsel);

	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
		int i, n = __roundup_pow_of_two(cpu+1);
		void *p = r->last_time;

		p = realloc(r->last_time, n * sizeof(u64));

		for (i = r->ncpu; i < n; ++i)
			r->last_time[i] = (u64) 0;

	r->last_time[cpu] = timestamp;

/* returns last time this event was seen on the given cpu */
static u64 evsel__get_time(struct evsel *evsel, u32 cpu)

	struct evsel_runtime *r = evsel__get_runtime(evsel);

	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))

	return r->last_time[cpu];

static int comm_width = 30;
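
/*
 * Renders "comm", "comm[tid]", or "comm[tid/pid]", depending on whether
 * the thread has a tid and whether it differs from its pid.
 */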
static char *timehist_get_commstr(struct thread *thread)

	static char str[32];
	const char *comm = thread__comm_str(thread);
	pid_t tid = thread__tid(thread);
	pid_t pid = thread__pid(thread);

		n = scnprintf(str, sizeof(str), "%s", comm);
	else if (tid != pid)
		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
	else
		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);

static void timehist_header(struct perf_sched *sched)

	u32 ncpus = sched->max_cpu.cpu + 1;

	printf("%15s %6s ", "time", "cpu");

	if (sched->show_cpu_visual) {
		for (i = 0, j = 0; i < ncpus; ++i) {

	printf(" %-*s %9s %9s %9s", comm_width,
	       "task name", "wait time", "sch delay", "run time");

	if (sched->show_state)
		printf(" %s", "state");

	printf("%15s %-6s ", "", "");

	if (sched->show_cpu_visual)
		printf(" %*s ", ncpus, "");

	printf(" %-*s %9s %9s %9s", comm_width,
	       "[tid/pid]", "(msec)", "(msec)", "(msec)");

	if (sched->show_state)

	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);

	if (sched->show_cpu_visual)
		printf(" %.*s ", ncpus, graph_dotted_line);

	printf(" %.*s %.9s %.9s %.9s", comm_width,
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,

	if (sched->show_state)
		printf(" %.5s", graph_dotted_line);

static void timehist_print_sample(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct addr_location *al,
				  struct thread *thread,
				  u64 t, const char state)

	struct thread_runtime *tr = thread__priv(thread);
	const char *next_comm = evsel__strval(evsel, sample, "next_comm");
	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
	u32 max_cpus = sched->max_cpu.cpu + 1;

	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))

	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {

		for (i = 0; i < max_cpus; ++i) {
			/* flag idle times with 'i'; others are sched events */
			if (i == sample->cpu)
				c = (thread__tid(thread) == 0) ? 'i' : 's';

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
	print_sched_time(wait_time, 6);

	print_sched_time(tr->dt_delay, 6);
	print_sched_time(tr->dt_run, 6);

	if (sched->show_state)
		printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);

	if (sched->show_next) {
		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
		printf(" %-*s", comm_width, nstr);

	if (sched->show_wakeups && !sched->show_next)
		printf(" %-*s", comm_width, "");

	if (thread__tid(thread) == 0)

	if (sched->show_callchain)

	sample__fprintf_sym(sample, al, 0,
			    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
			    EVSEL__PRINT_CALLCHAIN_ARROW |
			    EVSEL__PRINT_SKIP_IGNORED,
			    get_tls_callchain_cursor(), symbol_conf.bt_stop_list, stdout);

/*
 * Explanation of delta-time stats:
 *
 *            t = time of current schedule out event
 *        tprev = time of previous sched out event
 *                also time of schedule-in event for current task
 *    last_time = time of last sched change event for current task
 *                (i.e, time process was last scheduled out)
 * ready_to_run = time of wakeup for current task
 *
 * -----|------------|------------|------------|------
 *    last         ready        tprev            t
 *
 *      |-------- dt_wait --------|
 *      |- dt_delay -|-- dt_run --|
 *
 *     dt_run = run time of current task
 *    dt_wait = time between last schedule out event for task and tprev
 *              represents time spent off the cpu
 *   dt_delay = time between wakeup and schedule-in of task
 */
static void timehist_update_runtime_stats(struct thread_runtime *r,

	r->dt_run = t - tprev;
	if (r->ready_to_run) {
		if (r->ready_to_run > tprev)
			pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
		else
			r->dt_delay = tprev - r->ready_to_run;

	if (r->last_time > tprev)
		pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
	else if (r->last_time) {
		u64 dt_wait = tprev - r->last_time;

		if (r->last_state == 'R')
			r->dt_preempt = dt_wait;
		else if (r->last_state == 'D')
			r->dt_iowait = dt_wait;
		else
			r->dt_sleep = dt_wait;

	update_stats(&r->run_stats, r->dt_run);

	r->total_run_time     += r->dt_run;
	r->total_delay_time   += r->dt_delay;
	r->total_sleep_time   += r->dt_sleep;
	r->total_iowait_time  += r->dt_iowait;
	r->total_preempt_time += r->dt_preempt;

static bool is_idle_sample(struct perf_sample *sample,
			   struct evsel *evsel)

	/* pid 0 == swapper == idle task */
	if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
		return evsel__intval(evsel, sample, "prev_pid") == 0;

	return sample->pid == 0;

static void save_task_callchain(struct perf_sched *sched,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)

	struct callchain_cursor *cursor;
	struct thread *thread;

	/* want main thread for process - has maps */
	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
	if (thread == NULL) {
		pr_debug("Failed to get thread for pid %d.\n", sample->pid);

	if (!sched->show_callchain || sample->callchain == NULL)

	cursor = get_tls_callchain_cursor();

	if (thread__resolve_callchain(thread, cursor, evsel, sample,
				      NULL, NULL, sched->max_stack + 2) != 0) {
			pr_err("Failed to resolve callchain. Skipping\n");

	callchain_cursor_commit(cursor);

		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);

			if (!strcmp(sym->name, "schedule") ||
			    !strcmp(sym->name, "__schedule") ||
			    !strcmp(sym->name, "preempt_schedule"))

		callchain_cursor_advance(cursor);

static int init_idle_thread(struct thread *thread)

	struct idle_thread_runtime *itr;

	thread__set_comm(thread, idle_comm, 0);

	itr = zalloc(sizeof(*itr));

	init_stats(&itr->tr.run_stats);
	callchain_init(&itr->callchain);
	callchain_cursor_reset(&itr->cursor);
	thread__set_priv(thread, itr);

/*
 * Track idle stats per cpu by maintaining a local thread
 * struct for the idle task on each cpu.
 */
static int init_idle_threads(int ncpu)

	idle_threads = zalloc(ncpu * sizeof(struct thread *));

	idle_max_cpu = ncpu;

	/* allocate the actual thread struct if needed */
	for (i = 0; i < ncpu; ++i) {
		idle_threads[i] = thread__new(0, 0);
		if (idle_threads[i] == NULL)

		ret = init_idle_thread(idle_threads[i]);

static void free_idle_threads(void)

	if (idle_threads == NULL)

	for (i = 0; i < idle_max_cpu; ++i) {
		if ((idle_threads[i]))
			thread__delete(idle_threads[i]);

static struct thread *get_idle_thread(int cpu)

	/*
	 * expand/allocate array of pointers to local thread
	 */
	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
		int i, j = __roundup_pow_of_two(cpu+1);

		p = realloc(idle_threads, j * sizeof(struct thread *));

		idle_threads = (struct thread **) p;
		for (i = idle_max_cpu; i < j; ++i)
			idle_threads[i] = NULL;

	/* allocate a new thread struct if needed */
	if (idle_threads[cpu] == NULL) {
		idle_threads[cpu] = thread__new(0, 0);

		if (idle_threads[cpu]) {
			if (init_idle_thread(idle_threads[cpu]) < 0)

	return idle_threads[cpu];

static void save_idle_callchain(struct perf_sched *sched,
				struct idle_thread_runtime *itr,
				struct perf_sample *sample)

	struct callchain_cursor *cursor;

	if (!sched->show_callchain || sample->callchain == NULL)

	cursor = get_tls_callchain_cursor();

	callchain_cursor__copy(&itr->cursor, cursor);

static struct thread *timehist_get_thread(struct perf_sched *sched,
					  struct perf_sample *sample,
					  struct machine *machine,
					  struct evsel *evsel)

	struct thread *thread;

	if (is_idle_sample(sample, evsel)) {
		thread = get_idle_thread(sample->cpu);
			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
	} else {
		/* there were samples with tid 0 but non-zero pid */
		thread = machine__findnew_thread(machine, sample->pid,
						 sample->tid ?: sample->pid);
		if (thread == NULL) {
			pr_debug("Failed to get thread for tid %d. skipping sample.\n",

		save_task_callchain(sched, sample, evsel, machine);
		if (sched->idle_hist) {
			struct thread *idle;
			struct idle_thread_runtime *itr;

			idle = get_idle_thread(sample->cpu);
				pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);

			itr = thread__priv(idle);

			itr->last_thread = thread;
			/* copy task callchain when entering idle */
			if (evsel__intval(evsel, sample, "next_pid") == 0)
				save_idle_callchain(sched, itr, sample);

static bool timehist_skip_sample(struct perf_sched *sched,
				 struct thread *thread,
				 struct evsel *evsel,
				 struct perf_sample *sample)

	if (thread__is_filtered(thread)) {
		sched->skipped_samples++;

	if (sched->idle_hist) {
		if (strcmp(evsel__name(evsel), "sched:sched_switch"))
		else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
			 evsel__intval(evsel, sample, "next_pid") != 0)

static void timehist_print_wakeup_event(struct perf_sched *sched,
					struct evsel *evsel,
					struct perf_sample *sample,
					struct machine *machine,
					struct thread *awakened)

	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);

	/* show wakeup unless both awakee and awaker are filtered */
	if (timehist_skip_sample(sched, thread, evsel, sample) &&
	    timehist_skip_sample(sched, awakened, evsel, sample)) {

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);
	if (sched->show_cpu_visual)
		printf(" %*s ", sched->max_cpu.cpu + 1, "");

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	printf(" %9s %9s %9s ", "", "", "");

	printf("awakened: %s", timehist_get_commstr(awakened));

static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
					union perf_event *event __maybe_unused,
					struct evsel *evsel __maybe_unused,
					struct perf_sample *sample __maybe_unused,
					struct machine *machine __maybe_unused)

static int timehist_sched_wakeup_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of awakened task not pid in sample */
	const u32 pid = evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);

	tr = thread__get_runtime(thread);

	if (tr->ready_to_run == 0)
		tr->ready_to_run = sample->time;

	/* show wakeups if requested */
	if (sched->show_wakeups &&
	    !perf_time__skip_sample(&sched->ptime, sample->time))
		timehist_print_wakeup_event(sched, evsel, sample, machine, thread);

static void timehist_print_migration_event(struct perf_sched *sched,
					   struct evsel *evsel,
					   struct perf_sample *sample,
					   struct machine *machine,
					   struct thread *migrated)

	struct thread *thread;

	if (sched->summary_only)

	max_cpus = sched->max_cpu.cpu + 1;
	ocpu = evsel__intval(evsel, sample, "orig_cpu");
	dcpu = evsel__intval(evsel, sample, "dest_cpu");

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);

	if (timehist_skip_sample(sched, thread, evsel, sample) &&
	    timehist_skip_sample(sched, migrated, evsel, sample)) {

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {

		for (i = 0; i < max_cpus; ++i) {
			c = (i == sample->cpu) ? 'm' : ' ';

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	printf(" %9s %9s %9s ", "", "", "");

	printf("migrated: %s", timehist_get_commstr(migrated));
	printf(" cpu %d => %d", ocpu, dcpu);

static int timehist_migrate_task_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of migrated task not pid in sample */
	const u32 pid = evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);

	tr = thread__get_runtime(thread);

	/* show migrations if requested */
	timehist_print_migration_event(sched, evsel, sample, machine, thread);

static int timehist_sched_change_event(struct perf_tool *tool,
				       union perf_event *event,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)

	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct perf_time_interval *ptime = &sched->ptime;
	struct addr_location al;
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	u64 tprev, t = sample->time;
	const char state = evsel__taskstate(evsel, sample, "prev_state");

	addr_location__init(&al);
	if (machine__resolve(machine, &al, sample) < 0) {
		pr_err("problem processing %d event. skipping it\n",
		       event->header.type);

	thread = timehist_get_thread(sched, sample, machine, evsel);
	if (thread == NULL) {

	if (timehist_skip_sample(sched, thread, evsel, sample))

	tr = thread__get_runtime(thread);

	tprev = evsel__get_time(evsel, sample->cpu);

	/*
	 * If start time given:
	 * - sample time is under window user cares about - skip sample
	 * - tprev is under window user cares about - reset to start of window
	 */
	if (ptime->start && ptime->start > t)

	if (tprev && ptime->start > tprev)
		tprev = ptime->start;

	/*
	 * If end time given:
	 * - previous sched event is out of window - we are done
	 * - sample time is beyond window user cares about - reset it
	 *   to close out stats for time window interest
	 */
		if (tprev > ptime->end)

	if (!sched->idle_hist || thread__tid(thread) == 0) {
		if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
			timehist_update_runtime_stats(tr, t, tprev);

		if (sched->idle_hist) {
			struct idle_thread_runtime *itr = (void *)tr;
			struct thread_runtime *last_tr;

			BUG_ON(thread__tid(thread) != 0);

			if (itr->last_thread == NULL)

			/* add current idle time as last thread's runtime */
			last_tr = thread__get_runtime(itr->last_thread);
			if (last_tr == NULL)

			timehist_update_runtime_stats(last_tr, t, tprev);
2628 * remove delta time of last thread as it's not updated
2629 * and otherwise it will show an invalid value next
2630 * time. we only care total run time and run stat.
2632 last_tr->dt_run = 0;
2633 last_tr->dt_delay = 0;
2634 last_tr->dt_sleep = 0;
2635 last_tr->dt_iowait = 0;
2636 last_tr->dt_preempt = 0;
2639 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2641 itr->last_thread = NULL;
2645 if (!sched->summary_only)
2646 timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2649 if (sched->hist_time.start == 0 && t >= ptime->start)
2650 sched->hist_time.start = t;
2651 if (ptime->end == 0 || t <= ptime->end)
2652 sched->hist_time.end = t;
2655 /* time of this sched_switch event becomes last time task seen */
2656 tr->last_time = sample->time;
2658 /* last state is used to determine where to account wait time */
2659 tr->last_state = state;
2661 /* sched out event for task so reset ready to run time */
2662 tr->ready_to_run = 0;
2665 evsel__save_time(evsel, sample->time, sample->cpu);
2667 addr_location__exit(&al);
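/*
 * Illustrative sketch (not part of the original tool): how the --time
 * window clamps one sched-in..sched-out slice before the stats update
 * above.  A slice running 1.5s..2.5s against a window of 2.0s..3.0s
 * contributes only 0.5s.  The helper and its names are hypothetical and
 * it assumes the slice overlaps the window at all (the real code skips
 * non-overlapping samples via the goto out paths).
 */
static u64 __maybe_unused timehist_clamp_slice_sketch(u64 tprev, u64 t,
						      u64 win_start, u64 win_end)
{
	if (win_start && win_start > tprev)	/* slice began before the window */
		tprev = win_start;
	if (win_end && t > win_end)		/* slice ends after the window */
		t = win_end;
	return t - tprev;			/* nanoseconds credited to the task */
}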
static int timehist_sched_switch_event(struct perf_tool *tool,
				       union perf_event *event,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine __maybe_unused)
{
	return timehist_sched_change_event(tool, event, evsel, sample, machine);
}

static int process_lost(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine __maybe_unused)
{
	char tstr[64];

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s ", tstr);
	printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);

	return 0;
}
static void print_thread_runtime(struct thread *t,
				 struct thread_runtime *r)
{
	double mean = avg_stats(&r->run_stats);
	float stddev;

	printf("%*s   %5d  %9" PRIu64 " ",
	       comm_width, timehist_get_commstr(t), thread__ppid(t),
	       (u64) r->run_stats.n);

	print_sched_time(r->total_run_time, 8);
	stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
	print_sched_time(r->run_stats.min, 6);
	printf(" ");
	print_sched_time((u64) mean, 6);
	printf(" ");
	print_sched_time(r->run_stats.max, 6);
	printf(" ");
	printf("%5.2f", stddev);
	printf(" %5" PRIu64, r->migrations);
	printf("\n");
}
static void print_thread_waittime(struct thread *t,
				  struct thread_runtime *r)
{
	printf("%*s   %5d  %9" PRIu64 " ",
	       comm_width, timehist_get_commstr(t), thread__ppid(t),
	       (u64) r->run_stats.n);

	print_sched_time(r->total_run_time, 8);
	print_sched_time(r->total_sleep_time, 6);
	printf(" ");
	print_sched_time(r->total_iowait_time, 6);
	printf(" ");
	print_sched_time(r->total_preempt_time, 6);
	printf(" ");
	print_sched_time(r->total_delay_time, 6);
	printf("\n");
}
struct total_run_stats {
	struct perf_sched *sched;
	u64  sched_count;
	u64  task_count;
	u64  total_run_time;
};

static int show_thread_runtime(struct thread *t, void *priv)
{
	struct total_run_stats *stats = priv;
	struct thread_runtime *r;

	if (thread__is_filtered(t))
		return 0;

	r = thread__priv(t);
	if (r && r->run_stats.n) {
		stats->task_count++;
		stats->sched_count += r->run_stats.n;
		stats->total_run_time += r->total_run_time;

		if (stats->sched->show_state)
			print_thread_waittime(t, r);
		else
			print_thread_runtime(t, r);
	}

	return 0;
}
static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = " <- ";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (node == NULL)
		return 0;

	ret = callchain__fprintf_folded(fp, node->parent);
	first = (ret == 0);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym && chain->ms.sym->ignore)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain, bf, sizeof(bf),
							false));
		first = false;
	}

	return ret;
}

static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
{
	size_t ret = 0;
	FILE *fp = stdout;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first_cached(root);

	printf("  %16s  %8s  %s\n", "Idle time (msec)", "Count", "Callchains");
	printf("  %.16s  %.8s  %.50s\n", graph_dotted_line, graph_dotted_line,
	       graph_dotted_line);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		rb_node = rb_next(rb_node);

		ret += fprintf(fp, "  ");
		print_sched_time(chain->hit, 12);
		ret += 16;  /* print_sched_time returns 2nd arg + 4 */
		ret += fprintf(fp, " %8d  ", chain->count);
		ret += callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
	}

	return ret;
}
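/*
 * Illustrative example (not in the original source): with the folded
 * format above, one idle-callchain row prints the idle time, the hit
 * count, then the parent-first symbol chain joined by " <- ", roughly:
 *
 *	      1234.567        42  cpuidle_enter <- do_idle <- cpu_startup_entry
 */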
static void timehist_print_summary(struct perf_sched *sched,
				   struct perf_session *session)
{
	struct machine *m = &session->machines.host;
	struct total_run_stats totals;
	u64 task_count;
	struct thread *t;
	struct thread_runtime *r;
	int i;
	u64 hist_time = sched->hist_time.end - sched->hist_time.start;

	memset(&totals, 0, sizeof(totals));
	totals.sched = sched;

	if (sched->idle_hist) {
		printf("\nIdle-time summary\n");
		printf("%*s  parent  sched-out  ", comm_width, "comm");
		printf("  idle-time   min-idle    avg-idle    max-idle  stddev  migrations\n");
	} else if (sched->show_state) {
		printf("\nWait-time summary\n");
		printf("%*s  parent   sched-in  ", comm_width, "comm");
		printf("  run-time      sleep      iowait     preempt       delay\n");
	} else {
		printf("\nRuntime summary\n");
		printf("%*s  parent   sched-in  ", comm_width, "comm");
		printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
	}
	printf("%*s            (count)  ", comm_width, "");
	printf("     (msec)     (msec)      (msec)      (msec)       %s\n",
	       sched->show_state ? "(msec)" : "%");
	printf("%.117s\n", graph_dotted_line);

	machine__for_each_thread(m, show_thread_runtime, &totals);
	task_count = totals.task_count;
	if (!task_count)
		printf("<no still running tasks>\n");

	/* CPU idle stats not tracked when samples were skipped */
	if (sched->skipped_samples && !sched->idle_hist)
		return;

	printf("\nIdle stats:\n");
	for (i = 0; i < idle_max_cpu; ++i) {
		if (cpu_list && !test_bit(i, cpu_bitmap))
			continue;

		t = idle_threads[i];
		if (!t)
			continue;

		r = thread__priv(t);
		if (r && r->run_stats.n) {
			totals.sched_count += r->run_stats.n;
			printf("    CPU %2d idle for ", i);
			print_sched_time(r->total_run_time, 6);
			printf(" msec  (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
		} else
			printf("    CPU %2d idle entire time window\n", i);
	}

	if (sched->idle_hist && sched->show_callchain) {
		callchain_param.mode  = CHAIN_FOLDED;
		callchain_param.value = CCVAL_PERIOD;

		callchain_register_param(&callchain_param);

		printf("\nIdle stats by callchain:\n");
		for (i = 0; i < idle_max_cpu; ++i) {
			struct idle_thread_runtime *itr;

			t = idle_threads[i];
			if (!t)
				continue;

			itr = thread__priv(t);
			if (itr == NULL)
				continue;

			callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
					     0, &callchain_param);

			printf("  CPU %2d:", i);
			print_sched_time(itr->tr.total_run_time, 6);
			printf(" msec\n");
			timehist_print_idlehist_callchain(&itr->sorted_root);
			printf("\n");
		}
	}

	printf("\n"
	       "    Total number of unique tasks: %" PRIu64 "\n"
	       "Total number of context switches: %" PRIu64 "\n",
	       totals.task_count, totals.sched_count);

	printf("           Total run time (msec): ");
	print_sched_time(totals.total_run_time, 2);
	printf("\n");

	printf("    Total scheduling time (msec): ");
	print_sched_time(hist_time, 2);
	printf(" (x %d)\n", sched->max_cpu.cpu);
}
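/*
 * Illustrative example (not in the original source): a per-CPU line from
 * the "Idle stats" section above looks like:
 *
 *	    CPU  3 idle for    995.842  msec  ( 99.58%)
 *
 * where the percentage is 100.0 * total_run_time / hist_time as computed
 * in the loop.
 */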
typedef int (*sched_handler)(struct perf_tool *tool,
			     union perf_event *event,
			     struct evsel *evsel,
			     struct perf_sample *sample,
			     struct machine *machine);

static int perf_timehist__process_sample(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct evsel *evsel,
					 struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int err = 0;
	struct perf_cpu this_cpu = {
		.cpu = sample->cpu,
	};

	if (this_cpu.cpu > sched->max_cpu.cpu)
		sched->max_cpu = this_cpu;

	if (evsel->handler != NULL) {
		sched_handler f = evsel->handler;

		err = f(tool, event, evsel, sample, machine);
	}

	return err;
}
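/*
 * Illustrative note (not in the original source): evsel->handler is the
 * per-tracepoint callback installed by perf_session__set_tracepoints_handlers()
 * from the handlers[] table in perf_sched__timehist() below; this function
 * only tracks max_cpu and forwards the sample to that callback.
 */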
static int timehist_check_attr(struct perf_sched *sched,
			       struct evlist *evlist)
{
	struct evsel *evsel;
	struct evsel_runtime *er;

	list_for_each_entry(evsel, &evlist->core.entries, core.node) {
		er = evsel__get_runtime(evsel);
		if (er == NULL) {
			pr_err("Failed to allocate memory for evsel runtime data\n");
			return -1;
		}

		if (sched->show_callchain && !evsel__has_callchain(evsel)) {
			pr_info("Samples do not have callchains.\n");
			sched->show_callchain = 0;
			symbol_conf.use_callchain = 0;
		}
	}

	return 0;
}
static int perf_sched__timehist(struct perf_sched *sched)
{
	struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch",       timehist_sched_switch_event, },
		{ "sched:sched_wakeup",	      timehist_sched_wakeup_event, },
		{ "sched:sched_waking",       timehist_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
	};
	const struct evsel_str_handler migrate_handlers[] = {
		{ "sched:sched_migrate_task", timehist_migrate_task_event, },
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = sched->force,
	};

	struct perf_session *session;
	struct evlist *evlist;
	int err = -1;

	/*
	 * event handlers for timehist option
	 */
	sched->tool.sample	 = perf_timehist__process_sample;
	sched->tool.mmap	 = perf_event__process_mmap;
	sched->tool.comm	 = perf_event__process_comm;
	sched->tool.exit	 = perf_event__process_exit;
	sched->tool.fork	 = perf_event__process_fork;
	sched->tool.lost	 = process_lost;
	sched->tool.attr	 = perf_event__process_attr;
	sched->tool.tracing_data = perf_event__process_tracing_data;
	sched->tool.build_id	 = perf_event__process_build_id;

	sched->tool.ordered_events = true;
	sched->tool.ordering_requires_timestamps = true;

	symbol_conf.use_callchain = sched->show_callchain;

	session = perf_session__new(&data, &sched->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	if (cpu_list) {
		err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
		if (err < 0)
			goto out;
	}

	evlist = session->evlist;

	symbol__init(&session->header.env);

	if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
		pr_err("Invalid time string\n");
		err = -EINVAL;
		goto out;
	}

	if (timehist_check_attr(sched, evlist) != 0)
		goto out;

	setup_pager();

	/* prefer sched_waking if it is captured */
	if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
		handlers[1].handler = timehist_sched_wakeup_ignore;

	/* setup per-evsel handlers */
	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out;

	/* sched_switch event at a minimum needs to exist */
	if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
		pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
		goto out;
	}

	if (sched->show_migrations &&
	    perf_session__set_tracepoints_handlers(session, migrate_handlers))
		goto out;

	/* pre-allocate struct for per-CPU idle stats */
	sched->max_cpu.cpu = session->header.env.nr_cpus_online;
	if (sched->max_cpu.cpu == 0)
		sched->max_cpu.cpu = 4;
	if (init_idle_threads(sched->max_cpu.cpu))
		goto out;

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (sched->summary_only)
		sched->summary = sched->summary_only;

	if (!sched->summary_only)
		timehist_header(sched);

	err = perf_session__process_events(session);
	if (err) {
		pr_err("Failed to process events, error %d", err);
		goto out;
	}

	sched->nr_events      = evlist->stats.nr_events[0];
	sched->nr_lost_events = evlist->stats.total_lost;
	sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];

	if (sched->summary)
		timehist_print_summary(sched, session);

out:
	free_idle_threads();
	perf_session__delete(session);

	return err;
}
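/*
 * Typical usage (illustrative):
 *
 *	perf sched record -- sleep 1
 *	perf sched timehist		# per-event lines
 *	perf sched timehist -s		# summary only
 *	perf sched timehist -w -M	# also show wakeup/migration events
 *
 * The -w/-M output relies on the wakeup and migrate handlers set up above.
 */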
static void print_bad_events(struct perf_sched *sched)
{
	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
			sched->nr_unordered_timestamps, sched->nr_timestamps);
	}
	if (sched->nr_lost_events && sched->nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
	}
	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
			sched->nr_context_switch_bugs, sched->nr_timestamps);
		if (sched->nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}
static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
{
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	struct work_atoms *this;
	const char *comm = thread__comm_str(data->thread), *this_comm;
	bool leftmost = true;

	while (*new) {
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		this_comm = thread__comm_str(this->thread);
		cmp = strcmp(comm, this_comm);
		if (cmp > 0) {
			new = &((*new)->rb_left);
		} else if (cmp < 0) {
			new = &((*new)->rb_right);
			leftmost = false;
		} else {
			this->num_merged++;
			this->total_runtime += data->total_runtime;
			this->nb_atoms += data->nb_atoms;
			this->total_lat += data->total_lat;
			list_splice(&data->work_list, &this->work_list);
			if (this->max_lat < data->max_lat) {
				this->max_lat = data->max_lat;
				this->max_lat_start = data->max_lat_start;
				this->max_lat_end = data->max_lat_end;
			}
			zfree(&data);
			return;
		}
	}

	data->num_merged = 1;
	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
}
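/*
 * Illustrative note (not in the original source): merging is keyed on the
 * comm string, so two work_atoms entries for e.g. "gcc" (different pids)
 * collapse into one node whose total_runtime, nb_atoms and total_lat are
 * the sums, and whose max_lat is the larger of the two.
 */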
static void perf_sched__merge_lat(struct perf_sched *sched)
{
	struct work_atoms *data;
	struct rb_node *node;

	if (sched->skip_merge)
		return;

	while ((node = rb_first_cached(&sched->atom_root))) {
		rb_erase_cached(node, &sched->atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__merge_work_atoms(&sched->merged_atom_root, data);
	}
}
static int setup_cpus_switch_event(struct perf_sched *sched)
{
	unsigned int i;

	sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
	if (!sched->cpu_last_switched)
		return -1;

	sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
	if (!sched->curr_pid) {
		zfree(&sched->cpu_last_switched);
		return -1;
	}

	for (i = 0; i < MAX_CPUS; i++)
		sched->curr_pid[i] = -1;

	return 0;
}

static void free_cpus_switch_event(struct perf_sched *sched)
{
	zfree(&sched->curr_pid);
	zfree(&sched->cpu_last_switched);
}
static int perf_sched__lat(struct perf_sched *sched)
{
	int rc = -1;
	struct rb_node *next;

	setup_pager();

	if (setup_cpus_switch_event(sched))
		return rc;

	if (perf_sched__read_events(sched))
		goto out_free_cpus_switch_event;

	perf_sched__merge_lat(sched);
	perf_sched__sort_lat(sched);

	printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Avg delay ms    | Max delay ms    | Max delay start           | Max delay end          |\n");
	printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");

	next = rb_first_cached(&sched->sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(sched, work_list);
		next = rb_next(next);
		thread__zput(work_list->thread);
	}

	printf(" -----------------------------------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events(sched);
	printf("\n");

	rc = 0;

out_free_cpus_switch_event:
	free_cpus_switch_event(sched);
	return rc;
}
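/*
 * Typical usage (illustrative):
 *
 *	perf sched record -- make -j8
 *	perf sched latency --sort max	# sort the table above by max delay
 *	perf sched latency -p		# per-pid rows, skipping the comm merge
 */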
static int setup_map_cpus(struct perf_sched *sched)
{
	sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);

	if (sched->map.comp) {
		sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
		if (!sched->map.comp_cpus)
			return -1;
	}

	if (sched->map.cpus_str) {
		sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
		if (!sched->map.cpus) {
			pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
			zfree(&sched->map.comp_cpus);
			return -1;
		}
	}

	return 0;
}
static int setup_color_pids(struct perf_sched *sched)
{
	struct perf_thread_map *map;

	if (!sched->map.color_pids_str)
		return 0;

	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
	if (!map) {
		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
		return -1;
	}

	sched->map.color_pids = map;
	return 0;
}

static int setup_color_cpus(struct perf_sched *sched)
{
	struct perf_cpu_map *map;

	if (!sched->map.color_cpus_str)
		return 0;

	map = perf_cpu_map__new(sched->map.color_cpus_str);
	if (!map) {
		pr_err("failed to get cpu map from %s\n", sched->map.color_cpus_str);
		return -1;
	}

	sched->map.color_cpus = map;
	return 0;
}
static int perf_sched__map(struct perf_sched *sched)
{
	int rc = -1;

	sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
	if (!sched->curr_thread)
		return rc;

	if (setup_cpus_switch_event(sched))
		goto out_free_curr_thread;

	if (setup_map_cpus(sched))
		goto out_free_cpus_switch_event;

	if (setup_color_pids(sched))
		goto out_put_map_cpus;

	if (setup_color_cpus(sched))
		goto out_put_color_pids;

	setup_pager();
	if (perf_sched__read_events(sched))
		goto out_put_color_cpus;

	rc = 0;
	print_bad_events(sched);

out_put_color_cpus:
	perf_cpu_map__put(sched->map.color_cpus);

out_put_color_pids:
	perf_thread_map__put(sched->map.color_pids);

out_put_map_cpus:
	zfree(&sched->map.comp_cpus);
	perf_cpu_map__put(sched->map.cpus);

out_free_cpus_switch_event:
	free_cpus_switch_event(sched);

out_free_curr_thread:
	zfree(&sched->curr_thread);
	return rc;
}
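/*
 * Typical usage (illustrative):
 *
 *	perf sched map --compact --cpus 0-3 --color-pids 1234
 *
 * which exercises the setup_map_cpus()/setup_color_*() helpers above.
 */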
static int perf_sched__replay(struct perf_sched *sched)
{
	unsigned long i;
	int ret;

	mutex_init(&sched->start_work_mutex);
	mutex_init(&sched->work_done_wait_mutex);

	ret = setup_cpus_switch_event(sched);
	if (ret)
		goto out_mutex_destroy;

	calibrate_run_measurement_overhead(sched);
	calibrate_sleep_measurement_overhead(sched);

	test_calibrations(sched);

	ret = perf_sched__read_events(sched);
	if (ret)
		goto out_free_cpus_switch_event;

	printf("nr_run_events:        %ld\n", sched->nr_run_events);
	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			sched->nr_run_events_optimized);

	print_task_traces(sched);
	add_cross_task_wakeups(sched);

	sched->thread_funcs_exit = false;
	create_tasks(sched);
	printf("------------------------------------------------------------\n");
	for (i = 0; i < sched->replay_repeat; i++)
		run_one_test(sched);

	sched->thread_funcs_exit = true;
	destroy_tasks(sched);

out_free_cpus_switch_event:
	free_cpus_switch_event(sched);

out_mutex_destroy:
	mutex_destroy(&sched->start_work_mutex);
	mutex_destroy(&sched->work_done_wait_mutex);
	return ret;
}
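/*
 * Typical usage (illustrative):
 *
 *	perf sched record -- sleep 1
 *	perf sched replay -r 10		# re-run the recorded workload 10 times
 */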
static void setup_sorting(struct perf_sched *sched, const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(sched->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
			usage_with_options_msg(usage_msg, options,
					"Unknown --sort key: `%s'", tok);
		}
	}

	free(str);

	sort_dimension__add("pid", &sched->cmp_pid);
}
static bool schedstat_events_exposed(void)
{
	/*
	 * Select "sched:sched_stat_wait" event to check
	 * whether schedstat tracepoints are exposed.
	 */
	return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
		false : true;
}
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	char **rec_argv;
	const char **rec_argv_copy;
	const char * const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
		"-e", "sched:sched_switch",
		"-e", "sched:sched_stat_runtime",
		"-e", "sched:sched_process_fork",
		"-e", "sched:sched_wakeup_new",
		"-e", "sched:sched_migrate_task",
	};

	/*
	 * The tracepoints trace_sched_stat_{wait, sleep, iowait}
	 * are not exposed to user if CONFIG_SCHEDSTATS is not set,
	 * to prevent "perf sched record" execution failure, determine
	 * whether to record schedstat events according to actual situation.
	 */
	const char * const schedstat_args[] = {
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
	};
	unsigned int schedstat_argc = schedstat_events_exposed() ?
		ARRAY_SIZE(schedstat_args) : 0;

	struct tep_event *waking_event;
	int ret;

	/*
	 * +2 for either "-e", "sched:sched_wakeup" or
	 * "-e", "sched:sched_waking"
	 */
	rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;
	rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv_copy == NULL) {
		free(rec_argv);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	rec_argv[i++] = strdup("-e");
	waking_event = trace_event__tp_format("sched", "sched_waking");
	if (!IS_ERR(waking_event))
		rec_argv[i++] = strdup("sched:sched_waking");
	else
		rec_argv[i++] = strdup("sched:sched_wakeup");

	for (j = 0; j < schedstat_argc; j++)
		rec_argv[i++] = strdup(schedstat_args[j]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = strdup(argv[j]);

	BUG_ON(i != rec_argc);

	memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
	ret = cmd_record(rec_argc, rec_argv_copy);

	for (i = 0; i < rec_argc; i++)
		free(rec_argv[i]);
	free(rec_argv);
	free(rec_argv_copy);

	return ret;
}
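/*
 * Illustrative example (not in the original source): on a kernel exposing
 * sched_waking and the schedstat tracepoints, the argv assembled above is
 * equivalent to running:
 *
 *	perf record -a -R -m 1024 -c 1 \
 *		-e sched:sched_switch -e sched:sched_stat_runtime \
 *		-e sched:sched_process_fork -e sched:sched_wakeup_new \
 *		-e sched:sched_migrate_task -e sched:sched_waking \
 *		-e sched:sched_stat_wait -e sched:sched_stat_sleep \
 *		-e sched:sched_stat_iowait <user args>
 */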
int cmd_sched(int argc, const char **argv)
{
	static const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		 = perf_sched__process_tracepoint_sample,
			.comm		 = perf_sched__process_comm,
			.namespaces	 = perf_event__process_namespaces,
			.lost		 = perf_event__process_lost,
			.fork		 = perf_sched__process_fork_event,
			.ordered_events	 = true,
		},
		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
		.sort_order	      = default_sort_order,
		.replay_repeat	      = 10,
		.profile_cpu	      = -1,
		.next_shortname1      = 'A',
		.next_shortname2      = '0',
		.skip_merge	      = 0,
		.show_callchain	      = 1,
		.max_stack	      = 5,
	};
	const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
	OPT_END()
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
		    "latency stats per pid instead of per comm"),
	OPT_PARENT(sched_options)
	};
	const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_PARENT(sched_options)
	};
	const struct option map_options[] = {
	OPT_BOOLEAN(0, "compact", &sched.map.comp,
		    "map output in compact mode"),
	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
		   "highlight given pids in map"),
	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
		   "highlight given CPUs in map"),
	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
		   "display given CPUs in map"),
	OPT_PARENT(sched_options)
	};
	const struct option timehist_options[] = {
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
		    "Display call chains if present (default on)"),
	OPT_UINTEGER(0, "max-stack", &sched.max_stack,
		     "Maximum number of functions to display backtrace."),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		   "Look for files with symbols relative to this directory"),
	OPT_BOOLEAN('s', "summary", &sched.summary_only,
		    "Show only a summary of task scheduling with statistics"),
	OPT_BOOLEAN('S', "with-summary", &sched.summary,
		    "Show all scheduling events and a summary with statistics"),
	OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
	OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
	OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
	OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
	OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
	OPT_STRING(0, "time", &sched.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
	OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
		   "analyze events only for given process id(s)"),
	OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
		   "analyze events only for given thread id(s)"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
	OPT_PARENT(sched_options)
	};
	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char * const map_usage[] = {
		"perf sched map [<options>]",
		NULL
	};
	const char * const timehist_usage[] = {
		"perf sched timehist [<options>]",
		NULL
	};
	const char *const sched_subcommands[] = { "record", "latency", "map",
						  "replay", "script",
						  "timehist", NULL };
	const char *sched_usage[] = {
		NULL,
		NULL
	};
	struct trace_sched_handler lat_ops = {
		.wakeup_event	    = latency_wakeup_event,
		.switch_event	    = latency_switch_event,
		.runtime_event	    = latency_runtime_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops = {
		.switch_event	    = map_switch_event,
	};
	struct trace_sched_handler replay_ops = {
		.wakeup_event	    = replay_wakeup_event,
		.switch_event	    = replay_switch_event,
		.fork_event	    = replay_fork_event,
	};
	int ret;
	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script")) {
		return cmd_script(argc, argv);
	} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		return __cmd_record(argc, argv);
	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		if (argc) {
			argc = parse_options(argc, argv, map_options, map_usage, 0);
			if (argc)
				usage_with_options(map_usage, map_options);
		}
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else if (!strcmp(argv[0], "timehist")) {
		if (argc) {
			argc = parse_options(argc, argv, timehist_options,
					     timehist_usage, 0);
			if (argc)
				usage_with_options(timehist_usage, timehist_options);
		}

		if ((sched.show_wakeups || sched.show_next) &&
		    sched.summary_only) {
			pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
			parse_options_usage(timehist_usage, timehist_options, "s", true);
			if (sched.show_wakeups)
				parse_options_usage(NULL, timehist_options, "w", true);
			if (sched.show_next)
				parse_options_usage(NULL, timehist_options, "n", true);
			return -EINVAL;
		}
		ret = symbol__validate_sym_arguments();
		if (ret)
			return ret;

		return perf_sched__timehist(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}