4 * Builtin top command: Display a continuously updated profile of
5 * any workload, CPU or specific PID.
7 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
8 * 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Improvements and fixes by:
12 * Arjan van de Ven <arjan@linux.intel.com>
13 * Yanmin Zhang <yanmin.zhang@intel.com>
14 * Wu Fengguang <fengguang.wu@intel.com>
15 * Mike Galbraith <efault@gmx.de>
16 * Paul Mackerras <paulus@samba.org>
18 * Released under the GPL v2. (and only v2, not any later version)
24 #include "util/annotate.h"
25 #include "util/bpf-event.h"
26 #include "util/config.h"
27 #include "util/color.h"
28 #include "util/evlist.h"
29 #include "util/evsel.h"
30 #include "util/event.h"
31 #include "util/machine.h"
33 #include "util/session.h"
34 #include "util/symbol.h"
35 #include "util/thread.h"
36 #include "util/thread_map.h"
38 #include <linux/rbtree.h>
39 #include <subcmd/parse-options.h>
40 #include "util/parse-events.h"
41 #include "util/cpumap.h"
42 #include "util/xyarray.h"
43 #include "util/sort.h"
44 #include "util/term.h"
45 #include "util/intlist.h"
46 #include "util/parse-branch-options.h"
47 #include "arch/common.h"
49 #include "util/debug.h"
50 #include "util/ordered-events.h"
66 #include <sys/syscall.h>
67 #include <sys/ioctl.h>
69 #include <sys/prctl.h>
72 #include <sys/utsname.h>
75 #include <linux/stringify.h>
76 #include <linux/time64.h>
77 #include <linux/types.h>
79 #include "sane_ctype.h"
81 static volatile int done;
82 static volatile int resize;
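/*
 * The summary header rendered by perf_top__header_snprintf() occupies the
 * first rows of the terminal; the remaining rows are used for symbol entries.
 */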
84 #define HEADER_LINE_NR 5
86 static void perf_top__update_print_entries(struct perf_top *top)
88 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
91 static void winch_sig(int sig __maybe_unused)
96 static void perf_top__resize(struct perf_top *top)
98 get_term_dimensions(&top->winsize);
99 perf_top__update_print_entries(top);
102 static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
104 struct perf_evsel *evsel = hists_to_evsel(he->hists);
106 struct annotation *notes;
110 if (!he || !he->ms.sym)
117 * We can't annotate with just /proc/kallsyms
119 if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
120 !dso__is_kcore(map->dso)) {
121 pr_err("Can't annotate %s: No vmlinux file was found in the "
122 "path\n", sym->name);
127 notes = symbol__annotation(sym);
128 pthread_mutex_lock(&notes->lock);
130 if (!symbol__hists(sym, top->evlist->nr_entries)) {
131 pthread_mutex_unlock(&notes->lock);
132 pr_err("Not enough memory for annotating '%s' symbol!\n",
138 err = symbol__annotate(sym, map, evsel, 0, &top->annotation_opts, NULL);
140 top->sym_filter_entry = he;
143 symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
144 pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
147 pthread_mutex_unlock(&notes->lock);
151 static void __zero_source_counters(struct hist_entry *he)
153 struct symbol *sym = he->ms.sym;
154 symbol__annotate_zero_histograms(sym);
157 static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
160 int err = uname(&uts);
162 ui__warning("Out of bounds address found:\n\n"
163 "Addr: %" PRIx64 "\n"
165 "Map: %" PRIx64 "-%" PRIx64 "\n"
166 "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
170 "Not all samples will be on the annotation output.\n\n"
171 "Please report to linux-kernel@vger.kernel.org\n",
172 ip, map->dso->long_name, dso__symtab_origin(map->dso),
173 map->start, map->end, sym->start, sym->end,
174 sym->binding == STB_GLOBAL ? 'g' :
175 sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
176 err ? "[unknown]" : uts.machine,
177 err ? "[unknown]" : uts.release, perf_version_string);
178 if (use_browser <= 0)
181 map->erange_warned = true;
184 static void perf_top__record_precise_ip(struct perf_top *top,
185 struct hist_entry *he,
186 struct perf_sample *sample,
187 struct perf_evsel *evsel, u64 ip)
189 struct annotation *notes;
190 struct symbol *sym = he->ms.sym;
193 if (sym == NULL || (use_browser == 0 &&
194 (top->sym_filter_entry == NULL ||
195 top->sym_filter_entry->ms.sym != sym)))
198 notes = symbol__annotation(sym);
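/*
 * Don't stall the sampling path: if the annotation lock is contended,
 * skip this update rather than wait for it.
 */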
200 if (pthread_mutex_trylock(&notes->lock))
203 err = hist_entry__inc_addr_samples(he, sample, evsel, ip);
205 pthread_mutex_unlock(&notes->lock);
209 * This function is now called with he->hists->lock held.
210 * Release it before going to sleep.
212 pthread_mutex_unlock(&he->hists->lock);
214 if (err == -ERANGE && !he->ms.map->erange_warned)
215 ui__warn_map_erange(he->ms.map, sym, ip);
216 else if (err == -ENOMEM) {
217 pr_err("Not enough memory for annotating '%s' symbol!\n",
222 pthread_mutex_lock(&he->hists->lock);
226 static void perf_top__show_details(struct perf_top *top)
228 struct hist_entry *he = top->sym_filter_entry;
229 struct perf_evsel *evsel = hists_to_evsel(he->hists);
230 struct annotation *notes;
231 struct symbol *symbol;
238 notes = symbol__annotation(symbol);
240 pthread_mutex_lock(&notes->lock);
242 symbol__calc_percent(symbol, evsel);
244 if (notes->src == NULL)
247 printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
248 printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
250 more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, &top->annotation_opts);
252 if (top->evlist->enabled) {
254 symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
256 symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
259 printf("%d lines not displayed, maybe increase display entries [e]\n", more);
261 pthread_mutex_unlock(&notes->lock);
264 static void perf_top__print_sym_table(struct perf_top *top)
268 const int win_width = top->winsize.ws_col - 1;
269 struct perf_evsel *evsel = top->sym_evsel;
270 struct hists *hists = evsel__hists(evsel);
274 perf_top__header_snprintf(top, bf, sizeof(bf));
277 printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
279 if (!top->record_opts.overwrite &&
280 (hists->stats.nr_lost_warned !=
281 hists->stats.nr_events[PERF_RECORD_LOST])) {
282 hists->stats.nr_lost_warned =
283 hists->stats.nr_events[PERF_RECORD_LOST];
284 color_fprintf(stdout, PERF_COLOR_RED,
285 "WARNING: LOST %d chunks, Check IO/CPU overload",
286 hists->stats.nr_lost_warned);
290 if (top->sym_filter_entry) {
291 perf_top__show_details(top);
295 if (top->evlist->enabled) {
297 hists__delete_entries(hists);
299 hists__decay_entries(hists, top->hide_user_symbols,
300 top->hide_kernel_symbols);
304 hists__collapse_resort(hists, NULL);
305 perf_evsel__output_resort(evsel, NULL);
307 hists__output_recalc_col_len(hists, top->print_entries - printed);
309 hists__fprintf(hists, false, top->print_entries - printed, win_width,
310 top->min_percent, stdout, !symbol_conf.use_callchain);
313 static void prompt_integer(int *target, const char *msg)
315 char *buf = malloc(0), *p;
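/* Start with an empty buffer; getline() below reallocates it as needed. */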
319 fprintf(stdout, "\n%s: ", msg);
320 if (getline(&buf, &dummy, stdin) < 0)
323 p = strchr(buf, '\n');
333 tmp = strtoul(buf, NULL, 10);
339 static void prompt_percent(int *target, const char *msg)
343 prompt_integer(&tmp, msg);
344 if (tmp >= 0 && tmp <= 100)
348 static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
350 char *buf = malloc(0), *p;
351 struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
352 struct hists *hists = evsel__hists(top->sym_evsel);
353 struct rb_node *next;
356 /* zero counters of active symbol */
358 __zero_source_counters(syme);
359 top->sym_filter_entry = NULL;
362 fprintf(stdout, "\n%s: ", msg);
363 if (getline(&buf, &dummy, stdin) < 0)
366 p = strchr(buf, '\n');
370 next = rb_first_cached(&hists->entries);
372 n = rb_entry(next, struct hist_entry, rb_node);
373 if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
377 next = rb_next(&n->rb_node);
381 fprintf(stderr, "Sorry, %s is not active.\n", buf);
384 perf_top__parse_source(top, found);
390 static void perf_top__print_mapped_keys(struct perf_top *top)
394 if (top->sym_filter_entry) {
395 struct symbol *sym = top->sym_filter_entry->ms.sym;
399 fprintf(stdout, "\nMapped keys:\n");
400 fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
401 fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
403 if (top->evlist->nr_entries > 1)
404 fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));
406 fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
408 fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
409 fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
410 fprintf(stdout, "\t[S] stop annotation.\n");
413 "\t[K] hide kernel symbols. \t(%s)\n",
414 top->hide_kernel_symbols ? "yes" : "no");
416 "\t[U] hide user symbols. \t(%s)\n",
417 top->hide_user_symbols ? "yes" : "no");
418 fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
419 fprintf(stdout, "\t[qQ] quit.\n");
422 static int perf_top__key_mapped(struct perf_top *top, int c)
438 return top->evlist->nr_entries > 1 ? 1 : 0;
446 static bool perf_top__handle_keypress(struct perf_top *top, int c)
450 if (!perf_top__key_mapped(top, c)) {
451 struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
454 perf_top__print_mapped_keys(top);
455 fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
458 set_term_quiet_input(&save);
460 poll(&stdin_poll, 1, -1);
463 tcsetattr(0, TCSAFLUSH, &save);
464 if (!perf_top__key_mapped(top, c))
470 prompt_integer(&top->delay_secs, "Enter display delay");
471 if (top->delay_secs < 1)
475 prompt_integer(&top->print_entries, "Enter display entries (lines)");
476 if (top->print_entries == 0) {
477 perf_top__resize(top);
478 signal(SIGWINCH, winch_sig);
480 signal(SIGWINCH, SIG_DFL);
484 if (top->evlist->nr_entries > 1) {
485 /* Select 0 as the default event: */
488 fprintf(stderr, "\nAvailable events:");
490 evlist__for_each_entry(top->evlist, top->sym_evsel)
491 fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
493 prompt_integer(&counter, "Enter details event counter");
495 if (counter >= top->evlist->nr_entries) {
496 top->sym_evsel = perf_evlist__first(top->evlist);
497 fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
501 evlist__for_each_entry(top->evlist, top->sym_evsel)
502 if (top->sym_evsel->idx == counter)
505 top->sym_evsel = perf_evlist__first(top->evlist);
508 prompt_integer(&top->count_filter, "Enter display event count filter");
511 prompt_percent(&top->annotation_opts.min_pcnt,
512 "Enter details display event filter (percent)");
515 top->hide_kernel_symbols = !top->hide_kernel_symbols;
519 printf("exiting.\n");
520 if (top->dump_symtab)
521 perf_session__fprintf_dsos(top->session, stderr);
525 perf_top__prompt_symbol(top, "Enter details symbol");
528 if (!top->sym_filter_entry)
531 struct hist_entry *syme = top->sym_filter_entry;
533 top->sym_filter_entry = NULL;
534 __zero_source_counters(syme);
538 top->hide_user_symbols = !top->hide_user_symbols;
541 top->zero = !top->zero;
550 static void perf_top__sort_new_samples(void *arg)
552 struct perf_top *t = arg;
553 struct perf_evsel *evsel = t->sym_evsel;
556 if (t->evlist->selected != NULL)
557 t->sym_evsel = t->evlist->selected;
559 hists = evsel__hists(evsel);
561 if (t->evlist->enabled) {
563 hists__delete_entries(hists);
565 hists__decay_entries(hists, t->hide_user_symbols,
566 t->hide_kernel_symbols);
570 hists__collapse_resort(hists, NULL);
571 perf_evsel__output_resort(evsel, NULL);
573 if (t->lost || t->drop)
574 pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C))\n");
577 static void stop_top(void)
583 static void *display_thread_tui(void *arg)
585 struct perf_evsel *pos;
586 struct perf_top *top = arg;
587 const char *help = "For a higher level overview, try: perf top --sort comm,dso";
588 struct hist_browser_timer hbt = {
589 .timer = perf_top__sort_new_samples,
591 .refresh = top->delay_secs,
594 /* In order to read symbols from other namespaces perf needs to call
595 * setns(2). This isn't permitted if the struct_fs has multiple users.
596 * unshare(2) the fs so that we may continue to setns into namespaces
597 * that we're observing.
601 perf_top__sort_new_samples(top);
604 * Initialize uid_filter_str; in the future the TUI will allow
605 * zooming in/out on UIDs. For now just use whatever the user passed
608 evlist__for_each_entry(top->evlist, pos) {
609 struct hists *hists = evsel__hists(pos);
610 hists->uid_filter_str = top->record_opts.target.uid_str;
613 perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
615 &top->session->header.env,
616 !top->record_opts.overwrite,
617 &top->annotation_opts);
623 static void display_sig(int sig __maybe_unused)
628 static void display_setup_sig(void)
630 signal(SIGSEGV, sighandler_dump_stack);
631 signal(SIGFPE, sighandler_dump_stack);
632 signal(SIGINT, display_sig);
633 signal(SIGQUIT, display_sig);
634 signal(SIGTERM, display_sig);
637 static void *display_thread(void *arg)
639 struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
641 struct perf_top *top = arg;
644 /* In order to read symbols from other namespaces perf needs to call
645 * setns(2). This isn't permitted if the struct_fs has multiple users.
646 * unshare(2) the fs so that we may continue to setns into namespaces
647 * that we're observing.
652 pthread__unblock_sigwinch();
654 delay_msecs = top->delay_secs * MSEC_PER_SEC;
655 set_term_quiet_input(&save);
660 perf_top__print_sym_table(top);
662 * Either timeout expired or we got an EINTR due to SIGWINCH,
663 * refresh screen in both cases.
665 switch (poll(&stdin_poll, 1, delay_msecs)) {
674 tcsetattr(0, TCSAFLUSH, &save);
676 if (perf_top__handle_keypress(top, c))
682 tcsetattr(0, TCSAFLUSH, &save);
686 static int hist_iter__top_callback(struct hist_entry_iter *iter,
687 struct addr_location *al, bool single,
690 struct perf_top *top = arg;
691 struct hist_entry *he = iter->he;
692 struct perf_evsel *evsel = iter->evsel;
694 if (perf_hpp_list.sym && single)
695 perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
697 hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
698 !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
702 static void perf_event__process_sample(struct perf_tool *tool,
703 const union perf_event *event,
704 struct perf_evsel *evsel,
705 struct perf_sample *sample,
706 struct machine *machine)
708 struct perf_top *top = container_of(tool, struct perf_top, tool);
709 struct addr_location al;
712 if (!machine && perf_guest) {
713 static struct intlist *seen;
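/* Guest pids we have already warned about, so the message is printed only once per guest. */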
716 seen = intlist__new(NULL);
718 if (!intlist__has_entry(seen, sample->pid)) {
719 pr_err("Can't find guest [%d]'s kernel information\n",
721 intlist__add(seen, sample->pid);
727 pr_err("%u unprocessable samples recorded.\r",
728 top->session->evlist->stats.nr_unprocessable_samples++);
732 if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
733 top->exact_samples++;
735 if (machine__resolve(machine, &al, sample) < 0)
738 if (!machine->kptr_restrict_warned &&
739 symbol_conf.kptr_restrict &&
740 al.cpumode == PERF_RECORD_MISC_KERNEL) {
741 if (!perf_evlist__exclude_kernel(top->session->evlist)) {
743 "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
744 "Check /proc/sys/kernel/kptr_restrict.\n\n"
745 "Kernel%s samples will not be resolved.\n",
746 al.map && map__has_symbols(al.map) ?
748 if (use_browser <= 0)
751 machine->kptr_restrict_warned = true;
754 if (al.sym == NULL && al.map != NULL) {
755 const char *msg = "Kernel samples will not be resolved.\n";
757 * As we do lazy loading of symtabs we only will know if the
758 * specified vmlinux file is invalid when we actually have a
759 * hit in kernel space and then try to load it. So if we get
760 * here and there are _no_ symbols in the DSO backing the
761 * kernel map, bail out.
763 * We may never get here, for instance, if we use -K/
764 * --hide-kernel-symbols, even if the user specifies an
765 * invalid --vmlinux ;-)
767 if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
768 __map__is_kernel(al.map) && map__has_symbols(al.map)) {
769 if (symbol_conf.vmlinux_name) {
771 dso__strerror_load(al.map->dso, serr, sizeof(serr));
772 ui__warning("The %s file can't be used: %s\n%s",
773 symbol_conf.vmlinux_name, serr, msg);
775 ui__warning("A vmlinux file was not found.\n%s",
779 if (use_browser <= 0)
781 top->vmlinux_warned = true;
785 if (al.sym == NULL || !al.sym->idle) {
786 struct hists *hists = evsel__hists(evsel);
787 struct hist_entry_iter iter = {
790 .add_entry_cb = hist_iter__top_callback,
793 if (symbol_conf.cumulate_callchain)
794 iter.ops = &hist_iter_cumulative;
796 iter.ops = &hist_iter_normal;
798 pthread_mutex_lock(&hists->lock);
800 err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
802 pr_err("Problem incrementing symbol period, skipping event\n");
804 pthread_mutex_unlock(&hists->lock);
807 addr_location__put(&al);
811 perf_top__process_lost(struct perf_top *top, union perf_event *event,
812 struct perf_evsel *evsel)
814 struct hists *hists = evsel__hists(evsel);
816 top->lost += event->lost.lost;
817 top->lost_total += event->lost.lost;
818 hists->stats.total_lost += event->lost.lost;
822 perf_top__process_lost_samples(struct perf_top *top,
823 union perf_event *event,
824 struct perf_evsel *evsel)
826 struct hists *hists = evsel__hists(evsel);
828 top->lost += event->lost_samples.lost;
829 top->lost_total += event->lost_samples.lost;
830 hists->stats.total_lost_samples += event->lost_samples.lost;
833 static u64 last_timestamp;
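/*
 * last_timestamp holds the timestamp of the most recently queued event;
 * should_drop() compares against it to discard samples that lag too far behind.
 */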
835 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
837 struct record_opts *opts = &top->record_opts;
838 struct perf_evlist *evlist = top->evlist;
839 struct perf_mmap *md;
840 union perf_event *event;
842 md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
843 if (perf_mmap__read_init(md) < 0)
846 while ((event = perf_mmap__read_event(md)) != NULL) {
849 ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
850 if (ret && ret != -1)
853 ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0);
857 perf_mmap__consume(md);
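/*
 * The process thread asked for a queue rotation: acknowledge it so it can
 * flush the old queue while we keep queueing into the new one.
 */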
859 if (top->qe.rotate) {
860 pthread_mutex_lock(&top->qe.mutex);
861 top->qe.rotate = false;
862 pthread_cond_signal(&top->qe.cond);
863 pthread_mutex_unlock(&top->qe.mutex);
867 perf_mmap__read_done(md);
870 static void perf_top__mmap_read(struct perf_top *top)
872 bool overwrite = top->record_opts.overwrite;
873 struct perf_evlist *evlist = top->evlist;
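/*
 * With backward (overwrite) ring buffers the maps are paused (DATA_PENDING)
 * while we read them and switched back to RUNNING afterwards.
 */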
877 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
879 for (i = 0; i < top->evlist->nr_mmaps; i++)
880 perf_top__mmap_read_idx(top, i);
883 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
884 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
889 * Check the per-event overwrite term.
890 * perf top should support a consistent term for all events.
891 * - No event has a per-event term
892 * E.g. "cpu/cpu-cycles/,cpu/instructions/"
893 * Nothing changes, return 0.
894 * - All events have the same per-event term
895 * E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
896 * Use the per-event setting to replace opts->overwrite if
897 * they differ, then return 0.
898 * - Events have different per-event terms
899 * E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
901 * - Some of the events set a per-event term, but some do not.
902 * E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
905 static int perf_top__overwrite_check(struct perf_top *top)
907 struct record_opts *opts = &top->record_opts;
908 struct perf_evlist *evlist = top->evlist;
909 struct perf_evsel_config_term *term;
910 struct list_head *config_terms;
911 struct perf_evsel *evsel;
912 int set, overwrite = -1;
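/* overwrite: consensus so far (-1 means no term seen yet); set: the current event's term. */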
914 evlist__for_each_entry(evlist, evsel) {
916 config_terms = &evsel->config_terms;
917 list_for_each_entry(term, config_terms, list) {
918 if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
919 set = term->val.overwrite ? 1 : 0;
922 /* no term for current and previous event (likely) */
923 if ((overwrite < 0) && (set < 0))
926 /* has term for both current and previous event, compare */
927 if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
930 /* no term for current event but has term for previous one */
931 if ((overwrite >= 0) && (set < 0))
934 /* has term for current event */
935 if ((overwrite < 0) && (set >= 0)) {
936 /* if it's first event, set overwrite */
937 if (evsel == perf_evlist__first(evlist))
944 if ((overwrite >= 0) && (opts->overwrite != overwrite))
945 opts->overwrite = overwrite;
950 static int perf_top_overwrite_fallback(struct perf_top *top,
951 struct perf_evsel *evsel)
953 struct record_opts *opts = &top->record_opts;
954 struct perf_evlist *evlist = top->evlist;
955 struct perf_evsel *counter;
957 if (!opts->overwrite)
960 /* only fall back when first event fails */
961 if (evsel != perf_evlist__first(evlist))
964 evlist__for_each_entry(evlist, counter)
965 counter->attr.write_backward = false;
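/* All events now use forward ring buffers; drop back to non-overwrite mode. */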
966 opts->overwrite = false;
967 pr_debug2("fall back to non-overwrite mode\n");
971 static int perf_top__start_counters(struct perf_top *top)
974 struct perf_evsel *counter;
975 struct perf_evlist *evlist = top->evlist;
976 struct record_opts *opts = &top->record_opts;
978 if (perf_top__overwrite_check(top)) {
979 ui__error("perf top only supports a consistent per-event "
980 "overwrite setting for all events\n");
984 perf_evlist__config(evlist, opts, &callchain_param);
986 evlist__for_each_entry(evlist, counter) {
988 if (perf_evsel__open(counter, top->evlist->cpus,
989 top->evlist->threads) < 0) {
992 * Specially handle the overwrite fallback.
993 * perf top is the only tool which has overwrite
994 * mode by default; it supports both overwrite
995 * and non-overwrite modes and requires a
996 * consistent mode for all events.
998 * This may be moved to generic code once more
999 * tools have a similar attribute.
1001 if (perf_missing_features.write_backward &&
1002 perf_top_overwrite_fallback(top, counter))
1005 if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
1007 ui__warning("%s\n", msg);
1011 perf_evsel__open_strerror(counter, &opts->target,
1012 errno, msg, sizeof(msg));
1013 ui__error("%s\n", msg);
1018 if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
1019 ui__error("Failed to mmap with %d (%s)\n",
1020 errno, str_error_r(errno, msg, sizeof(msg)));
1030 static int callchain_param__setup_sample_type(struct callchain_param *callchain)
1032 if (callchain->mode != CHAIN_NONE) {
1033 if (callchain_register_param(callchain) < 0) {
1034 ui__error("Can't register callchain params.\n");
1042 static struct ordered_events *rotate_queues(struct perf_top *top)
1044 struct ordered_events *in = top->qe.in;
1046 if (top->qe.in == &top->qe.data[1])
1047 top->qe.in = &top->qe.data[0];
1049 top->qe.in = &top->qe.data[1];
1054 static void *process_thread(void *arg)
1056 struct perf_top *top = arg;
1059 struct ordered_events *out, *in = top->qe.in;
1061 if (!in->nr_events) {
1066 out = rotate_queues(top);
1068 pthread_mutex_lock(&top->qe.mutex);
1069 top->qe.rotate = true;
1070 pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
1071 pthread_mutex_unlock(&top->qe.mutex);
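/*
 * The mmap reader acknowledged the rotation and is now queueing into the
 * other buffer, so the queue we just detached can be flushed safely.
 */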
1073 if (ordered_events__flush(out, OE_FLUSH__TOP))
1074 pr_err("failed to process events\n");
1081 * Allow samples to be at most 'top->delay_secs' seconds behind.
1083 static int should_drop(struct ordered_event *qevent, struct perf_top *top)
1085 union perf_event *event = qevent->event;
1086 u64 delay_timestamp;
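/* E.g. with a 2 second refresh delay, a sample timestamped more than 2s before the newest queued event is dropped. */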
1088 if (event->header.type != PERF_RECORD_SAMPLE)
1091 delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
1092 return delay_timestamp < last_timestamp;
1095 static int deliver_event(struct ordered_events *qe,
1096 struct ordered_event *qevent)
1098 struct perf_top *top = qe->data;
1099 struct perf_evlist *evlist = top->evlist;
1100 struct perf_session *session = top->session;
1101 union perf_event *event = qevent->event;
1102 struct perf_sample sample;
1103 struct perf_evsel *evsel;
1104 struct machine *machine;
1107 if (should_drop(qevent, top)) {
1113 ret = perf_evlist__parse_sample(evlist, event, &sample);
1115 pr_err("Can't parse sample, err = %d\n", ret);
1119 evsel = perf_evlist__id2evsel(session->evlist, sample.id);
1120 assert(evsel != NULL);
1122 if (event->header.type == PERF_RECORD_SAMPLE)
1125 switch (sample.cpumode) {
1126 case PERF_RECORD_MISC_USER:
1128 if (top->hide_user_symbols)
1130 machine = &session->machines.host;
1132 case PERF_RECORD_MISC_KERNEL:
1133 ++top->kernel_samples;
1134 if (top->hide_kernel_symbols)
1136 machine = &session->machines.host;
1138 case PERF_RECORD_MISC_GUEST_KERNEL:
1139 ++top->guest_kernel_samples;
1140 machine = perf_session__find_machine(session,
1143 case PERF_RECORD_MISC_GUEST_USER:
1144 ++top->guest_us_samples;
1146 * TODO: we don't process guest user samples from the
1147 * host side, except for simple counting.
1151 if (event->header.type == PERF_RECORD_SAMPLE)
1153 machine = &session->machines.host;
1157 if (event->header.type == PERF_RECORD_SAMPLE) {
1158 perf_event__process_sample(&top->tool, event, evsel,
1160 } else if (event->header.type == PERF_RECORD_LOST) {
1161 perf_top__process_lost(top, event, evsel);
1162 } else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
1163 perf_top__process_lost_samples(top, event, evsel);
1164 } else if (event->header.type < PERF_RECORD_MAX) {
1165 hists__inc_nr_events(evsel__hists(evsel), event->header.type);
1166 machine__process_event(machine, event, &sample);
1168 ++session->evlist->stats.nr_unknown_events;
1175 static void init_process_thread(struct perf_top *top)
1177 ordered_events__init(&top->qe.data[0], deliver_event, top);
1178 ordered_events__init(&top->qe.data[1], deliver_event, top);
1179 ordered_events__set_copy_on_queue(&top->qe.data[0], true);
1180 ordered_events__set_copy_on_queue(&top->qe.data[1], true);
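/*
 * The two ordered_events queues form a double buffer: the mmap reader fills
 * qe.in while process_thread flushes the other one; rotate_queues() swaps them.
 */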
1181 top->qe.in = &top->qe.data[0];
1182 pthread_mutex_init(&top->qe.mutex, NULL);
1183 pthread_cond_init(&top->qe.cond, NULL);
1186 static int __cmd_top(struct perf_top *top)
1188 struct record_opts *opts = &top->record_opts;
1189 pthread_t thread, thread_process;
1192 if (!top->annotation_opts.objdump_path) {
1193 ret = perf_env__lookup_objdump(&top->session->header.env,
1194 &top->annotation_opts.objdump_path);
1199 ret = callchain_param__setup_sample_type(&callchain_param);
1203 if (perf_session__register_idle_thread(top->session) < 0)
1206 if (top->nr_threads_synthesize > 1)
1207 perf_set_multithreaded();
1209 init_process_thread(top);
1211 ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
1212 &top->session->machines.host,
1215 pr_warning("Couldn't synthesize bpf events.\n");
1217 machine__synthesize_threads(&top->session->machines.host, &opts->target,
1218 top->evlist->threads, false,
1219 top->nr_threads_synthesize);
1221 if (top->nr_threads_synthesize > 1)
1222 perf_set_singlethreaded();
1224 if (perf_hpp_list.socket) {
1225 ret = perf_env__read_cpu_topology_map(&perf_env);
1227 char errbuf[BUFSIZ];
1228 const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
1230 ui__error("Could not read the CPU topology map: %s\n", err);
1235 ret = perf_top__start_counters(top);
1239 top->session->evlist = top->evlist;
1240 perf_session__set_id_hdr_size(top->session);
1243 * When perf is starting the traced process, all the events (apart from
1244 * group members) have enable_on_exec=1 set, so don't spoil it by
1245 * prematurely enabling them.
1247 * XXX 'top' still doesn't start workloads like record, trace, but should,
1248 * so leave the check here.
1250 if (!target__none(&opts->target))
1251 perf_evlist__enable(top->evlist);
1254 if (pthread_create(&thread_process, NULL, process_thread, top)) {
1255 ui__error("Could not create process thread.\n");
1259 if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
1260 display_thread), top)) {
1261 ui__error("Could not create display thread.\n");
1262 goto out_join_thread;
1265 if (top->realtime_prio) {
1266 struct sched_param param;
1268 param.sched_priority = top->realtime_prio;
1269 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
1270 ui__error("Could not set realtime priority.\n");
1275 /* Wait for a minimal set of events before starting the snapshot */
1276 perf_evlist__poll(top->evlist, 100);
1278 perf_top__mmap_read(top);
1281 u64 hits = top->samples;
1283 perf_top__mmap_read(top);
1285 if (opts->overwrite || (hits == top->samples))
1286 ret = perf_evlist__poll(top->evlist, 100);
1289 perf_top__resize(top);
1296 pthread_join(thread, NULL);
1298 pthread_cond_signal(&top->qe.cond);
1299 pthread_join(thread_process, NULL);
1304 callchain_opt(const struct option *opt, const char *arg, int unset)
1306 symbol_conf.use_callchain = true;
1307 return record_callchain_opt(opt, arg, unset);
1311 parse_callchain_opt(const struct option *opt, const char *arg, int unset)
1313 struct callchain_param *callchain = opt->value;
1315 callchain->enabled = !unset;
1316 callchain->record_mode = CALLCHAIN_FP;
1322 symbol_conf.use_callchain = false;
1323 callchain->record_mode = CALLCHAIN_NONE;
1327 return parse_callchain_top_opt(arg);
1330 static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
1332 if (!strcmp(var, "top.call-graph")) {
1333 var = "call-graph.record-mode";
1334 return perf_default_config(var, value, cb);
1336 if (!strcmp(var, "top.children")) {
1337 symbol_conf.cumulate_callchain = perf_config_bool(var, value);
1345 parse_percent_limit(const struct option *opt, const char *arg,
1346 int unset __maybe_unused)
1348 struct perf_top *top = opt->value;
1350 top->min_percent = strtof(arg, NULL);
1354 const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
1355 "\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
1357 int cmd_top(int argc, const char **argv)
1359 char errbuf[BUFSIZ];
1360 struct perf_top top = {
1364 .mmap_pages = UINT_MAX,
1365 .user_freq = UINT_MAX,
1366 .user_interval = ULLONG_MAX,
1367 .freq = 4000, /* 4 KHz */
1372 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
1373 * when we pause, fix that and reenable. Probably using a
1374 * separate evlist with a dummy event, i.e. a non-overwrite
1375 * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
1376 * stays in overwrite mode. -acme
1379 .sample_time = true,
1380 .sample_time_set = true,
1382 .max_stack = sysctl__max_stack(),
1383 .annotation_opts = annotation__default_options,
1384 .nr_threads_synthesize = UINT_MAX,
1386 struct record_opts *opts = &top.record_opts;
1387 struct target *target = &opts->target;
1388 const struct option options[] = {
1389 OPT_CALLBACK('e', "event", &top.evlist, "event",
1390 "event selector. use 'perf list' to list available events",
1391 parse_events_option),
1392 OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
1393 OPT_STRING('p', "pid", &target->pid, "pid",
1394 "profile events on existing process id"),
1395 OPT_STRING('t', "tid", &target->tid, "tid",
1396 "profile events on existing thread id"),
1397 OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
1398 "system-wide collection from all CPUs"),
1399 OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
1400 "list of cpus to monitor"),
1401 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1402 "file", "vmlinux pathname"),
1403 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
1404 "don't load vmlinux even if found"),
1405 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1406 "file", "kallsyms pathname"),
1407 OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
1408 "hide kernel symbols"),
1409 OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
1410 "number of mmap data pages",
1411 perf_evlist__parse_mmap_pages),
1412 OPT_INTEGER('r', "realtime", &top.realtime_prio,
1413 "collect data with this RT SCHED_FIFO priority"),
1414 OPT_INTEGER('d', "delay", &top.delay_secs,
1415 "number of seconds to delay between refreshes"),
1416 OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
1417 "dump the symbol table used for profiling"),
1418 OPT_INTEGER('f', "count-filter", &top.count_filter,
1419 "only display functions with more events than this"),
1420 OPT_BOOLEAN(0, "group", &opts->group,
1421 "put the counters into a counter group"),
1422 OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
1423 "child tasks do not inherit counters"),
1424 OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
1425 "symbol to annotate"),
1426 OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
1427 OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
1428 "profile at this frequency",
1429 record__parse_freq),
1430 OPT_INTEGER('E', "entries", &top.print_entries,
1431 "display this many functions"),
1432 OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
1433 "hide user symbols"),
1434 OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
1435 OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
1436 OPT_INCR('v', "verbose", &verbose,
1437 "be more verbose (show counter open errors, etc)"),
1438 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1439 "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
1440 " Please refer to the man page for the complete list."),
1441 OPT_STRING(0, "fields", &field_order, "key[,keys...]",
1442 "output field(s): overhead, period, sample plus all of sort keys"),
1443 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
1444 "Show a column with the number of samples"),
1445 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
1446 NULL, "enables call-graph recording and display",
1448 OPT_CALLBACK(0, "call-graph", &callchain_param,
1449 "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
1450 top_callchain_help, &parse_callchain_opt),
1451 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
1452 "Accumulate callchains of children and show total overhead as well"),
1453 OPT_INTEGER(0, "max-stack", &top.max_stack,
1454 "Set the maximum stack depth when parsing the callchain. "
1455 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
1456 OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
1457 "ignore callees of these functions in call graphs",
1458 report_parse_ignore_callees_opt),
1459 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
1460 "Show a column with the sum of periods"),
1461 OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
1462 "only consider symbols in these dsos"),
1463 OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
1464 "only consider symbols in these comms"),
1465 OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
1466 "only consider these symbols"),
1467 OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
1468 "Interleave source code with assembly code (default)"),
1469 OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
1470 "Display raw encoding of assembly instructions (default)"),
1471 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1472 "Enable kernel symbol demangling"),
1473 OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
1474 OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
1475 "objdump binary to use for disassembly and annotations"),
1476 OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
1477 "Specify disassembler style (e.g. -M intel for intel syntax)"),
1478 OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
1479 OPT_CALLBACK(0, "percent-limit", &top, "percent",
1480 "Don't show entries under that percent", parse_percent_limit),
1481 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
1482 "How to display percentage of filtered entries", parse_filter_percentage),
1483 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
1485 "don't try to adjust column width, use these fixed values"),
1486 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
1487 "per thread proc mmap processing timeout in ms"),
1488 OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
1489 "branch any", "sample any taken branches",
1490 parse_branch_stack),
1491 OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
1492 "branch filter mask", "branch stack filter modes",
1493 parse_branch_stack),
1494 OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
1495 "Show raw trace event output (do not use print fmt or plugins)"),
1496 OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
1497 "Show entries in a hierarchy"),
1498 OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
1499 "Use a backward ring buffer, default: no"),
1500 OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
1501 OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
1502 "number of threads to run event synthesis"),
1505 struct perf_evlist *sb_evlist = NULL;
1506 const char * const top_usage[] = {
1507 "perf top [<options>]",
1510 int status = hists__init();
1515 top.annotation_opts.min_pcnt = 5;
1516 top.annotation_opts.context = 4;
1518 top.evlist = perf_evlist__new();
1519 if (top.evlist == NULL)
1522 status = perf_config(perf_top_config, &top);
1526 argc = parse_options(argc, argv, options, top_usage, 0);
1528 usage_with_options(top_usage, options);
1530 if (!top.evlist->nr_entries &&
1531 perf_evlist__add_default(top.evlist) < 0) {
1532 pr_err("Not enough memory for event selector list\n");
1533 goto out_delete_evlist;
1536 if (symbol_conf.report_hierarchy) {
1537 /* disable incompatible options */
1538 symbol_conf.event_group = false;
1539 symbol_conf.cumulate_callchain = false;
1542 pr_err("Error: --hierarchy and --fields options cannot be used together\n");
1543 parse_options_usage(top_usage, options, "fields", 0);
1544 parse_options_usage(NULL, options, "hierarchy", 0);
1545 goto out_delete_evlist;
1549 if (opts->branch_stack && callchain_param.enabled)
1550 symbol_conf.show_branchflag_count = true;
1552 sort__mode = SORT_MODE__TOP;
1553 /* display thread wants entries to be collapsed in a different tree */
1554 perf_hpp_list.need_collapse = 1;
1558 else if (top.use_tui)
1561 setup_browser(false);
1563 if (setup_sorting(top.evlist) < 0) {
1565 parse_options_usage(top_usage, options, "s", 1);
1567 parse_options_usage(sort_order ? NULL : top_usage,
1568 options, "fields", 0);
1569 goto out_delete_evlist;
1572 status = target__validate(target);
1574 target__strerror(target, status, errbuf, BUFSIZ);
1575 ui__warning("%s\n", errbuf);
1578 status = target__parse_uid(target);
1580 int saved_errno = errno;
1582 target__strerror(target, status, errbuf, BUFSIZ);
1583 ui__error("%s\n", errbuf);
1585 status = -saved_errno;
1586 goto out_delete_evlist;
1589 if (target__none(target))
1590 target->system_wide = true;
1592 if (perf_evlist__create_maps(top.evlist, target) < 0) {
1593 ui__error("Couldn't create thread/CPU maps: %s\n",
1594 errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
1595 goto out_delete_evlist;
1598 if (top.delay_secs < 1)
1601 if (record_opts__config(opts)) {
1603 goto out_delete_evlist;
1606 top.sym_evsel = perf_evlist__first(top.evlist);
1608 if (!callchain_param.enabled) {
1609 symbol_conf.cumulate_callchain = false;
1610 perf_hpp__cancel_cumulate();
1613 if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
1614 callchain_param.order = ORDER_CALLER;
1616 status = symbol__annotation_init();
1618 goto out_delete_evlist;
1620 annotation_config__init();
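/* Only search the default vmlinux locations when the user didn't specify one. */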
1622 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
1623 status = symbol__init(NULL);
1625 goto out_delete_evlist;
1627 sort__setup_elide(stdout);
1629 get_term_dimensions(&top.winsize);
1630 if (top.print_entries == 0) {
1631 perf_top__update_print_entries(&top);
1632 signal(SIGWINCH, winch_sig);
1635 top.session = perf_session__new(NULL, false, NULL);
1636 if (top.session == NULL) {
1638 goto out_delete_evlist;
1641 if (!top.record_opts.no_bpf_event)
1642 bpf_event__add_sb_event(&sb_evlist, &perf_env);
1644 if (perf_evlist__start_sb_thread(sb_evlist, target)) {
1645 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1646 opts->no_bpf_event = true;
1649 status = __cmd_top(&top);
1651 if (!opts->no_bpf_event)
1652 perf_evlist__stop_sb_thread(sb_evlist);
1655 perf_evlist__delete(top.evlist);
1656 perf_session__delete(top.session);