1 // SPDX-License-Identifier: GPL-2.0
6 #include "util/evlist.h"
7 #include "util/evsel.h"
8 #include "util/config.h"
10 #include "util/symbol.h"
11 #include "util/thread.h"
12 #include "util/header.h"
13 #include "util/session.h"
14 #include "util/tool.h"
15 #include "util/callchain.h"
16 #include "util/time-utils.h"
18 #include <subcmd/pager.h>
19 #include <subcmd/parse-options.h>
20 #include "util/trace-event.h"
21 #include "util/data.h"
22 #include "util/cpumap.h"
24 #include "util/debug.h"
25 #include "util/string2.h"
27 #include <linux/kernel.h>
28 #include <linux/rbtree.h>
29 #include <linux/string.h>
30 #include <linux/zalloc.h>
36 #include <linux/ctype.h>
41 static long kmem_page_size;
45 } kmem_default = KMEM_SLAB; /* for backward compatibility */
48 typedef int (*sort_fn_t)(void *, void *);
50 static int alloc_flag;
51 static int caller_flag;
53 static int alloc_lines = -1;
54 static int caller_lines = -1;
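/*
 * Slab mode: per-pointer and per-callsite statistics live in the *_stat
 * rbtrees below and are re-keyed into the *_sorted trees once the user's
 * sort keys are known.
 */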
72 static struct rb_root root_alloc_stat;
73 static struct rb_root root_alloc_sorted;
74 static struct rb_root root_caller_stat;
75 static struct rb_root root_caller_sorted;
77 static unsigned long total_requested, total_allocated, total_freed;
78 static unsigned long nr_allocs, nr_cross_allocs;
80 /* filter restricting the analysis to the time range of interest (start,stop) */
81 static struct perf_time_interval ptime;
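/*
 * Record one slab allocation: find or create the per-pointer node in
 * root_alloc_stat and accumulate the requested/allocated byte counts.
 */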
84 static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
85 int bytes_req, int bytes_alloc, int cpu)
87 struct rb_node **node = &root_alloc_stat.rb_node;
88 struct rb_node *parent = NULL;
89 struct alloc_stat *data = NULL;
93 data = rb_entry(*node, struct alloc_stat, node);
96 node = &(*node)->rb_right;
97 else if (ptr < data->ptr)
98 node = &(*node)->rb_left;
103 if (data && data->ptr == ptr) {
105 data->bytes_req += bytes_req;
106 data->bytes_alloc += bytes_alloc;
108 data = malloc(sizeof(*data));
110 pr_err("%s: malloc failed\n", __func__);
116 data->bytes_req = bytes_req;
117 data->bytes_alloc = bytes_alloc;
119 rb_link_node(&data->node, parent, node);
120 rb_insert_color(&data->node, &root_alloc_stat);
122 data->call_site = call_site;
123 data->alloc_cpu = cpu;
124 data->last_alloc = bytes_alloc;
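/* Same accounting as insert_alloc_stat(), but keyed by call site. */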
129 static int insert_caller_stat(unsigned long call_site,
130 int bytes_req, int bytes_alloc)
132 struct rb_node **node = &root_caller_stat.rb_node;
133 struct rb_node *parent = NULL;
134 struct alloc_stat *data = NULL;
138 data = rb_entry(*node, struct alloc_stat, node);
140 if (call_site > data->call_site)
141 node = &(*node)->rb_right;
142 else if (call_site < data->call_site)
143 node = &(*node)->rb_left;
148 if (data && data->call_site == call_site) {
150 data->bytes_req += bytes_req;
151 data->bytes_alloc += bytes_alloc;
153 data = malloc(sizeof(*data));
155 pr_err("%s: malloc failed\n", __func__);
158 data->call_site = call_site;
161 data->bytes_req = bytes_req;
162 data->bytes_alloc = bytes_alloc;
164 rb_link_node(&data->node, parent, node);
165 rb_insert_color(&data->node, &root_caller_stat);
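/* Tracepoint handler shared by kmem:kmalloc and kmem:kmem_cache_alloc. */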
171 static int perf_evsel__process_alloc_event(struct evsel *evsel,
172 struct perf_sample *sample)
174 unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
175 call_site = perf_evsel__intval(evsel, sample, "call_site");
176 int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
177 bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
179 if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
180 insert_caller_stat(call_site, bytes_req, bytes_alloc))
183 total_requested += bytes_req;
184 total_allocated += bytes_alloc;
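/*
 * Handler for the _node variants; additionally counts allocations whose
 * node differs from the node of the requesting CPU (nr_cross_allocs).
 */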
190 static int perf_evsel__process_alloc_node_event(struct evsel *evsel,
191 struct perf_sample *sample)
193 int ret = perf_evsel__process_alloc_event(evsel, sample);
196 int node1 = cpu__get_node(sample->cpu),
197 node2 = perf_evsel__intval(evsel, sample, "node");
206 static int ptr_cmp(void *, void *);
207 static int slab_callsite_cmp(void *, void *);
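/* Walk an alloc_stat rbtree using the given compare callback as the key. */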
209 static struct alloc_stat *search_alloc_stat(unsigned long ptr,
210 unsigned long call_site,
211 struct rb_root *root,
214 struct rb_node *node = root->rb_node;
215 struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
218 struct alloc_stat *data;
221 data = rb_entry(node, struct alloc_stat, node);
223 cmp = sort_fn(&key, data);
225 node = node->rb_left;
227 node = node->rb_right;
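/*
 * Handle kfree/kmem_cache_free: credit the freed bytes and count a
 * "ping-pong" when the freeing CPU differs from the allocating CPU.
 */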
234 static int perf_evsel__process_free_event(struct evsel *evsel,
235 struct perf_sample *sample)
237 unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
238 struct alloc_stat *s_alloc, *s_caller;
240 s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
244 total_freed += s_alloc->last_alloc;
246 if ((short)sample->cpu != s_alloc->alloc_cpu) {
249 s_caller = search_alloc_stat(0, s_alloc->call_site,
254 s_caller->pingpong++;
256 s_alloc->alloc_cpu = -1;
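/*
 * Page allocator mode: global counters plus rbtrees keyed by page/pfn,
 * by allocation and by call site.
 */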
261 static u64 total_page_alloc_bytes;
262 static u64 total_page_free_bytes;
263 static u64 total_page_nomatch_bytes;
264 static u64 total_page_fail_bytes;
265 static unsigned long nr_page_allocs;
266 static unsigned long nr_page_frees;
267 static unsigned long nr_page_fails;
268 static unsigned long nr_page_nomatch;
271 static bool live_page;
272 static struct perf_session *kmem_session;
274 #define MAX_MIGRATE_TYPES 6
275 #define MAX_PAGE_ORDER 11
277 static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
285 unsigned migrate_type;
292 static struct rb_root page_live_tree;
293 static struct rb_root page_alloc_tree;
294 static struct rb_root page_alloc_sorted;
295 static struct rb_root page_caller_tree;
296 static struct rb_root page_caller_sorted;
304 static int nr_alloc_funcs;
305 static struct alloc_func *alloc_func_list;
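/*
 * funcmp() orders alloc_func entries by start address; callcmp() treats an
 * address falling inside [start, end) as a match, for use with bsearch().
 */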
307 static int funcmp(const void *a, const void *b)
309 const struct alloc_func *fa = a;
310 const struct alloc_func *fb = b;
312 if (fa->start > fb->start)
318 static int callcmp(const void *a, const void *b)
320 const struct alloc_func *fa = a;
321 const struct alloc_func *fb = b;
323 if (fb->start <= fa->start && fa->end < fb->end)
326 if (fa->start > fb->start)
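/*
 * Build a sorted table of kernel symbols matching the allocator-entry
 * pattern below so that find_callsite() can skip them in the callchain.
 */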
332 static int build_alloc_func_list(void)
335 struct map *kernel_map;
337 struct rb_node *node;
338 struct alloc_func *func;
339 struct machine *machine = &kmem_session->machines.host;
340 regex_t alloc_func_regex;
341 static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
343 ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
347 regerror(ret, &alloc_func_regex, err, sizeof(err));
348 pr_err("Invalid regex: %s\n%s", pattern, err);
352 kernel_map = machine__kernel_map(machine);
353 if (map__load(kernel_map) < 0) {
354 pr_err("cannot load kernel map\n");
358 map__for_each_symbol(kernel_map, sym, node) {
359 if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
362 func = realloc(alloc_func_list,
363 (nr_alloc_funcs + 1) * sizeof(*func));
367 pr_debug("alloc func: %s\n", sym->name);
368 func[nr_alloc_funcs].start = sym->start;
369 func[nr_alloc_funcs].end = sym->end;
370 func[nr_alloc_funcs].name = sym->name;
372 alloc_func_list = func;
376 qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
378 regfree(&alloc_func_regex);
383 * Find the first function in the callchain that is not itself a memory
384 * allocator; the known allocation functions are kept in 'alloc_func_list'.
386 static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
388 struct addr_location al;
389 struct machine *machine = &kmem_session->machines.host;
390 struct callchain_cursor_node *node;
392 if (alloc_func_list == NULL) {
393 if (build_alloc_func_list() < 0)
397 al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
398 sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);
400 callchain_cursor_commit(&callchain_cursor);
402 struct alloc_func key, *caller;
405 node = callchain_cursor_current(&callchain_cursor);
409 key.start = key.end = node->ip;
410 caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
411 sizeof(key), callcmp);
415 addr = map__unmap_ip(node->map, node->ip);
421 pr_debug3("skipping alloc function: %s\n", caller->name);
423 callchain_cursor_advance(&callchain_cursor);
427 pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
431 struct sort_dimension {
434 struct list_head list;
437 static LIST_HEAD(page_alloc_sort_input);
438 static LIST_HEAD(page_caller_sort_input);
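/*
 * Find (and optionally create) the page_stat node keyed by page/pfn in
 * page_live_tree, which tracks pages that are currently allocated.
 */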
440 static struct page_stat *
441 __page_stat__findnew_page(struct page_stat *pstat, bool create)
443 struct rb_node **node = &page_live_tree.rb_node;
444 struct rb_node *parent = NULL;
445 struct page_stat *data;
451 data = rb_entry(*node, struct page_stat, node);
453 cmp = data->page - pstat->page;
455 node = &parent->rb_left;
457 node = &parent->rb_right;
465 data = zalloc(sizeof(*data));
467 data->page = pstat->page;
468 data->order = pstat->order;
469 data->gfp_flags = pstat->gfp_flags;
470 data->migrate_type = pstat->migrate_type;
472 rb_link_node(&data->node, parent, node);
473 rb_insert_color(&data->node, &page_live_tree);
479 static struct page_stat *page_stat__find_page(struct page_stat *pstat)
481 return __page_stat__findnew_page(pstat, false);
484 static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
486 return __page_stat__findnew_page(pstat, true);
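/*
 * Same lookup in page_alloc_tree, keyed by the page_alloc_sort_input
 * dimensions (page, order, migrate type, gfp flags).
 */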
489 static struct page_stat *
490 __page_stat__findnew_alloc(struct page_stat *pstat, bool create)
492 struct rb_node **node = &page_alloc_tree.rb_node;
493 struct rb_node *parent = NULL;
494 struct page_stat *data;
495 struct sort_dimension *sort;
501 data = rb_entry(*node, struct page_stat, node);
503 list_for_each_entry(sort, &page_alloc_sort_input, list) {
504 cmp = sort->cmp(pstat, data);
510 node = &parent->rb_left;
512 node = &parent->rb_right;
520 data = zalloc(sizeof(*data));
522 data->page = pstat->page;
523 data->order = pstat->order;
524 data->gfp_flags = pstat->gfp_flags;
525 data->migrate_type = pstat->migrate_type;
527 rb_link_node(&data->node, parent, node);
528 rb_insert_color(&data->node, &page_alloc_tree);
534 static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
536 return __page_stat__findnew_alloc(pstat, false);
539 static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
541 return __page_stat__findnew_alloc(pstat, true);
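/*
 * Same lookup in page_caller_tree, keyed by the page_caller_sort_input
 * dimensions (call site, order, migrate type, gfp flags).
 */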
544 static struct page_stat *
545 __page_stat__findnew_caller(struct page_stat *pstat, bool create)
547 struct rb_node **node = &page_caller_tree.rb_node;
548 struct rb_node *parent = NULL;
549 struct page_stat *data;
550 struct sort_dimension *sort;
556 data = rb_entry(*node, struct page_stat, node);
558 list_for_each_entry(sort, &page_caller_sort_input, list) {
559 cmp = sort->cmp(pstat, data);
565 node = &parent->rb_left;
567 node = &parent->rb_right;
575 data = zalloc(sizeof(*data));
577 data->callsite = pstat->callsite;
578 data->order = pstat->order;
579 data->gfp_flags = pstat->gfp_flags;
580 data->migrate_type = pstat->migrate_type;
582 rb_link_node(&data->node, parent, node);
583 rb_insert_color(&data->node, &page_caller_tree);
589 static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
591 return __page_stat__findnew_caller(pstat, false);
594 static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
596 return __page_stat__findnew_caller(pstat, true);
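/* A pfn of -1 (or a NULL struct page) marks a failed allocation. */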
599 static bool valid_page(u64 pfn_or_page)
601 if (use_pfn && pfn_or_page == -1UL)
603 if (!use_pfn && pfn_or_page == 0)
611 char *human_readable;
614 static struct gfp_flag *gfps;
617 static int gfpcmp(const void *a, const void *b)
619 const struct gfp_flag *fa = a;
620 const struct gfp_flag *fb = b;
622 return fa->flags - fb->flags;
625 /* see include/trace/events/mmflags.h */
626 static const struct {
627 const char *original;
629 } gfp_compact_table[] = {
630 { "GFP_TRANSHUGE", "THP" },
631 { "GFP_TRANSHUGE_LIGHT", "THL" },
632 { "GFP_HIGHUSER_MOVABLE", "HUM" },
633 { "GFP_HIGHUSER", "HU" },
635 { "GFP_KERNEL_ACCOUNT", "KAC" },
636 { "GFP_KERNEL", "K" },
637 { "GFP_NOFS", "NF" },
638 { "GFP_ATOMIC", "A" },
639 { "GFP_NOIO", "NI" },
640 { "GFP_NOWAIT", "NW" },
642 { "__GFP_HIGHMEM", "HM" },
643 { "GFP_DMA32", "D32" },
644 { "__GFP_HIGH", "H" },
645 { "__GFP_ATOMIC", "_A" },
648 { "__GFP_NOWARN", "NWR" },
649 { "__GFP_RETRY_MAYFAIL", "R" },
650 { "__GFP_NOFAIL", "NF" },
651 { "__GFP_NORETRY", "NR" },
652 { "__GFP_COMP", "C" },
653 { "__GFP_ZERO", "Z" },
654 { "__GFP_NOMEMALLOC", "NMA" },
655 { "__GFP_MEMALLOC", "MA" },
656 { "__GFP_HARDWALL", "HW" },
657 { "__GFP_THISNODE", "TN" },
658 { "__GFP_RECLAIMABLE", "RC" },
659 { "__GFP_MOVABLE", "M" },
660 { "__GFP_ACCOUNT", "AC" },
661 { "__GFP_WRITE", "WR" },
662 { "__GFP_RECLAIM", "R" },
663 { "__GFP_DIRECT_RECLAIM", "DR" },
664 { "__GFP_KSWAPD_RECLAIM", "KR" },
667 static size_t max_gfp_len;
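/*
 * Rewrite a "GFP_KERNEL|__GFP_ZERO|..." style string using the short names
 * from gfp_compact_table (e.g. "K|Z") and remember the widest result for
 * column formatting.
 */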
669 static char *compact_gfp_flags(char *gfp_flags)
671 char *orig_flags = strdup(gfp_flags);
672 char *new_flags = NULL;
673 char *str, *pos = NULL;
676 if (orig_flags == NULL)
679 str = strtok_r(orig_flags, "|", &pos);
685 for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
686 if (strcmp(gfp_compact_table[i].original, str))
689 cpt = gfp_compact_table[i].compact;
690 new = realloc(new_flags, len + strlen(cpt) + 2);
699 strcpy(new_flags, cpt);
701 strcat(new_flags, "|");
702 strcat(new_flags, cpt);
709 str = strtok_r(NULL, "|", &pos);
712 if (max_gfp_len < len)
719 static char *compact_gfp_string(unsigned long gfp_flags)
721 struct gfp_flag key = {
724 struct gfp_flag *gfp;
726 gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
728 return gfp->compact_str;
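/*
 * On first sight of a gfp_flags value, pretty-print the raw tracepoint
 * record to recover its human-readable form and cache both that and the
 * compacted string in the 'gfps' table.
 */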
733 static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
734 unsigned int gfp_flags)
736 struct tep_record record = {
738 .data = sample->raw_data,
739 .size = sample->raw_size,
741 struct trace_seq seq;
742 char *str, *pos = NULL;
745 struct gfp_flag key = {
749 if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
753 trace_seq_init(&seq);
754 tep_print_event(evsel->tp_format->tep,
755 &seq, &record, "%s", TEP_PRINT_INFO);
757 str = strtok_r(seq.buffer, " ", &pos);
759 if (!strncmp(str, "gfp_flags=", 10)) {
760 struct gfp_flag *new;
762 new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
769 new->flags = gfp_flags;
770 new->human_readable = strdup(str + 10);
771 new->compact_str = compact_gfp_flags(str + 10);
772 if (!new->human_readable || !new->compact_str)
775 qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
778 str = strtok_r(NULL, " ", &pos);
781 trace_seq_destroy(&seq);
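/*
 * Handle mm_page_alloc: account the allocation in the live-page, per-alloc
 * and per-caller trees and bump the order/migrate-type histogram.
 */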
785 static int perf_evsel__process_page_alloc_event(struct evsel *evsel,
786 struct perf_sample *sample)
789 unsigned int order = perf_evsel__intval(evsel, sample, "order");
790 unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
791 unsigned int migrate_type = perf_evsel__intval(evsel, sample,
793 u64 bytes = kmem_page_size << order;
795 struct page_stat *pstat;
796 struct page_stat this = {
798 .gfp_flags = gfp_flags,
799 .migrate_type = migrate_type,
803 page = perf_evsel__intval(evsel, sample, "pfn");
805 page = perf_evsel__intval(evsel, sample, "page");
808 total_page_alloc_bytes += bytes;
810 if (!valid_page(page)) {
812 total_page_fail_bytes += bytes;
817 if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
820 callsite = find_callsite(evsel, sample);
823 * Track the page in the live tree so that the matching free event can
824 * recover its gfp flags and migrate type.
827 pstat = page_stat__findnew_page(&this);
832 pstat->alloc_bytes += bytes;
833 pstat->callsite = callsite;
836 pstat = page_stat__findnew_alloc(&this);
841 pstat->alloc_bytes += bytes;
842 pstat->callsite = callsite;
845 this.callsite = callsite;
846 pstat = page_stat__findnew_caller(&this);
851 pstat->alloc_bytes += bytes;
853 order_stats[order][migrate_type]++;
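/*
 * Handle mm_page_free: look up the matching live page, move its bytes from
 * the allocated to the freed side and drop it from the live tree; frees
 * with no matching allocation are counted as 'nomatch'.
 */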
858 static int perf_evsel__process_page_free_event(struct evsel *evsel,
859 struct perf_sample *sample)
862 unsigned int order = perf_evsel__intval(evsel, sample, "order");
863 u64 bytes = kmem_page_size << order;
864 struct page_stat *pstat;
865 struct page_stat this = {
870 page = perf_evsel__intval(evsel, sample, "pfn");
872 page = perf_evsel__intval(evsel, sample, "page");
875 total_page_free_bytes += bytes;
878 pstat = page_stat__find_page(&this);
880 pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
884 total_page_nomatch_bytes += bytes;
889 this.gfp_flags = pstat->gfp_flags;
890 this.migrate_type = pstat->migrate_type;
891 this.callsite = pstat->callsite;
893 rb_erase(&pstat->node, &page_live_tree);
897 order_stats[this.order][this.migrate_type]--;
899 pstat = page_stat__find_alloc(&this);
904 pstat->free_bytes += bytes;
907 pstat = page_stat__find_caller(&this);
912 pstat->free_bytes += bytes;
916 pstat->alloc_bytes -= bytes;
918 if (pstat->nr_alloc == 0) {
919 rb_erase(&pstat->node, &page_caller_tree);
927 static bool perf_kmem__skip_sample(struct perf_sample *sample)
929 /* drop samples that fall outside the requested time range */
930 if (perf_time__skip_sample(&ptime, sample->time))
936 typedef int (*tracepoint_handler)(struct evsel *evsel,
937 struct perf_sample *sample);
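/* Dispatch each sample to the handler registered for its tracepoint. */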
939 static int process_sample_event(struct perf_tool *tool __maybe_unused,
940 union perf_event *event,
941 struct perf_sample *sample,
943 struct machine *machine)
946 struct thread *thread = machine__findnew_thread(machine, sample->pid,
949 if (thread == NULL) {
950 pr_debug("problem processing %d event, skipping it.\n",
955 if (perf_kmem__skip_sample(sample))
958 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
960 if (evsel->handler != NULL) {
961 tracepoint_handler f = evsel->handler;
962 err = f(evsel, sample);
970 static struct perf_tool perf_kmem = {
971 .sample = process_sample_event,
972 .comm = perf_event__process_comm,
973 .mmap = perf_event__process_mmap,
974 .mmap2 = perf_event__process_mmap2,
975 .namespaces = perf_event__process_namespaces,
976 .ordered_events = true,
979 static double fragmentation(unsigned long n_req, unsigned long n_alloc)
984 return 100.0 - (100.0 * n_req / n_alloc);
987 static void __print_slab_result(struct rb_root *root,
988 struct perf_session *session,
989 int n_lines, int is_caller)
991 struct rb_node *next;
992 struct machine *machine = &session->machines.host;
994 printf("%.105s\n", graph_dotted_line);
995 printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
996 printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
997 printf("%.105s\n", graph_dotted_line);
999 next = rb_first(root);
1001 while (next && n_lines--) {
1002 struct alloc_stat *data = rb_entry(next, struct alloc_stat,
1004 struct symbol *sym = NULL;
1010 addr = data->call_site;
1012 sym = machine__find_kernel_symbol(machine, addr, &map);
1017 snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
1018 addr - map->unmap_ip(map, sym->start));
1020 snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
1021 printf(" %-34s |", buf);
1023 printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
1024 (unsigned long long)data->bytes_alloc,
1025 (unsigned long)data->bytes_alloc / data->hit,
1026 (unsigned long long)data->bytes_req,
1027 (unsigned long)data->bytes_req / data->hit,
1028 (unsigned long)data->hit,
1029 (unsigned long)data->pingpong,
1030 fragmentation(data->bytes_req, data->bytes_alloc));
1032 next = rb_next(next);
1036 printf(" ... | ... | ... | ... | ... | ... \n");
1038 printf("%.105s\n", graph_dotted_line);
1041 static const char * const migrate_type_str[] = {
1050 static void __print_page_alloc_result(struct perf_session *session, int n_lines)
1052 struct rb_node *next = rb_first(&page_alloc_sorted);
1053 struct machine *machine = &session->machines.host;
1055 int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1057 printf("\n%.105s\n", graph_dotted_line);
1058 printf(" %-16s | %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
1059 use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
1060 gfp_len, "GFP flags");
1061 printf("%.105s\n", graph_dotted_line);
1064 format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1066 format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1068 while (next && n_lines--) {
1069 struct page_stat *data;
1075 data = rb_entry(next, struct page_stat, node);
1076 sym = machine__find_kernel_symbol(machine, data->callsite, &map);
1080 scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1082 printf(format, (unsigned long long)data->page,
1083 (unsigned long long)data->alloc_bytes / 1024,
1084 data->nr_alloc, data->order,
1085 migrate_type_str[data->migrate_type],
1086 gfp_len, compact_gfp_string(data->gfp_flags), caller);
1088 next = rb_next(next);
1091 if (n_lines == -1) {
1092 printf(" ... | ... | ... | ... | ... | %-*s | ...\n",
1096 printf("%.105s\n", graph_dotted_line);
1099 static void __print_page_caller_result(struct perf_session *session, int n_lines)
1101 struct rb_node *next = rb_first(&page_caller_sorted);
1102 struct machine *machine = &session->machines.host;
1103 int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1105 printf("\n%.105s\n", graph_dotted_line);
1106 printf(" %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
1107 live_page ? "Live" : "Total", gfp_len, "GFP flags");
1108 printf("%.105s\n", graph_dotted_line);
1110 while (next && n_lines--) {
1111 struct page_stat *data;
1117 data = rb_entry(next, struct page_stat, node);
1118 sym = machine__find_kernel_symbol(machine, data->callsite, &map);
1122 scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1124 printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
1125 (unsigned long long)data->alloc_bytes / 1024,
1126 data->nr_alloc, data->order,
1127 migrate_type_str[data->migrate_type],
1128 gfp_len, compact_gfp_string(data->gfp_flags), caller);
1130 next = rb_next(next);
1133 if (n_lines == -1) {
1134 printf(" ... | ... | ... | ... | %-*s | ...\n",
1138 printf("%.105s\n", graph_dotted_line);
1141 static void print_gfp_flags(void)
1146 printf("# GFP flags\n");
1147 printf("# ---------\n");
1148 for (i = 0; i < nr_gfps; i++) {
1149 printf("# %08x: %*s: %s\n", gfps[i].flags,
1150 (int) max_gfp_len, gfps[i].compact_str,
1151 gfps[i].human_readable);
1155 static void print_slab_summary(void)
1157 printf("\nSUMMARY (SLAB allocator)");
1158 printf("\n========================\n");
1159 printf("Total bytes requested: %'lu\n", total_requested);
1160 printf("Total bytes allocated: %'lu\n", total_allocated);
1161 printf("Total bytes freed: %'lu\n", total_freed);
1162 if (total_allocated > total_freed) {
1163 printf("Net total bytes allocated: %'lu\n",
1164 total_allocated - total_freed);
1166 printf("Total bytes wasted on internal fragmentation: %'lu\n",
1167 total_allocated - total_requested);
1168 printf("Internal fragmentation: %f%%\n",
1169 fragmentation(total_requested, total_allocated));
1170 printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
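/*
 * "alloc+freed" covers pages whose allocation and free were both seen in
 * the trace; "alloc-only" and "free-only" count events whose counterpart
 * fell outside the recorded window.
 */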
1173 static void print_page_summary(void)
1176 u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
1177 u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;
1179 printf("\nSUMMARY (page allocator)");
1180 printf("\n========================\n");
1181 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests",
1182 nr_page_allocs, total_page_alloc_bytes / 1024);
1183 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests",
1184 nr_page_frees, total_page_free_bytes / 1024);
1187 printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
1188 nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
1189 printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
1190 nr_page_allocs - nr_alloc_freed,
1191 (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
1192 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
1193 nr_page_nomatch, total_page_nomatch_bytes / 1024);
1196 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures",
1197 nr_page_fails, total_page_fail_bytes / 1024);
1200 printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable",
1201 "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
1202 printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line,
1203 graph_dotted_line, graph_dotted_line, graph_dotted_line,
1204 graph_dotted_line, graph_dotted_line);
1206 for (o = 0; o < MAX_PAGE_ORDER; o++) {
1208 for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
1209 if (order_stats[o][m])
1210 printf(" %'12d", order_stats[o][m]);
1212 printf(" %12c", '.');
1218 static void print_slab_result(struct perf_session *session)
1221 __print_slab_result(&root_caller_sorted, session, caller_lines, 1);
1223 __print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
1224 print_slab_summary();
1227 static void print_page_result(struct perf_session *session)
1229 if (caller_flag || alloc_flag)
1232 __print_page_caller_result(session, caller_lines);
1234 __print_page_alloc_result(session, alloc_lines);
1235 print_page_summary();
1238 static void print_result(struct perf_session *session)
1241 print_slab_result(session);
1243 print_page_result(session);
1246 static LIST_HEAD(slab_caller_sort);
1247 static LIST_HEAD(slab_alloc_sort);
1248 static LIST_HEAD(page_caller_sort);
1249 static LIST_HEAD(page_alloc_sort);
1251 static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
1252 struct list_head *sort_list)
1254 struct rb_node **new = &(root->rb_node);
1255 struct rb_node *parent = NULL;
1256 struct sort_dimension *sort;
1259 struct alloc_stat *this;
1262 this = rb_entry(*new, struct alloc_stat, node);
1265 list_for_each_entry(sort, sort_list, list) {
1266 cmp = sort->cmp(data, this);
1272 new = &((*new)->rb_left);
1274 new = &((*new)->rb_right);
1277 rb_link_node(&data->node, parent, new);
1278 rb_insert_color(&data->node, root);
1281 static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
1282 struct list_head *sort_list)
1284 struct rb_node *node;
1285 struct alloc_stat *data;
1288 node = rb_first(root);
1292 rb_erase(node, root);
1293 data = rb_entry(node, struct alloc_stat, node);
1294 sort_slab_insert(root_sorted, data, sort_list);
1298 static void sort_page_insert(struct rb_root *root, struct page_stat *data,
1299 struct list_head *sort_list)
1301 struct rb_node **new = &root->rb_node;
1302 struct rb_node *parent = NULL;
1303 struct sort_dimension *sort;
1306 struct page_stat *this;
1309 this = rb_entry(*new, struct page_stat, node);
1312 list_for_each_entry(sort, sort_list, list) {
1313 cmp = sort->cmp(data, this);
1319 new = &parent->rb_left;
1321 new = &parent->rb_right;
1324 rb_link_node(&data->node, parent, new);
1325 rb_insert_color(&data->node, root);
1328 static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
1329 struct list_head *sort_list)
1331 struct rb_node *node;
1332 struct page_stat *data;
1335 node = rb_first(root);
1339 rb_erase(node, root);
1340 data = rb_entry(node, struct page_stat, node);
1341 sort_page_insert(root_sorted, data, sort_list);
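/*
 * Re-key the collected statistics into the *_sorted trees according to the
 * user-selected sort dimensions before printing.
 */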
1345 static void sort_result(void)
1348 __sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
1350 __sort_slab_result(&root_caller_stat, &root_caller_sorted,
1355 __sort_page_result(&page_live_tree, &page_alloc_sorted,
1358 __sort_page_result(&page_alloc_tree, &page_alloc_sorted,
1361 __sort_page_result(&page_caller_tree, &page_caller_sorted,
1366 static int __cmd_kmem(struct perf_session *session)
1369 struct evsel *evsel;
1370 const struct evsel_str_handler kmem_tracepoints[] = {
1371 /* slab allocator */
1372 { "kmem:kmalloc", perf_evsel__process_alloc_event, },
1373 { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
1374 { "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
1375 { "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
1376 { "kmem:kfree", perf_evsel__process_free_event, },
1377 { "kmem:kmem_cache_free", perf_evsel__process_free_event, },
1378 /* page allocator */
1379 { "kmem:mm_page_alloc", perf_evsel__process_page_alloc_event, },
1380 { "kmem:mm_page_free", perf_evsel__process_page_free_event, },
1383 if (!perf_session__has_traces(session, "kmem record"))
1386 if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
1387 pr_err("Initializing perf session tracepoint handlers failed\n");
1391 evlist__for_each_entry(session->evlist, evsel) {
1392 if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
1393 perf_evsel__field(evsel, "pfn")) {
1400 err = perf_session__process_events(session);
1402 pr_err("error during process events: %d\n", err);
1406 print_result(session);
1411 /* slab sort keys */
1412 static int ptr_cmp(void *a, void *b)
1414 struct alloc_stat *l = a;
1415 struct alloc_stat *r = b;
1417 if (l->ptr < r->ptr)
1419 else if (l->ptr > r->ptr)
1424 static struct sort_dimension ptr_sort_dimension = {
1429 static int slab_callsite_cmp(void *a, void *b)
1431 struct alloc_stat *l = a;
1432 struct alloc_stat *r = b;
1434 if (l->call_site < r->call_site)
1436 else if (l->call_site > r->call_site)
1441 static struct sort_dimension callsite_sort_dimension = {
1443 .cmp = slab_callsite_cmp,
1446 static int hit_cmp(void *a, void *b)
1448 struct alloc_stat *l = a;
1449 struct alloc_stat *r = b;
1451 if (l->hit < r->hit)
1453 else if (l->hit > r->hit)
1458 static struct sort_dimension hit_sort_dimension = {
1463 static int bytes_cmp(void *a, void *b)
1465 struct alloc_stat *l = a;
1466 struct alloc_stat *r = b;
1468 if (l->bytes_alloc < r->bytes_alloc)
1470 else if (l->bytes_alloc > r->bytes_alloc)
1475 static struct sort_dimension bytes_sort_dimension = {
1480 static int frag_cmp(void *a, void *b)
1483 struct alloc_stat *l = a;
1484 struct alloc_stat *r = b;
1486 x = fragmentation(l->bytes_req, l->bytes_alloc);
1487 y = fragmentation(r->bytes_req, r->bytes_alloc);
1496 static struct sort_dimension frag_sort_dimension = {
1501 static int pingpong_cmp(void *a, void *b)
1503 struct alloc_stat *l = a;
1504 struct alloc_stat *r = b;
1506 if (l->pingpong < r->pingpong)
1508 else if (l->pingpong > r->pingpong)
1513 static struct sort_dimension pingpong_sort_dimension = {
1515 .cmp = pingpong_cmp,
1518 /* page sort keys */
1519 static int page_cmp(void *a, void *b)
1521 struct page_stat *l = a;
1522 struct page_stat *r = b;
1524 if (l->page < r->page)
1526 else if (l->page > r->page)
1531 static struct sort_dimension page_sort_dimension = {
1536 static int page_callsite_cmp(void *a, void *b)
1538 struct page_stat *l = a;
1539 struct page_stat *r = b;
1541 if (l->callsite < r->callsite)
1543 else if (l->callsite > r->callsite)
1548 static struct sort_dimension page_callsite_sort_dimension = {
1550 .cmp = page_callsite_cmp,
1553 static int page_hit_cmp(void *a, void *b)
1555 struct page_stat *l = a;
1556 struct page_stat *r = b;
1558 if (l->nr_alloc < r->nr_alloc)
1560 else if (l->nr_alloc > r->nr_alloc)
1565 static struct sort_dimension page_hit_sort_dimension = {
1567 .cmp = page_hit_cmp,
1570 static int page_bytes_cmp(void *a, void *b)
1572 struct page_stat *l = a;
1573 struct page_stat *r = b;
1575 if (l->alloc_bytes < r->alloc_bytes)
1577 else if (l->alloc_bytes > r->alloc_bytes)
1582 static struct sort_dimension page_bytes_sort_dimension = {
1584 .cmp = page_bytes_cmp,
1587 static int page_order_cmp(void *a, void *b)
1589 struct page_stat *l = a;
1590 struct page_stat *r = b;
1592 if (l->order < r->order)
1594 else if (l->order > r->order)
1599 static struct sort_dimension page_order_sort_dimension = {
1601 .cmp = page_order_cmp,
1604 static int migrate_type_cmp(void *a, void *b)
1606 struct page_stat *l = a;
1607 struct page_stat *r = b;
1609 /* internal use: -1U acts as a wildcard when looking up a freed page */
1610 if (l->migrate_type == -1U)
1613 if (l->migrate_type < r->migrate_type)
1615 else if (l->migrate_type > r->migrate_type)
1620 static struct sort_dimension migrate_type_sort_dimension = {
1622 .cmp = migrate_type_cmp,
1625 static int gfp_flags_cmp(void *a, void *b)
1627 struct page_stat *l = a;
1628 struct page_stat *r = b;
1630 /* internal use: -1U acts as a wildcard when looking up a freed page */
1631 if (l->gfp_flags == -1U)
1634 if (l->gfp_flags < r->gfp_flags)
1636 else if (l->gfp_flags > r->gfp_flags)
1641 static struct sort_dimension gfp_flags_sort_dimension = {
1643 .cmp = gfp_flags_cmp,
1646 static struct sort_dimension *slab_sorts[] = {
1647 &ptr_sort_dimension,
1648 &callsite_sort_dimension,
1649 &hit_sort_dimension,
1650 &bytes_sort_dimension,
1651 &frag_sort_dimension,
1652 &pingpong_sort_dimension,
1655 static struct sort_dimension *page_sorts[] = {
1656 &page_sort_dimension,
1657 &page_callsite_sort_dimension,
1658 &page_hit_sort_dimension,
1659 &page_bytes_sort_dimension,
1660 &page_order_sort_dimension,
1661 &migrate_type_sort_dimension,
1662 &gfp_flags_sort_dimension,
1665 static int slab_sort_dimension__add(const char *tok, struct list_head *list)
1667 struct sort_dimension *sort;
1670 for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
1671 if (!strcmp(slab_sorts[i]->name, tok)) {
1672 sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
1674 pr_err("%s: memdup failed\n", __func__);
1677 list_add_tail(&sort->list, list);
1685 static int page_sort_dimension__add(const char *tok, struct list_head *list)
1687 struct sort_dimension *sort;
1690 for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
1691 if (!strcmp(page_sorts[i]->name, tok)) {
1692 sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
1694 pr_err("%s: memdup failed\n", __func__);
1697 list_add_tail(&sort->list, list);
1705 static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
1708 char *str = strdup(arg);
1712 pr_err("%s: strdup failed\n", __func__);
1717 tok = strsep(&pos, ",");
1720 if (slab_sort_dimension__add(tok, sort_list) < 0) {
1721 pr_err("Unknown slab --sort key: '%s'", tok);
1731 static int setup_page_sorting(struct list_head *sort_list, const char *arg)
1734 char *str = strdup(arg);
1738 pr_err("%s: strdup failed\n", __func__);
1743 tok = strsep(&pos, ",");
1746 if (page_sort_dimension__add(tok, sort_list) < 0) {
1747 pr_err("Unknown page --sort key: '%s'", tok);
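/*
 * --sort applies to whichever of --caller/--alloc and --slab/--page was
 * given last on the command line, falling back to the configured default.
 */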
1757 static int parse_sort_opt(const struct option *opt __maybe_unused,
1758 const char *arg, int unset __maybe_unused)
1763 if (kmem_page > kmem_slab ||
1764 (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
1765 if (caller_flag > alloc_flag)
1766 return setup_page_sorting(&page_caller_sort, arg);
1768 return setup_page_sorting(&page_alloc_sort, arg);
1770 if (caller_flag > alloc_flag)
1771 return setup_slab_sorting(&slab_caller_sort, arg);
1773 return setup_slab_sorting(&slab_alloc_sort, arg);
1779 static int parse_caller_opt(const struct option *opt __maybe_unused,
1780 const char *arg __maybe_unused,
1781 int unset __maybe_unused)
1783 caller_flag = (alloc_flag + 1);
1787 static int parse_alloc_opt(const struct option *opt __maybe_unused,
1788 const char *arg __maybe_unused,
1789 int unset __maybe_unused)
1791 alloc_flag = (caller_flag + 1);
1795 static int parse_slab_opt(const struct option *opt __maybe_unused,
1796 const char *arg __maybe_unused,
1797 int unset __maybe_unused)
1799 kmem_slab = (kmem_page + 1);
1803 static int parse_page_opt(const struct option *opt __maybe_unused,
1804 const char *arg __maybe_unused,
1805 int unset __maybe_unused)
1807 kmem_page = (kmem_slab + 1);
1811 static int parse_line_opt(const struct option *opt __maybe_unused,
1812 const char *arg, int unset __maybe_unused)
1819 lines = strtoul(arg, NULL, 10);
1821 if (caller_flag > alloc_flag)
1822 caller_lines = lines;
1824 alloc_lines = lines;
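/*
 * Build and run the equivalent 'perf record' command line, e.g. for slab
 * mode: perf record -a -R -c 1 -e kmem:kmalloc ... -e kmem:kmem_cache_free.
 * Page mode additionally passes '-g' so callsites can be resolved later.
 */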
1829 static int __cmd_record(int argc, const char **argv)
1831 const char * const record_args[] = {
1832 "record", "-a", "-R", "-c", "1",
1834 const char * const slab_events[] = {
1835 "-e", "kmem:kmalloc",
1836 "-e", "kmem:kmalloc_node",
1838 "-e", "kmem:kmem_cache_alloc",
1839 "-e", "kmem:kmem_cache_alloc_node",
1840 "-e", "kmem:kmem_cache_free",
1842 const char * const page_events[] = {
1843 "-e", "kmem:mm_page_alloc",
1844 "-e", "kmem:mm_page_free",
1846 unsigned int rec_argc, i, j;
1847 const char **rec_argv;
1849 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1851 rec_argc += ARRAY_SIZE(slab_events);
1853 rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
1855 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1857 if (rec_argv == NULL)
1860 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1861 rec_argv[i] = strdup(record_args[i]);
1864 for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
1865 rec_argv[i] = strdup(slab_events[j]);
1868 rec_argv[i++] = strdup("-g");
1870 for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
1871 rec_argv[i] = strdup(page_events[j]);
1874 for (j = 1; j < (unsigned int)argc; j++, i++)
1875 rec_argv[i] = argv[j];
1877 return cmd_record(i, rec_argv);
1880 static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
1882 if (!strcmp(var, "kmem.default")) {
1883 if (!strcmp(value, "slab"))
1884 kmem_default = KMEM_SLAB;
1885 else if (!strcmp(value, "page"))
1886 kmem_default = KMEM_PAGE;
1888 pr_err("invalid default value ('slab' or 'page' required): %s\n",
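/*
 * Entry point for 'perf kmem {record,stat}'. Illustrative invocations:
 *   perf kmem record                        # capture kmem tracepoints
 *   perf kmem stat --caller --sort hit      # per-callsite slab statistics
 *   perf kmem stat --page --live            # only pages still allocated
 */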
1896 int cmd_kmem(int argc, const char **argv)
1898 const char * const default_slab_sort = "frag,hit,bytes";
1899 const char * const default_page_sort = "bytes,hit";
1900 struct perf_data data = {
1901 .mode = PERF_DATA_MODE_READ,
1903 const struct option kmem_options[] = {
1904 OPT_STRING('i', "input", &input_name, "file", "input file name"),
1905 OPT_INCR('v', "verbose", &verbose,
1906 "be more verbose (show symbol address, etc)"),
1907 OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
1908 "show per-callsite statistics", parse_caller_opt),
1909 OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
1910 "show per-allocation statistics", parse_alloc_opt),
1911 OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
1912 "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
1913 "page, order, migtype, gfp", parse_sort_opt),
1914 OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
1915 OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
1916 OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
1917 OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
1919 OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
1921 OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
1922 OPT_STRING(0, "time", &time_str, "str",
1923 "Time span of interest (start,stop)"),
1926 const char *const kmem_subcommands[] = { "record", "stat", NULL };
1927 const char *kmem_usage[] = {
1931 struct perf_session *session;
1932 static const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
1933 int ret = perf_config(kmem_config, NULL);
1938 argc = parse_options_subcommand(argc, argv, kmem_options,
1939 kmem_subcommands, kmem_usage, 0);
1942 usage_with_options(kmem_usage, kmem_options);
1944 if (kmem_slab == 0 && kmem_page == 0) {
1945 if (kmem_default == KMEM_SLAB)
1951 if (!strncmp(argv[0], "rec", 3)) {
1953 return __cmd_record(argc, argv);
1956 data.path = input_name;
1958 kmem_session = session = perf_session__new(&data, false, &perf_kmem);
1959 if (session == NULL)
1965 if (!perf_evlist__find_tracepoint_by_name(session->evlist,
1967 pr_err(errmsg, "slab", "slab");
1973 struct evsel *evsel;
1975 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
1976 "kmem:mm_page_alloc");
1977 if (evsel == NULL) {
1978 pr_err(errmsg, "page", "page");
1982 kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
1983 symbol_conf.use_callchain = true;
1986 symbol__init(&session->header.env);
1988 if (perf_time__parse_str(&ptime, time_str) != 0) {
1989 pr_err("Invalid time string\n");
1994 if (!strcmp(argv[0], "stat")) {
1995 setlocale(LC_ALL, "");
1997 if (cpu__setup_cpunode_map())
2000 if (list_empty(&slab_caller_sort))
2001 setup_slab_sorting(&slab_caller_sort, default_slab_sort);
2002 if (list_empty(&slab_alloc_sort))
2003 setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
2004 if (list_empty(&page_caller_sort))
2005 setup_page_sorting(&page_caller_sort, default_page_sort);
2006 if (list_empty(&page_alloc_sort))
2007 setup_page_sorting(&page_alloc_sort, default_page_sort);
2010 setup_page_sorting(&page_alloc_sort_input,
2011 "page,order,migtype,gfp");
2012 setup_page_sorting(&page_caller_sort_input,
2013 "callsite,order,migtype,gfp");
2015 ret = __cmd_kmem(session);
2017 usage_with_options(kmem_usage, kmem_options);
2020 perf_session__delete(session);