#include <symbol/kallsyms.h>
#include "linux/hash.h"
static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
}
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->user_dsos);
	dsos__init(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	machine->current_tid = NULL;

	return 0;
}
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}
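/*
 * Example (sketch): the typical lifecycle for a standalone host machine in
 * a tool that does not manage a full struct machines; error handling is
 * kept minimal for illustration.
 */
#if 0
	struct machine *machine = machine__new_host();

	if (machine == NULL)
		return -1;

	/* ... feed PERF_RECORD_* events through machine__process_event() ... */

	machine__delete(machine);
#endif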
static void dsos__delete(struct dsos *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del(&pos->node);
		dso__delete(pos);
	}
}
void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}
void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &machine->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}
void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	vdso__exit(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
}
void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}
void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}
void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}
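/*
 * Example (sketch): a symbol_filter_t callback installed with
 * machines__set_symbol_filter(). This assumes the usual convention that a
 * non-zero return value filters the symbol out while the dso is loaded.
 */
#if 0
static int example__filter_zero_sized(struct map *map __maybe_unused,
				      struct symbol *sym)
{
	return sym->start == sym->end;	/* drop zero-sized symbols */
}

	/* usage: */
	machines__set_symbol_filter(&machines, example__filter_zero_sized);
#endif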
void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}
void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}
char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);

	return bf;
}
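/*
 * Example (sketch): the three name forms produced above, which
 * machine__process_kernel_mmap_event() later matches against:
 * host -> "[kernel.kallsyms]", default guest -> "[guest.kernel.kallsyms]",
 * guest pid 42 -> "[guest.kernel.kallsyms.42]".
 */
#if 0
	char bf[PATH_MAX];

	printf("%s\n", machine__mmap_name(machine, bf, sizeof(bf)));
#endif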
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__delete(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}
static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th && th->tid == tid) {
		machine__update_thread_pid(machine, th, pid);
		return th;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase(&th->rb_node, &machine->threads);
			thread__delete(th);
			return NULL;
		}
	}

	return th;
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}
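/*
 * Example (sketch): machine__find_thread() only looks up an existing
 * thread, while machine__findnew_thread() creates one on a miss; a tool
 * resolving a sample typically wants the latter.
 */
#if 0
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);
	if (thread == NULL)
		return -1;
	/* thread is now in machine->threads and cached in last_match */
#endif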
struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);

	return thread__comm(thread);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}
struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
	bool compressed;

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

	/* _KMODULE_COMP should be next to _KMODULE */
	if (is_kernel_module(filename, &compressed) && compressed)
		dso->symtab_type++;

	map_groups__insert(&machine->kmaps, map);

	return map;
}
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
		     __dsos__fprintf(&machines->host.user_dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
		ret += __dsos__fprintf(&pos->user_dsos.head, fp);
	}

	return ret;
}
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
	       __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}
struct process_args {
	u64 start;
};
static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/*
 * Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}
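/*
 * Example (sketch): callers pair the returned address with the symbol
 * name, falling back from "_text" to "_stext" automatically, as
 * machine__create_kernel_maps() does below.
 */
#if 0
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);

	if (addr)	/* addr of "_text", or of "_stext" if "_text" is absent */
		maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
						 name, addr);
#endif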
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}
void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}
void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}
int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}
int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}
static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}
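/*
 * Example (sketch): for a /proc/version line such as
 * "Linux version 3.19.0 (gcc ...) #1 SMP ...", the function above returns
 * a strdup()ed "3.19.0"; machine__set_modules_path() below expands it to
 * "<root_dir>/lib/modules/3.19.0". The caller owns the returned string.
 */
#if 0
	char *version = get_kernel_version("");

	/* e.g. version == "3.19.0" on a 3.19 host */
	free(version);
#endif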
static int map_groups__set_modules_path_dir(struct map_groups *mg,
				const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			char *dot = strrchr(dent->d_name, '.'),
			     dso_name[PATH_MAX];
			struct map *map;
			char *long_name;

			if (dot == NULL)
				continue;

			/* On some systems, modules are compressed like .ko.gz */
			if (is_supported_compression(dot + 1) &&
			    is_kmodule_extension(dot - 2))
				dot -= 3;

			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}
			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}
static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}
static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);
	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}
static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end   = (event->mmap.start +
						   event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data files come with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}
static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			/* On some systems, modules are compressed like .ko.gz */
			if (is_supported_compression(dot + 1))
				dot -= 3;
			if (!is_kmodule_extension(dot + 1))
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
			if (is_kernel_module(dso->long_name, NULL))
				continue;

			kernel = dso;
			break;
		}

		if (kernel == NULL)
			kernel = __dsos__findnew(&machine->kernel_dsos,
						 kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
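/*
 * Example (sketch): machine__process_event() is the single entry point a
 * simple consumer needs; next_event() here is a hypothetical reader that
 * yields one record at a time.
 */
#if 0
	union perf_event *event;

	while ((event = next_event()) != NULL) {
		if (machine__process_event(machine, event, &sample) < 0)
			pr_debug("skipped event %u\n", event->header.type);
	}
#endif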
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return true;
	return false;
}
static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match,
	 * or else the symbol remains unknown.
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}
static int add_callchain_ip(struct thread *thread,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    bool branch_history,
			    u64 ip)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (branch_history)
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	else {
		u8 cpumode = PERF_RECORD_MISC_USER;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (sort__has_parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(&callchain_cursor);
		}
	}

	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}
#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}
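/*
 * Example (sketch): a branch stack whose "from" addresses replay with
 * period 2 collapses to a single iteration; collisions in the 127-slot
 * hash are not handled, so an unlucky address pair may keep its loop.
 */
#if 0
	struct branch_entry be[] = {
		{ .from = 0xA0, .to = 0x10 }, { .from = 0xB0, .to = 0x20 },
		{ .from = 0xA0, .to = 0x10 }, { .from = 0xB0, .to = 0x20 },
		{ .from = 0xC0, .to = 0x30 },
	};
	int nr = remove_loops(be, 5);	/* nr == 3: 0xA0, 0xB0, 0xC0 remain */
#endif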
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct ip_callchain *chain,
					    struct branch_stack *branch,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	int chain_nr = min(max_stack, (int)chain->nr);
	int i, j, err;
	int skip_idx = -1;
	int first_call = 0;

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	if (chain->nr < PERF_MAX_STACK_DEPTH)
		skip_idx = arch_skip_callchain_idx(thread, chain);

	callchain_cursor_reset(&callchain_cursor);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr = remove_loops(be, nr);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, parent, root_al,
					       true, be[i].to);
			if (!err)
				err = add_callchain_ip(thread, parent, root_al,
						       true, be[i].from);
			if (err)
				return (err < 0) ? err : 0;
		}
		chain_nr -= nr;
	}

check_calls:
	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = first_call; i < chain_nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		err = add_callchain_ip(thread, parent, root_al, false, ip);
		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}
int thread__resolve_callchain(struct thread *thread,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = thread__resolve_callchain_sample(thread, sample->callchain,
						   sample->branch_stack,
						   parent, root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor,
				   thread, sample, max_stack);
}
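/*
 * Example (sketch): after thread__resolve_callchain() fills the global
 * callchain_cursor, a consumer commits and walks it frame by frame.
 */
#if 0
	struct callchain_cursor_node *node;

	callchain_cursor_commit(&callchain_cursor);
	while ((node = callchain_cursor_current(&callchain_cursor)) != NULL) {
		/* node->ip, node->map and node->sym describe one frame */
		callchain_cursor_advance(&callchain_cursor);
	}
#endif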
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}
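/*
 * Example (sketch): a visitor for machine__for_each_thread(); a non-zero
 * return stops the walk, so a counting callback returns 0. Note that dead
 * threads are visited too.
 */
#if 0
static int example__count_thread(struct thread *thread __maybe_unused, void *p)
{
	(*(int *)p)++;
	return 0;
}

	int nr = 0;
	machine__for_each_thread(machine, example__count_thread, &nr);
#endif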
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;

	return 0;
}
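/*
 * Example (sketch): per-cpu tid bookkeeping as used by system-wide
 * tracing; -1 means no tid has been seen on that cpu yet.
 */
#if 0
	machine__set_current_tid(machine, 0, pid, tid);
	assert(machine__get_current_tid(machine, 0) == tid);
	assert(machine__get_current_tid(machine, 1) == -1);
#endif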
int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}
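/*
 * Example (sketch): machine->kernel_start feeds machine__kernel_ip() in
 * machine.h, which classifies an address with a single comparison; the
 * 1ULL << 63 fallback above keeps that test correct for canonical 64-bit
 * kernel addresses even when no kernel map was loaded.
 */
#if 0
	if (machine__get_kernel_start(machine) == 0 &&
	    machine__kernel_ip(machine, sample->ip)) {
		/* sample->ip lies in kernel space */
	}
#endif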