6b73a0eeb6a1aaa00d7d1ed67e0dc066979c1694
[linux-2.6-microblaze.git] / tools / perf / util / symbol.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <stdio.h>
6 #include <string.h>
7 #include <linux/kernel.h>
8 #include <linux/mman.h>
9 #include <linux/time64.h>
10 #include <sys/types.h>
11 #include <sys/stat.h>
12 #include <sys/param.h>
13 #include <fcntl.h>
14 #include <unistd.h>
15 #include <inttypes.h>
16 #include "annotate.h"
17 #include "build-id.h"
18 #include "util.h"
19 #include "debug.h"
20 #include "machine.h"
21 #include "map.h"
22 #include "symbol.h"
23 #include "strlist.h"
24 #include "intlist.h"
25 #include "namespaces.h"
26 #include "header.h"
27 #include "path.h"
28 #include "sane_ctype.h"
29
30 #include <elf.h>
31 #include <limits.h>
32 #include <symbol/kallsyms.h>
33 #include <sys/utsname.h>
34
/* Forward declarations; the definitions come later in this file */
static int dso__load_kernel_sym(struct dso *dso, struct map *map);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
static bool symbol__is_idle(const char *name);

/* Table of vmlinux paths to try; populated elsewhere in this file */
int vmlinux_path__nr_entries;
char **vmlinux_path;
41
/* Global symbol handling configuration and its defaults */
struct symbol_conf symbol_conf = {
	.nanosecs		= false,
	.use_modules		= true,
	.try_vmlinux_path	= true,
	.demangle		= true,
	.demangle_kernel	= false,
	.cumulate_callchain	= true,
	.time_quantum		= 100 * NSEC_PER_MSEC, /* 100ms */
	.show_hist_headers	= true,
	.symfs			= "",
	.event_group		= true,
	.inline_name		= true,
};
55
/*
 * Candidate binary types to try when looking for a dso's symbol table;
 * the DSO_BINARY_TYPE__NOT_FOUND entry terminates the list.
 */
static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

/* Number of entries above, sentinel included */
#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
76
/* Keep only the kallsyms symbol types perf cares about: text (T/W) and data (D/B) */
static bool symbol_type__filter(char symbol_type)
{
	switch (toupper(symbol_type)) {
	case 'T':
	case 'W':
	case 'D':
	case 'B':
		return true;
	default:
		return false;
	}
}
82
/* Return the number of leading '_' characters in @str */
static int prefix_underscores_count(const char *str)
{
	int count = 0;

	while (str[count] == '_')
		count++;

	return count;
}
92
/*
 * Default no-op; an architecture may override this to strip
 * decorations its toolchain adds to symbol names.
 */
const char * __weak arch__normalize_symbol_name(const char *name)
{
	return name;
}
97
/* Default symbol name comparison, strcmp() semantics; arches may override */
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}
102
/* Length-bounded variant of arch__compare_symbol_names(), strncmp() semantics */
int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
					unsigned int n)
{
	return strncmp(namea, nameb, n);
}
108
109 int __weak arch__choose_best_symbol(struct symbol *syma,
110                                     struct symbol *symb __maybe_unused)
111 {
112         /* Avoid "SyS" kernel syscall aliases */
113         if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
114                 return SYMBOL_B;
115         if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
116                 return SYMBOL_B;
117
118         return SYMBOL_A;
119 }
120
121 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
122 {
123         s64 a;
124         s64 b;
125         size_t na, nb;
126
127         /* Prefer a symbol with non zero length */
128         a = syma->end - syma->start;
129         b = symb->end - symb->start;
130         if ((b == 0) && (a > 0))
131                 return SYMBOL_A;
132         else if ((a == 0) && (b > 0))
133                 return SYMBOL_B;
134
135         /* Prefer a non weak symbol over a weak one */
136         a = syma->binding == STB_WEAK;
137         b = symb->binding == STB_WEAK;
138         if (b && !a)
139                 return SYMBOL_A;
140         if (a && !b)
141                 return SYMBOL_B;
142
143         /* Prefer a global symbol over a non global one */
144         a = syma->binding == STB_GLOBAL;
145         b = symb->binding == STB_GLOBAL;
146         if (a && !b)
147                 return SYMBOL_A;
148         if (b && !a)
149                 return SYMBOL_B;
150
151         /* Prefer a symbol with less underscores */
152         a = prefix_underscores_count(syma->name);
153         b = prefix_underscores_count(symb->name);
154         if (b > a)
155                 return SYMBOL_A;
156         else if (a > b)
157                 return SYMBOL_B;
158
159         /* Choose the symbol with the longest name */
160         na = strlen(syma->name);
161         nb = strlen(symb->name);
162         if (na > nb)
163                 return SYMBOL_A;
164         else if (na < nb)
165                 return SYMBOL_B;
166
167         return arch__choose_best_symbol(syma, symb);
168 }
169
170 void symbols__fixup_duplicate(struct rb_root_cached *symbols)
171 {
172         struct rb_node *nd;
173         struct symbol *curr, *next;
174
175         if (symbol_conf.allow_aliases)
176                 return;
177
178         nd = rb_first_cached(symbols);
179
180         while (nd) {
181                 curr = rb_entry(nd, struct symbol, rb_node);
182 again:
183                 nd = rb_next(&curr->rb_node);
184                 next = rb_entry(nd, struct symbol, rb_node);
185
186                 if (!nd)
187                         break;
188
189                 if (curr->start != next->start)
190                         continue;
191
192                 if (choose_best_symbol(curr, next) == SYMBOL_A) {
193                         rb_erase_cached(&next->rb_node, symbols);
194                         symbol__delete(next);
195                         goto again;
196                 } else {
197                         nd = rb_next(&curr->rb_node);
198                         rb_erase_cached(&curr->rb_node, symbols);
199                         symbol__delete(curr);
200                 }
201         }
202 }
203
204 void symbols__fixup_end(struct rb_root_cached *symbols)
205 {
206         struct rb_node *nd, *prevnd = rb_first_cached(symbols);
207         struct symbol *curr, *prev;
208
209         if (prevnd == NULL)
210                 return;
211
212         curr = rb_entry(prevnd, struct symbol, rb_node);
213
214         for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
215                 prev = curr;
216                 curr = rb_entry(nd, struct symbol, rb_node);
217
218                 if (prev->end == prev->start && prev->end != curr->start)
219                         prev->end = curr->start;
220         }
221
222         /* Last entry */
223         if (curr->end == curr->start)
224                 curr->end = roundup(curr->start, 4096) + 4096;
225 }
226
/*
 * Set the missing end addresses of @mg's maps: a map without an end
 * gets the start of the following map, and the last one is extended to
 * the end of the address space since its real size is unknown here.
 */
void map_groups__fixup_end(struct map_groups *mg)
{
	struct maps *maps = &mg->maps;
	struct map *next, *curr;

	/* Writer lock: the maps are modified in place */
	down_write(&maps->lock);

	curr = maps__first(maps);
	if (curr == NULL)
		goto out_unlock;

	for (next = map__next(curr); next; next = map__next(curr)) {
		/* A zero ->end means the size isn't known yet */
		if (!curr->end)
			curr->end = next->start;
		curr = next;
	}

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	if (!curr->end)
		curr->end = ~0ULL;

out_unlock:
	up_write(&maps->lock);
}
254
/*
 * Allocate and initialize a struct symbol.  One contiguous chunk holds
 * symbol_conf.priv_size bytes of private data *before* the symbol and
 * the name *after* it:
 *
 *	[ private data | struct symbol | name ]
 *
 * The returned pointer is to the struct symbol part; symbol__delete()
 * subtracts priv_size again before freeing.  A zero @len yields a
 * zero-sized symbol (start == end), to be fixed up later by
 * symbols__fixup_end().
 */
struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			/* The private area starts with the annotation lock */
			struct annotation *notes = (void *)sym;
			pthread_mutex_init(&notes->lock, NULL);
		}
		/* Skip over the private area to the symbol proper */
		sym = ((void *)sym) + symbol_conf.priv_size;
	}

	sym->start   = start;
	sym->end     = len ? start + len : start;
	sym->type    = type;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}
283
/* Free a symbol allocated by symbol__new(), private area included */
void symbol__delete(struct symbol *sym)
{
	/* The allocation starts priv_size bytes before the symbol itself */
	free(((void *)sym) - symbol_conf.priv_size);
}
288
289 void symbols__delete(struct rb_root_cached *symbols)
290 {
291         struct symbol *pos;
292         struct rb_node *next = rb_first_cached(symbols);
293
294         while (next) {
295                 pos = rb_entry(next, struct symbol, rb_node);
296                 next = rb_next(&pos->rb_node);
297                 rb_erase_cached(&pos->rb_node, symbols);
298                 symbol__delete(pos);
299         }
300 }
301
302 void __symbols__insert(struct rb_root_cached *symbols,
303                        struct symbol *sym, bool kernel)
304 {
305         struct rb_node **p = &symbols->rb_root.rb_node;
306         struct rb_node *parent = NULL;
307         const u64 ip = sym->start;
308         struct symbol *s;
309         bool leftmost = true;
310
311         if (kernel) {
312                 const char *name = sym->name;
313                 /*
314                  * ppc64 uses function descriptors and appends a '.' to the
315                  * start of every instruction address. Remove it.
316                  */
317                 if (name[0] == '.')
318                         name++;
319                 sym->idle = symbol__is_idle(name);
320         }
321
322         while (*p != NULL) {
323                 parent = *p;
324                 s = rb_entry(parent, struct symbol, rb_node);
325                 if (ip < s->start)
326                         p = &(*p)->rb_left;
327                 else {
328                         p = &(*p)->rb_right;
329                         leftmost = false;
330                 }
331         }
332         rb_link_node(&sym->rb_node, parent, p);
333         rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
334 }
335
/* Insert a non-kernel symbol, see __symbols__insert() */
void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
	__symbols__insert(symbols, sym, false);
}
340
341 static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
342 {
343         struct rb_node *n;
344
345         if (symbols == NULL)
346                 return NULL;
347
348         n = symbols->rb_root.rb_node;
349
350         while (n) {
351                 struct symbol *s = rb_entry(n, struct symbol, rb_node);
352
353                 if (ip < s->start)
354                         n = n->rb_left;
355                 else if (ip > s->end || (ip == s->end && ip != s->start))
356                         n = n->rb_right;
357                 else
358                         return s;
359         }
360
361         return NULL;
362 }
363
364 static struct symbol *symbols__first(struct rb_root_cached *symbols)
365 {
366         struct rb_node *n = rb_first_cached(symbols);
367
368         if (n)
369                 return rb_entry(n, struct symbol, rb_node);
370
371         return NULL;
372 }
373
374 static struct symbol *symbols__last(struct rb_root_cached *symbols)
375 {
376         struct rb_node *n = rb_last(&symbols->rb_root);
377
378         if (n)
379                 return rb_entry(n, struct symbol, rb_node);
380
381         return NULL;
382 }
383
384 static struct symbol *symbols__next(struct symbol *sym)
385 {
386         struct rb_node *n = rb_next(&sym->rb_node);
387
388         if (n)
389                 return rb_entry(n, struct symbol, rb_node);
390
391         return NULL;
392 }
393
394 static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
395 {
396         struct rb_node **p = &symbols->rb_root.rb_node;
397         struct rb_node *parent = NULL;
398         struct symbol_name_rb_node *symn, *s;
399         bool leftmost = true;
400
401         symn = container_of(sym, struct symbol_name_rb_node, sym);
402
403         while (*p != NULL) {
404                 parent = *p;
405                 s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
406                 if (strcmp(sym->name, s->sym.name) < 0)
407                         p = &(*p)->rb_left;
408                 else {
409                         p = &(*p)->rb_right;
410                         leftmost = false;
411                 }
412         }
413         rb_link_node(&symn->rb_node, parent, p);
414         rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
415 }
416
417 static void symbols__sort_by_name(struct rb_root_cached *symbols,
418                                   struct rb_root_cached *source)
419 {
420         struct rb_node *nd;
421
422         for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
423                 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
424                 symbols__insert_by_name(symbols, pos);
425         }
426 }
427
428 int symbol__match_symbol_name(const char *name, const char *str,
429                               enum symbol_tag_include includes)
430 {
431         const char *versioning;
432
433         if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
434             (versioning = strstr(name, "@@"))) {
435                 int len = strlen(str);
436
437                 if (len < versioning - name)
438                         len = versioning - name;
439
440                 return arch__compare_symbol_names_n(name, str, len);
441         } else
442                 return arch__compare_symbol_names(name, str);
443 }
444
/*
 * Binary search the name-sorted tree @symbols for @name.  Depending on
 * @includes, "name@@version" entries may match a plain "name" lookup
 * (see symbol__match_symbol_name()).  When aliases can match, rewind to
 * the first of a run of equal names so callers can walk the rest with
 * symbol__next_by_name().
 */
static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
					    const char *name,
					    enum symbol_tag_include includes)
{
	struct rb_node *n;
	struct symbol_name_rb_node *s = NULL;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = symbol__match_symbol_name(s->sym.name, name, includes);

		if (cmp > 0)
			n = n->rb_left;
		else if (cmp < 0)
			n = n->rb_right;
		else
			break;
	}

	if (n == NULL)
		return NULL;

	if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
		/* return first symbol that has same name (if any) */
		for (n = rb_prev(n); n; n = rb_prev(n)) {
			struct symbol_name_rb_node *tmp;

			tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
			if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
				break;

			s = tmp;
		}

	return &s->sym;
}
488
/* Invalidate the one-entry lookup cache used by dso__find_symbol() */
void dso__reset_find_symbol_cache(struct dso *dso)
{
	dso->last_find_result.addr   = 0;
	dso->last_find_result.symbol = NULL;
}
494
/*
 * Insert @sym into @dso's symbol tree, keeping the last-lookup cache
 * coherent: if the cached address now falls inside the new symbol (or
 * matches a zero-sized one), cache the new symbol for it.
 */
void dso__insert_symbol(struct dso *dso, struct symbol *sym)
{
	__symbols__insert(&dso->symbols, sym, dso->kernel);

	/* update the symbol cache if necessary */
	if (dso->last_find_result.addr >= sym->start &&
	    (dso->last_find_result.addr < sym->end ||
	    sym->start == sym->end)) {
		dso->last_find_result.symbol = sym;
	}
}
506
507 struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
508 {
509         if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
510                 dso->last_find_result.addr   = addr;
511                 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
512         }
513
514         return dso->last_find_result.symbol;
515 }
516
/* First symbol in @dso, in address order */
struct symbol *dso__first_symbol(struct dso *dso)
{
	return symbols__first(&dso->symbols);
}
521
/* Last symbol in @dso, in address order */
struct symbol *dso__last_symbol(struct dso *dso)
{
	return symbols__last(&dso->symbols);
}
526
/* Symbol following @sym in address order, or NULL */
struct symbol *dso__next_symbol(struct symbol *sym)
{
	return symbols__next(sym);
}
531
532 struct symbol *symbol__next_by_name(struct symbol *sym)
533 {
534         struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
535         struct rb_node *n = rb_next(&s->rb_node);
536
537         return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
538 }
539
540  /*
541   * Returns first symbol that matched with @name.
542   */
543 struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
544 {
545         struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
546                                                  SYMBOL_TAG_INCLUDE__NONE);
547         if (!s)
548                 s = symbols__find_by_name(&dso->symbol_names, name,
549                                           SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
550         return s;
551 }
552
553 void dso__sort_by_name(struct dso *dso)
554 {
555         dso__set_sorted_by_name(dso);
556         return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
557 }
558
559 int modules__parse(const char *filename, void *arg,
560                    int (*process_module)(void *arg, const char *name,
561                                          u64 start, u64 size))
562 {
563         char *line = NULL;
564         size_t n;
565         FILE *file;
566         int err = 0;
567
568         file = fopen(filename, "r");
569         if (file == NULL)
570                 return -1;
571
572         while (1) {
573                 char name[PATH_MAX];
574                 u64 start, size;
575                 char *sep, *endptr;
576                 ssize_t line_len;
577
578                 line_len = getline(&line, &n, file);
579                 if (line_len < 0) {
580                         if (feof(file))
581                                 break;
582                         err = -1;
583                         goto out;
584                 }
585
586                 if (!line) {
587                         err = -1;
588                         goto out;
589                 }
590
591                 line[--line_len] = '\0'; /* \n */
592
593                 sep = strrchr(line, 'x');
594                 if (sep == NULL)
595                         continue;
596
597                 hex2u64(sep + 1, &start);
598
599                 sep = strchr(line, ' ');
600                 if (sep == NULL)
601                         continue;
602
603                 *sep = '\0';
604
605                 scnprintf(name, sizeof(name), "[%s]", line);
606
607                 size = strtoul(sep + 1, &endptr, 0);
608                 if (*endptr != ' ' && *endptr != '\t')
609                         continue;
610
611                 err = process_module(arg, name, start, size);
612                 if (err)
613                         break;
614         }
615 out:
616         free(line);
617         fclose(file);
618         return err;
619 }
620
/*
 * These are symbols in the kernel image, so make sure that
 * sym is from a kernel DSO.
 */
static bool symbol__is_idle(const char *name)
{
	const char * const idle_symbols[] = {
		"arch_cpu_idle",
		"cpu_idle",
		"cpu_startup_entry",
		"intel_idle",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		NULL
	};
	const char * const *sym;

	/* Walk the NULL-terminated table looking for an exact match */
	for (sym = idle_symbols; *sym != NULL; sym++) {
		if (strcmp(*sym, name) == 0)
			return true;
	}

	return false;
}
652
/*
 * kallsyms__parse() callback: create a zero-length symbol for each
 * kallsyms line of an interesting type and insert it into the dso's
 * symbol tree.  Returns 0 to continue parsing, -ENOMEM on allocation
 * failure.
 */
static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct dso *dso = arg;
	struct rb_root_cached *root = &dso->symbols;

	if (!symbol_type__filter(type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	__symbols__insert(root, sym, !strchr(name, '['));

	return 0;
}
679
/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
{
	/* @filename is a kallsyms-formatted file, usually /proc/kallsyms */
	return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}
689
/*
 * Redistribute @dso's symbols (parsed from kallsyms) over the maps in
 * @kmaps: each symbol is moved to the dso of the map containing its
 * address and rebased by that map's start/pgoff.  Symbols not covered
 * by any map are dropped.  Returns the number of symbols kept, or -1
 * if @kmaps is NULL.
 */
static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
{
	struct map *curr_map;
	struct symbol *pos;
	int count = 0;
	struct rb_root_cached old_root = dso->symbols;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);

	if (!kmaps)
		return -1;

	/* Start over with an empty tree; symbols are re-inserted below */
	*root = RB_ROOT_CACHED;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		rb_erase_cached(&pos->rb_node, &old_root);
		RB_CLEAR_NODE(&pos->rb_node);
		/* Strip the "\t[module]" suffix, if any */
		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, pos->start);

		if (!curr_map) {
			/* No map covers this address: drop the symbol */
			symbol__delete(pos);
			continue;
		}

		/* Rebase the symbol to its map, clamping the end address */
		pos->start -= curr_map->start - curr_map->pgoff;
		if (pos->end > curr_map->end)
			pos->end = curr_map->end;
		if (pos->end)
			pos->end -= curr_map->start - curr_map->pgoff;
		symbols__insert(&curr_map->dso->symbols, pos);
		++count;
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count;
}
737
/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names vmlinux have.
 *
 * @delta is the offset the kernel was relocated by at boot time, and
 * @initial_map is the map all symbols start out in.  Returns the number
 * of symbols kept (in place or moved to a module/split map), or -1 on
 * error.
 */
static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
				      struct map *initial_map)
{
	struct machine *machine;
	struct map *curr_map = initial_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);
	int kernel_range = 0;
	bool x86_64;

	if (!kmaps)
		return -1;

	machine = kmaps->machine;

	x86_64 = machine__is(machine, "x86_64");

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		/* Module symbols carry a "\t[module]" suffix in kallsyms */
		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != initial_map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * continuous in * kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map->dso);
				}

				curr_map = map_groups__find_by_name(kmaps, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = initial_map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to initial_map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end   = curr_map->map_ip(curr_map, pos->end);
		} else if (x86_64 && is_entry_trampoline(pos->name)) {
			/*
			 * These symbols are not needed anymore since the
			 * trampoline maps refer to the text section and it's
			 * symbols instead. Avoid having to deal with
			 * relocations, and the assumption that the first symbol
			 * is the start of kernel text, by simply removing the
			 * symbols at this point.
			 */
			goto discard_symbol;
		} else if (curr_map != initial_map) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = initial_map;
				goto add_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					"[guest.kernel].%d",
					kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					"[kernel].%d",
					kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso);
			if (curr_map == NULL) {
				dso__put(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
add_symbol:
		if (curr_map != initial_map) {
			/* Move the symbol to its module/split map's dso */
			rb_erase_cached(&pos->rb_node, root);
			symbols__insert(&curr_map->dso->symbols, pos);
			++moved;
		} else
			++count;

		continue;
discard_symbol:
		rb_erase_cached(&pos->rb_node, root);
		symbol__delete(pos);
	}

	if (curr_map != initial_map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso);
	}

	return count + moved;
}
885
886 bool symbol__restricted_filename(const char *filename,
887                                  const char *restricted_filename)
888 {
889         bool restricted = false;
890
891         if (symbol_conf.kptr_restrict) {
892                 char *r = realpath(filename, NULL);
893
894                 if (r != NULL) {
895                         restricted = strcmp(r, restricted_filename) == 0;
896                         free(r);
897                         return restricted;
898                 }
899         }
900
901         return restricted;
902 }
903
/* A module parsed from /proc/modules, kept in an rb tree sorted by name */
struct module_info {
	struct rb_node rb_node;
	char *name;	/* heap-allocated; freed by delete_modules() */
	u64 start;	/* module load address */
};
909
910 static void add_module(struct module_info *mi, struct rb_root *modules)
911 {
912         struct rb_node **p = &modules->rb_node;
913         struct rb_node *parent = NULL;
914         struct module_info *m;
915
916         while (*p != NULL) {
917                 parent = *p;
918                 m = rb_entry(parent, struct module_info, rb_node);
919                 if (strcmp(mi->name, m->name) < 0)
920                         p = &(*p)->rb_left;
921                 else
922                         p = &(*p)->rb_right;
923         }
924         rb_link_node(&mi->rb_node, parent, p);
925         rb_insert_color(&mi->rb_node, modules);
926 }
927
928 static void delete_modules(struct rb_root *modules)
929 {
930         struct module_info *mi;
931         struct rb_node *next = rb_first(modules);
932
933         while (next) {
934                 mi = rb_entry(next, struct module_info, rb_node);
935                 next = rb_next(&mi->rb_node);
936                 rb_erase(&mi->rb_node, modules);
937                 zfree(&mi->name);
938                 free(mi);
939         }
940 }
941
942 static struct module_info *find_module(const char *name,
943                                        struct rb_root *modules)
944 {
945         struct rb_node *n = modules->rb_node;
946
947         while (n) {
948                 struct module_info *m;
949                 int cmp;
950
951                 m = rb_entry(n, struct module_info, rb_node);
952                 cmp = strcmp(name, m->name);
953                 if (cmp < 0)
954                         n = n->rb_left;
955                 else if (cmp > 0)
956                         n = n->rb_right;
957                 else
958                         return m;
959         }
960
961         return NULL;
962 }
963
964 static int __read_proc_modules(void *arg, const char *name, u64 start,
965                                u64 size __maybe_unused)
966 {
967         struct rb_root *modules = arg;
968         struct module_info *mi;
969
970         mi = zalloc(sizeof(struct module_info));
971         if (!mi)
972                 return -ENOMEM;
973
974         mi->name = strdup(name);
975         mi->start = start;
976
977         if (!mi->name) {
978                 free(mi);
979                 return -ENOMEM;
980         }
981
982         add_module(mi, modules);
983
984         return 0;
985 }
986
987 static int read_proc_modules(const char *filename, struct rb_root *modules)
988 {
989         if (symbol__restricted_filename(filename, "/proc/modules"))
990                 return -1;
991
992         if (modules__parse(filename, modules, __read_proc_modules)) {
993                 delete_modules(modules);
994                 return -1;
995         }
996
997         return 0;
998 }
999
1000 int compare_proc_modules(const char *from, const char *to)
1001 {
1002         struct rb_root from_modules = RB_ROOT;
1003         struct rb_root to_modules = RB_ROOT;
1004         struct rb_node *from_node, *to_node;
1005         struct module_info *from_m, *to_m;
1006         int ret = -1;
1007
1008         if (read_proc_modules(from, &from_modules))
1009                 return -1;
1010
1011         if (read_proc_modules(to, &to_modules))
1012                 goto out_delete_from;
1013
1014         from_node = rb_first(&from_modules);
1015         to_node = rb_first(&to_modules);
1016         while (from_node) {
1017                 if (!to_node)
1018                         break;
1019
1020                 from_m = rb_entry(from_node, struct module_info, rb_node);
1021                 to_m = rb_entry(to_node, struct module_info, rb_node);
1022
1023                 if (from_m->start != to_m->start ||
1024                     strcmp(from_m->name, to_m->name))
1025                         break;
1026
1027                 from_node = rb_next(from_node);
1028                 to_node = rb_next(to_node);
1029         }
1030
1031         if (!from_node && !to_node)
1032                 ret = 0;
1033
1034         delete_modules(&to_modules);
1035 out_delete_from:
1036         delete_modules(&from_modules);
1037
1038         return ret;
1039 }
1040
/* Convenience wrapper: first entry of mg->maps in maps__first() order. */
struct map *map_groups__first(struct map_groups *mg)
{
	return maps__first(&mg->maps);
}
1045
1046 static int do_validate_kcore_modules(const char *filename,
1047                                   struct map_groups *kmaps)
1048 {
1049         struct rb_root modules = RB_ROOT;
1050         struct map *old_map;
1051         int err;
1052
1053         err = read_proc_modules(filename, &modules);
1054         if (err)
1055                 return err;
1056
1057         old_map = map_groups__first(kmaps);
1058         while (old_map) {
1059                 struct map *next = map_groups__next(old_map);
1060                 struct module_info *mi;
1061
1062                 if (!__map__is_kmodule(old_map)) {
1063                         old_map = next;
1064                         continue;
1065                 }
1066
1067                 /* Module must be in memory at the same address */
1068                 mi = find_module(old_map->dso->short_name, &modules);
1069                 if (!mi || mi->start != old_map->start) {
1070                         err = -EINVAL;
1071                         goto out;
1072                 }
1073
1074                 old_map = next;
1075         }
1076 out:
1077         delete_modules(&modules);
1078         return err;
1079 }
1080
1081 /*
1082  * If kallsyms is referenced by name then we look for filename in the same
1083  * directory.
1084  */
/*
 * Build a sibling filename of a kallsyms path: if 'kallsyms_filename' ends
 * in a path component named exactly "kallsyms", copy it into 'filename'
 * with that last component replaced by 'base_name' and return true.
 * Otherwise return false (filename may hold a partial copy).
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *last_slash;

	strcpy(filename, kallsyms_filename);

	last_slash = strrchr(filename, '/');
	if (last_slash == NULL)
		return false;

	/* Only rewrite when the final component is exactly "kallsyms". */
	if (strcmp(last_slash + 1, "kallsyms") != 0)
		return false;

	strcpy(last_slash + 1, base_name);
	return true;
}
1105
1106 static int validate_kcore_modules(const char *kallsyms_filename,
1107                                   struct map *map)
1108 {
1109         struct map_groups *kmaps = map__kmaps(map);
1110         char modules_filename[PATH_MAX];
1111
1112         if (!kmaps)
1113                 return -EINVAL;
1114
1115         if (!filename_from_kallsyms_filename(modules_filename, "modules",
1116                                              kallsyms_filename))
1117                 return -EINVAL;
1118
1119         if (do_validate_kcore_modules(modules_filename, kmaps))
1120                 return -EINVAL;
1121
1122         return 0;
1123 }
1124
1125 static int validate_kcore_addresses(const char *kallsyms_filename,
1126                                     struct map *map)
1127 {
1128         struct kmap *kmap = map__kmap(map);
1129
1130         if (!kmap)
1131                 return -EINVAL;
1132
1133         if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1134                 u64 start;
1135
1136                 if (kallsyms__get_function_start(kallsyms_filename,
1137                                                  kmap->ref_reloc_sym->name, &start))
1138                         return -ENOENT;
1139                 if (start != kmap->ref_reloc_sym->addr)
1140                         return -EINVAL;
1141         }
1142
1143         return validate_kcore_modules(kallsyms_filename, map);
1144 }
1145
/* Context for kcore_mapfn(): target dso plus the list it appends maps to. */
struct kcore_mapfn_data {
	struct dso *dso;
	struct list_head maps;	/* temporary list of struct map (map->node) */
};
1150
1151 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1152 {
1153         struct kcore_mapfn_data *md = data;
1154         struct map *map;
1155
1156         map = map__new2(start, md->dso);
1157         if (map == NULL)
1158                 return -ENOMEM;
1159
1160         map->end = map->start + len;
1161         map->pgoff = pgoff;
1162
1163         list_add(&map->node, &md->maps);
1164
1165         return 0;
1166 }
1167
/*
 * Replace the kernel map(s) in the kernel map group with maps read from a
 * "kcore" file found in the same directory as 'kallsyms_filename', so that
 * kernel object code/data can later be read via dso__data_read_addr().
 *
 * Preconditions: 'map' must be the kernel map, and the kernel and modules
 * must still be at the addresses recorded in the kallsyms file.
 * Returns 0 on success, -EINVAL on any failure (leaving kmaps possibly
 * already stripped of its old maps on the later error paths).
 */
static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmaps(map);
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	struct machine *machine;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	u64 stext;

	if (!kmaps)
		return -EINVAL;

	machine = kmaps->machine;

	/* This function requires that the map is the kernel map */
	if (!__map__is_kernel(map))
		return -EINVAL;

	/* Derive ".../kcore" from ".../kallsyms". */
	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
			 kcore_filename);
		return -EINVAL;
	}

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;
	dso->is_64_bit = is_64_bit;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	old_map = map_groups__first(kmaps);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		/* The kernel map itself is kept and updated in place below. */
		if (old_map != map)
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}
	machine->trampolines_mapped = false;

	/* Find the kernel map using the '_stext' symbol */
	if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
		list_for_each_entry(new_map, &md.maps, node) {
			if (stext >= new_map->start && stext < new_map->end) {
				replacement_map = new_map;
				break;
			}
		}
	}

	/* No _stext match: fall back to the first map read from kcore. */
	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del_init(&new_map->node);
		if (new_map == replacement_map) {
			/*
			 * Update the existing kernel map in place rather than
			 * inserting the new one, so external references to
			 * 'map' stay valid.
			 */
			map->start	= new_map->start;
			map->end	= new_map->end;
			map->pgoff	= new_map->pgoff;
			map->map_ip	= new_map->map_ip;
			map->unmap_ip	= new_map->unmap_ip;
			/* Ensure maps are correctly ordered */
			map__get(map);
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
			map__put(map);
		} else {
			map_groups__insert(kmaps, new_map);
		}

		/* Drop the list's reference; kmaps holds its own now. */
		map__put(new_map);
	}

	if (machine__is(machine, "x86_64")) {
		u64 addr;

		/*
		 * If one of the corresponding symbols is there, assume the
		 * entry trampoline maps are too.
		 */
		if (!kallsyms__get_function_start(kallsyms_filename,
						  ENTRY_TRAMPOLINE_NAME,
						  &addr))
			machine->trampolines_mapped = true;
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map->prot & PROT_EXEC)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	/* Release any maps collected before the failure. */
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del_init(&map->node);
		map__put(map);
	}
	close(fd);
	return -EINVAL;
}
1306
1307 /*
1308  * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
1309  * delta based on the relocation reference symbol.
1310  */
1311 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1312 {
1313         u64 addr;
1314
1315         if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1316                 return 0;
1317
1318         if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1319                 return -1;
1320
1321         *delta = addr - kmap->ref_reloc_sym->addr;
1322         return 0;
1323 }
1324
1325 int __dso__load_kallsyms(struct dso *dso, const char *filename,
1326                          struct map *map, bool no_kcore)
1327 {
1328         struct kmap *kmap = map__kmap(map);
1329         u64 delta = 0;
1330
1331         if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1332                 return -1;
1333
1334         if (!kmap || !kmap->kmaps)
1335                 return -1;
1336
1337         if (dso__load_all_kallsyms(dso, filename) < 0)
1338                 return -1;
1339
1340         if (kallsyms__delta(kmap, filename, &delta))
1341                 return -1;
1342
1343         symbols__fixup_end(&dso->symbols);
1344         symbols__fixup_duplicate(&dso->symbols);
1345
1346         if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1347                 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1348         else
1349                 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1350
1351         if (!no_kcore && !dso__load_kcore(dso, map, filename))
1352                 return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
1353         else
1354                 return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
1355 }
1356
/* Load kallsyms with kcore enabled (no_kcore == false). */
int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map)
{
	return __dso__load_kallsyms(dso, filename, map, false);
}
1362
1363 static int dso__load_perf_map(const char *map_path, struct dso *dso)
1364 {
1365         char *line = NULL;
1366         size_t n;
1367         FILE *file;
1368         int nr_syms = 0;
1369
1370         file = fopen(map_path, "r");
1371         if (file == NULL)
1372                 goto out_failure;
1373
1374         while (!feof(file)) {
1375                 u64 start, size;
1376                 struct symbol *sym;
1377                 int line_len, len;
1378
1379                 line_len = getline(&line, &n, file);
1380                 if (line_len < 0)
1381                         break;
1382
1383                 if (!line)
1384                         goto out_failure;
1385
1386                 line[--line_len] = '\0'; /* \n */
1387
1388                 len = hex2u64(line, &start);
1389
1390                 len++;
1391                 if (len + 2 >= line_len)
1392                         continue;
1393
1394                 len += hex2u64(line + len, &size);
1395
1396                 len++;
1397                 if (len + 2 >= line_len)
1398                         continue;
1399
1400                 sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1401
1402                 if (sym == NULL)
1403                         goto out_delete_line;
1404
1405                 symbols__insert(&dso->symbols, sym);
1406                 nr_syms++;
1407         }
1408
1409         free(line);
1410         fclose(file);
1411
1412         return nr_syms;
1413
1414 out_delete_line:
1415         free(line);
1416 out_failure:
1417         return -1;
1418 }
1419
1420 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1421                                            enum dso_binary_type type)
1422 {
1423         switch (type) {
1424         case DSO_BINARY_TYPE__JAVA_JIT:
1425         case DSO_BINARY_TYPE__DEBUGLINK:
1426         case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1427         case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1428         case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1429         case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1430         case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1431                 return !kmod && dso->kernel == DSO_TYPE_USER;
1432
1433         case DSO_BINARY_TYPE__KALLSYMS:
1434         case DSO_BINARY_TYPE__VMLINUX:
1435         case DSO_BINARY_TYPE__KCORE:
1436                 return dso->kernel == DSO_TYPE_KERNEL;
1437
1438         case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1439         case DSO_BINARY_TYPE__GUEST_VMLINUX:
1440         case DSO_BINARY_TYPE__GUEST_KCORE:
1441                 return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1442
1443         case DSO_BINARY_TYPE__GUEST_KMODULE:
1444         case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1445         case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1446         case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1447                 /*
1448                  * kernel modules know their symtab type - it's set when
1449                  * creating a module dso in machine__findnew_module_map().
1450                  */
1451                 return kmod && dso->symtab_type == type;
1452
1453         case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1454         case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1455                 return true;
1456
1457         case DSO_BINARY_TYPE__NOT_FOUND:
1458         default:
1459                 return false;
1460         }
1461 }
1462
1463 /* Checks for the existence of the perf-<pid>.map file in two different
1464  * locations.  First, if the process is a separate mount namespace, check in
1465  * that namespace using the pid of the innermost pid namespace.  If's not in a
1466  * namespace, or the file can't be found there, try in the mount namespace of
1467  * the tracing process using our view of its pid.
1468  */
1469 static int dso__find_perf_map(char *filebuf, size_t bufsz,
1470                               struct nsinfo **nsip)
1471 {
1472         struct nscookie nsc;
1473         struct nsinfo *nsi;
1474         struct nsinfo *nnsi;
1475         int rc = -1;
1476
1477         nsi = *nsip;
1478
1479         if (nsi->need_setns) {
1480                 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
1481                 nsinfo__mountns_enter(nsi, &nsc);
1482                 rc = access(filebuf, R_OK);
1483                 nsinfo__mountns_exit(&nsc);
1484                 if (rc == 0)
1485                         return rc;
1486         }
1487
1488         nnsi = nsinfo__copy(nsi);
1489         if (nnsi) {
1490                 nsinfo__put(nsi);
1491
1492                 nnsi->need_setns = false;
1493                 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
1494                 *nsip = nnsi;
1495                 rc = 0;
1496         }
1497
1498         return rc;
1499 }
1500
/*
 * Load symbols for 'dso' into 'map'.  Dispatches to the appropriate loader:
 * kernel/guest-kernel symbol loading, perf map files (/tmp/perf-<pid>.map),
 * or the ranked list of candidate binary types (vmlinux, debuglink,
 * build-id cache, ...).  Returns the number of symbols loaded, 1 if the dso
 * was already loaded, or a negative value on error.  Serialized per-dso via
 * dso->lock; runs inside the dso's mount namespace where applicable.
 */
int dso__load(struct dso *dso, struct map *map)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;
	bool perfmap;
	unsigned char build_id[BUILD_ID_SIZE];
	struct nscookie nsc;
	char newmapname[PATH_MAX];
	const char *map_path = dso->long_name;

	/* A dso named /tmp/perf-* is a JIT/perf map file, not an ELF image. */
	perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
	if (perfmap) {
		/* Prefer the map file found inside the process's namespace. */
		if (dso->nsinfo && (dso__find_perf_map(newmapname,
		    sizeof(newmapname), &dso->nsinfo) == 0)) {
			map_path = newmapname;
		}
	}

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	pthread_mutex_lock(&dso->lock);

	/* check again under the dso->lock */
	if (dso__loaded(dso)) {
		ret = 1;
		goto out;
	}

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	/* Kernel dsos take a separate path and do not scan binary types. */
	if (dso->kernel) {
		if (dso->kernel == DSO_TYPE_KERNEL)
			ret = dso__load_kernel_sym(dso, map);
		else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			ret = dso__load_guest_kernel_sym(dso, map);

		/* NOTE(review): machine may be NULL here - presumably
		 * machine__is() tolerates that; confirm. */
		if (machine__is(machine, "x86_64"))
			machine__map_x86_64_entry_trampolines(machine, dso);
		goto out;
	}

	dso->adjust_symbols = 0;

	if (perfmap) {
		ret = dso__load_perf_map(map_path, dso);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		goto out;
	}

	if (machine)
		root_dir = machine->root_dir;

	/* Scratch buffer reused for every candidate filename below. */
	name = malloc(PATH_MAX);
	if (!name)
		goto out;

	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;


	/*
	 * Read the build id if possible. This is required for
	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
	 */
	if (!dso->has_build_id &&
	    is_regular_file(dso->long_name)) {
	    __symbol__join_symfs(name, PATH_MAX, dso->long_name);
	    if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
		dso__set_build_id(dso, build_id);
	}

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;
		bool is_reg;
		bool nsexit;
		int sirc = -1;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		/*
		 * Build-id cache lookups must happen in our own mount
		 * namespace, so temporarily leave the dso's namespace.
		 */
		nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
		    symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		if (nsexit)
			nsinfo__mountns_exit(&nsc);

		is_reg = is_regular_file(name);
		if (is_reg)
			sirc = symsrc__init(ss, dso, name, symtab_type);

		if (nsexit)
			nsinfo__mountns_enter(dso->nsinfo, &nsc);

		if (!is_reg || sirc < 0)
			continue;

		/* First source with a symbol table wins for symbols. */
		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
		}

		/* First plausible runtime image wins for runtime info. */
		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}

	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss)
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
	else
		ret = -1;

	/* Add synthesized PLT symbols on top of the regular ones. */
	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	/* A deleted file is not an error worth reporting to the caller. */
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		ret = 0;
out:
	dso__set_loaded(dso);
	pthread_mutex_unlock(&dso->lock);
	nsinfo__mountns_exit(&nsc);

	return ret;
}
1681
1682 struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
1683 {
1684         struct maps *maps = &mg->maps;
1685         struct map *map;
1686         struct rb_node *node;
1687
1688         down_read(&maps->lock);
1689
1690         for (node = maps->names.rb_node; node; ) {
1691                 int rc;
1692
1693                 map = rb_entry(node, struct map, rb_node_name);
1694
1695                 rc = strcmp(map->dso->short_name, name);
1696                 if (rc < 0)
1697                         node = node->rb_left;
1698                 else if (rc > 0)
1699                         node = node->rb_right;
1700                 else
1701
1702                         goto out_unlock;
1703         }
1704
1705         map = NULL;
1706
1707 out_unlock:
1708         up_read(&maps->lock);
1709         return map;
1710 }
1711
1712 int dso__load_vmlinux(struct dso *dso, struct map *map,
1713                       const char *vmlinux, bool vmlinux_allocated)
1714 {
1715         int err = -1;
1716         struct symsrc ss;
1717         char symfs_vmlinux[PATH_MAX];
1718         enum dso_binary_type symtab_type;
1719
1720         if (vmlinux[0] == '/')
1721                 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1722         else
1723                 symbol__join_symfs(symfs_vmlinux, vmlinux);
1724
1725         if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1726                 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1727         else
1728                 symtab_type = DSO_BINARY_TYPE__VMLINUX;
1729
1730         if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1731                 return -1;
1732
1733         err = dso__load_sym(dso, map, &ss, &ss, 0);
1734         symsrc__destroy(&ss);
1735
1736         if (err > 0) {
1737                 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1738                         dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1739                 else
1740                         dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1741                 dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1742                 dso__set_loaded(dso);
1743                 pr_debug("Using %s for symbols\n", symfs_vmlinux);
1744         }
1745
1746         return err;
1747 }
1748
1749 int dso__load_vmlinux_path(struct dso *dso, struct map *map)
1750 {
1751         int i, err = 0;
1752         char *filename = NULL;
1753
1754         pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1755                  vmlinux_path__nr_entries + 1);
1756
1757         for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1758                 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
1759                 if (err > 0)
1760                         goto out;
1761         }
1762
1763         if (!symbol_conf.ignore_vmlinux_buildid)
1764                 filename = dso__build_id_filename(dso, NULL, 0, false);
1765         if (filename != NULL) {
1766                 err = dso__load_vmlinux(dso, map, filename, true);
1767                 if (err > 0)
1768                         goto out;
1769                 free(filename);
1770         }
1771 out:
1772         return err;
1773 }
1774
1775 static bool visible_dir_filter(const char *name, struct dirent *d)
1776 {
1777         if (d->d_type != DT_DIR)
1778                 return false;
1779         return lsdir_no_dot_filter(name, d);
1780 }
1781
1782 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1783 {
1784         char kallsyms_filename[PATH_MAX];
1785         int ret = -1;
1786         struct strlist *dirs;
1787         struct str_node *nd;
1788
1789         dirs = lsdir(dir, visible_dir_filter);
1790         if (!dirs)
1791                 return -1;
1792
1793         strlist__for_each_entry(nd, dirs) {
1794                 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1795                           "%s/%s/kallsyms", dir, nd->s);
1796                 if (!validate_kcore_addresses(kallsyms_filename, map)) {
1797                         strlcpy(dir, kallsyms_filename, dir_sz);
1798                         ret = 0;
1799                         break;
1800                 }
1801         }
1802
1803         strlist__delete(dirs);
1804
1805         return ret;
1806 }
1807
1808 /*
1809  * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
1810  * since access(R_OK) only checks with real UID/GID but open() use effective
1811  * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
1812  */
/*
 * True if 'file' can actually be opened for reading.  open() is used rather
 * than access(R_OK) because access() checks the real UID/GID while open()
 * honours effective IDs and capabilities (e.g. CAP_SYS_RAWIO for kcore).
 */
static bool filename__readable(const char *file)
{
	int fd = open(file, O_RDONLY);

	if (fd < 0)
		return false;
	close(fd);
	return true;
}
1821
/*
 * Pick the best kallsyms source for 'dso': the live /proc/kallsyms when the
 * dso's build-id matches the running kernel's, otherwise a kcore/kallsyms
 * copy from the build-id cache.  Returns a strdup()ed path the caller must
 * free, or NULL if nothing with a matching build-id can be found.
 */
static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
	char sbuild_id[SBUILD_ID_SIZE];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	/* Does the dso's build-id match the running kernel's? */
	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

	/* Try a fast path for /proc/kallsyms if possible */
	if (is_host) {
		/*
		 * Do not check the build-id cache, unless we know we cannot use
		 * /proc/kcore or module maps don't match to /proc/kallsyms.
		 * To check readability of /proc/kcore, do not use access(R_OK)
		 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
		 * can't check it.
		 */
		if (filename__readable("/proc/kcore") &&
		    !validate_kcore_addresses("/proc/kallsyms", map))
			goto proc_kallsyms;
	}

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	/* Find kallsyms in build-id cache with kcore */
	scnprintf(path, sizeof(path), "%s/%s/%s",
		  buildid_dir, DSO__NAME_KCORE, sbuild_id);

	/* On success find_matching_kcore() rewrote 'path' to the match. */
	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	/* Use current /proc/kallsyms if possible */
	if (is_host) {
proc_kallsyms:		/* also the target of the two gotos above */
		return strdup("/proc/kallsyms");
	}

	/* Finally, find a cache of kallsyms */
	if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);
}
1879
/*
 * Resolve symbols for the host kernel dso, trying in order: a kallsyms
 * file named by the user, a vmlinux named by the user, the built vmlinux
 * search path, and finally a kallsyms source chosen by dso__find_kallsyms().
 * Returns the number of symbols loaded, or < 0 on failure.
 */
static int dso__load_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fallback to another vmlinux (a
	 * x86_86 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	/* Last resort: a kallsyms file from the build-id cache or /proc. */
	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	/* free(NULL) is a no-op when a user-supplied name was used instead. */
	free(kallsyms_allocated_filename);

	if (err > 0 && !dso__is_kcore(dso)) {
		/* Not kcore: present the dso under the generic kallsyms name. */
		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}
1940
1941 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
1942 {
1943         int err;
1944         const char *kallsyms_filename = NULL;
1945         struct machine *machine;
1946         char path[PATH_MAX];
1947
1948         if (!map->groups) {
1949                 pr_debug("Guest kernel map hasn't the point to groups\n");
1950                 return -1;
1951         }
1952         machine = map->groups->machine;
1953
1954         if (machine__is_default_guest(machine)) {
1955                 /*
1956                  * if the user specified a vmlinux filename, use it and only
1957                  * it, reporting errors to the user if it cannot be used.
1958                  * Or use file guest_kallsyms inputted by user on commandline
1959                  */
1960                 if (symbol_conf.default_guest_vmlinux_name != NULL) {
1961                         err = dso__load_vmlinux(dso, map,
1962                                                 symbol_conf.default_guest_vmlinux_name,
1963                                                 false);
1964                         return err;
1965                 }
1966
1967                 kallsyms_filename = symbol_conf.default_guest_kallsyms;
1968                 if (!kallsyms_filename)
1969                         return -1;
1970         } else {
1971                 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
1972                 kallsyms_filename = path;
1973         }
1974
1975         err = dso__load_kallsyms(dso, kallsyms_filename, map);
1976         if (err > 0)
1977                 pr_debug("Using %s for symbols\n", kallsyms_filename);
1978         if (err > 0 && !dso__is_kcore(dso)) {
1979                 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1980                 dso__set_long_name(dso, machine->mmap_name, false);
1981                 map__fixup_start(map);
1982                 map__fixup_end(map);
1983         }
1984
1985         return err;
1986 }
1987
1988 static void vmlinux_path__exit(void)
1989 {
1990         while (--vmlinux_path__nr_entries >= 0)
1991                 zfree(&vmlinux_path[vmlinux_path__nr_entries]);
1992         vmlinux_path__nr_entries = 0;
1993
1994         zfree(&vmlinux_path);
1995 }
1996
/* Fixed vmlinux candidates, tried first by vmlinux_path__init(). */
static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};

/* Release-dependent candidates; each %s is filled with the kernel version. */
static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
};
2009
2010 static int vmlinux_path__add(const char *new_entry)
2011 {
2012         vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
2013         if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2014                 return -1;
2015         ++vmlinux_path__nr_entries;
2016
2017         return 0;
2018 }
2019
2020 static int vmlinux_path__init(struct perf_env *env)
2021 {
2022         struct utsname uts;
2023         char bf[PATH_MAX];
2024         char *kernel_version;
2025         unsigned int i;
2026
2027         vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
2028                               ARRAY_SIZE(vmlinux_paths_upd)));
2029         if (vmlinux_path == NULL)
2030                 return -1;
2031
2032         for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
2033                 if (vmlinux_path__add(vmlinux_paths[i]) < 0)
2034                         goto out_fail;
2035
2036         /* only try kernel version if no symfs was given */
2037         if (symbol_conf.symfs[0] != 0)
2038                 return 0;
2039
2040         if (env) {
2041                 kernel_version = env->os_release;
2042         } else {
2043                 if (uname(&uts) < 0)
2044                         goto out_fail;
2045
2046                 kernel_version = uts.release;
2047         }
2048
2049         for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
2050                 snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
2051                 if (vmlinux_path__add(bf) < 0)
2052                         goto out_fail;
2053         }
2054
2055         return 0;
2056
2057 out_fail:
2058         vmlinux_path__exit();
2059         return -1;
2060 }
2061
2062 int setup_list(struct strlist **list, const char *list_str,
2063                       const char *list_name)
2064 {
2065         if (list_str == NULL)
2066                 return 0;
2067
2068         *list = strlist__new(list_str, NULL);
2069         if (!*list) {
2070                 pr_err("problems parsing %s list\n", list_name);
2071                 return -1;
2072         }
2073
2074         symbol_conf.has_filter = true;
2075         return 0;
2076 }
2077
2078 int setup_intlist(struct intlist **list, const char *list_str,
2079                   const char *list_name)
2080 {
2081         if (list_str == NULL)
2082                 return 0;
2083
2084         *list = intlist__new(list_str);
2085         if (!*list) {
2086                 pr_err("problems parsing %s list\n", list_name);
2087                 return -1;
2088         }
2089         return 0;
2090 }
2091
/*
 * Read /proc/sys/kernel/kptr_restrict and decide whether kernel pointers
 * are hidden from us: non-root is restricted for any non-zero setting,
 * root only for setting 2.  Returns false if the file cannot be read.
 */
static bool symbol__read_kptr_restrict(void)
{
	FILE *fp;
	char line[8];
	bool restricted = false;

	fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
	if (fp == NULL)
		return false;

	if (fgets(line, sizeof(line), fp) != NULL) {
		int val = atoi(line);

		if (geteuid() != 0 || getuid() != 0)
			restricted = (val != 0);
		else
			restricted = (val == 2);
	}

	fclose(fp);

	return restricted;
}
2110
/*
 * Reserve room for a struct annotation in each symbol's private area.
 * Must be called before symbol__init() (which freezes priv_size);
 * a no-op if annotation space was already requested.
 * Returns 0 on success, -1 if symbol__init() already ran.
 */
int symbol__annotation_init(void)
{
	if (symbol_conf.init_annotation)
		return 0;

	if (symbol_conf.initialized) {
		pr_err("Annotation needs to be init before symbol__init()\n");
		return -1;
	}

	symbol_conf.priv_size += sizeof(struct annotation);
	symbol_conf.init_annotation = true;
	return 0;
}
2125
/*
 * One-time initialisation of the symbol machinery: size the per-symbol
 * private area, initialise the ELF reader, build the vmlinux search path
 * (unless disabled) and parse the user-supplied dso/comm/pid/tid/symbol/
 * backtrace-stop filter lists.  Returns 0 on success; -1 on failure,
 * freeing any filter lists already built.
 */
int symbol__init(struct perf_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	/* Keep per-symbol private payloads u64-aligned. */
	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only non valid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_intlist(&symbol_conf.pid_list,
		       symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
		       symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_tid_list;

	if (setup_list(&symbol_conf.bt_stop_list,
		       symbol_conf.bt_stop_list_str, "symbol") < 0)
		goto out_free_sym_list;

	/*
	 * A path to symbols of "/" is identical to ""
	 * reset here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		/* this copy was allocated by realpath(); drop it */
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

	/* Unwind: free the filter lists in reverse order of construction. */
out_free_sym_list:
	strlist__delete(symbol_conf.sym_list);
out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}
2202
2203 void symbol__exit(void)
2204 {
2205         if (!symbol_conf.initialized)
2206                 return;
2207         strlist__delete(symbol_conf.bt_stop_list);
2208         strlist__delete(symbol_conf.sym_list);
2209         strlist__delete(symbol_conf.dso_list);
2210         strlist__delete(symbol_conf.comm_list);
2211         intlist__delete(symbol_conf.tid_list);
2212         intlist__delete(symbol_conf.pid_list);
2213         vmlinux_path__exit();
2214         symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2215         symbol_conf.bt_stop_list = NULL;
2216         symbol_conf.initialized = false;
2217 }
2218
2219 int symbol__config_symfs(const struct option *opt __maybe_unused,
2220                          const char *dir, int unset __maybe_unused)
2221 {
2222         char *bf = NULL;
2223         int ret;
2224
2225         symbol_conf.symfs = strdup(dir);
2226         if (symbol_conf.symfs == NULL)
2227                 return -ENOMEM;
2228
2229         /* skip the locally configured cache if a symfs is given, and
2230          * config buildid dir to symfs/.debug
2231          */
2232         ret = asprintf(&bf, "%s/%s", dir, ".debug");
2233         if (ret < 0)
2234                 return -ENOMEM;
2235
2236         set_buildid_dir(bf);
2237
2238         free(bf);
2239         return 0;
2240 }
2241
2242 struct mem_info *mem_info__get(struct mem_info *mi)
2243 {
2244         if (mi)
2245                 refcount_inc(&mi->refcnt);
2246         return mi;
2247 }
2248
2249 void mem_info__put(struct mem_info *mi)
2250 {
2251         if (mi && refcount_dec_and_test(&mi->refcnt))
2252                 free(mi);
2253 }
2254
2255 struct mem_info *mem_info__new(void)
2256 {
2257         struct mem_info *mi = zalloc(sizeof(*mi));
2258
2259         if (mi)
2260                 refcount_set(&mi->refcnt, 1);
2261         return mi;
2262 }