tools/perf/util/symbol.c (GNU Linux-libre 5.10.217-gnu1)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <stdio.h>
6 #include <string.h>
7 #include <linux/capability.h>
8 #include <linux/kernel.h>
9 #include <linux/mman.h>
10 #include <linux/string.h>
11 #include <linux/time64.h>
12 #include <sys/types.h>
13 #include <sys/stat.h>
14 #include <sys/param.h>
15 #include <fcntl.h>
16 #include <unistd.h>
17 #include <inttypes.h>
18 #include "annotate.h"
19 #include "build-id.h"
20 #include "cap.h"
21 #include "dso.h"
22 #include "util.h" // lsdir()
23 #include "debug.h"
24 #include "event.h"
25 #include "machine.h"
26 #include "map.h"
27 #include "symbol.h"
28 #include "map_symbol.h"
29 #include "mem-events.h"
30 #include "symsrc.h"
31 #include "strlist.h"
32 #include "intlist.h"
33 #include "namespaces.h"
34 #include "header.h"
35 #include "path.h"
36 #include <linux/ctype.h>
37 #include <linux/zalloc.h>
38
39 #include <elf.h>
40 #include <limits.h>
41 #include <symbol/kallsyms.h>
42 #include <sys/utsname.h>
43
44 static int dso__load_kernel_sym(struct dso *dso, struct map *map);
45 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
46 static bool symbol__is_idle(const char *name);
47
48 int vmlinux_path__nr_entries;
49 char **vmlinux_path;
50
51 struct symbol_conf symbol_conf = {
52         .nanosecs               = false,
53         .use_modules            = true,
54         .try_vmlinux_path       = true,
55         .demangle               = true,
56         .demangle_kernel        = false,
57         .cumulate_callchain     = true,
58         .time_quantum           = 100 * NSEC_PER_MSEC, /* 100ms */
59         .show_hist_headers      = true,
60         .symfs                  = "",
61         .event_group            = true,
62         .inline_name            = true,
63         .res_sample             = 0,
64 };
65
66 static enum dso_binary_type binary_type_symtab[] = {
67         DSO_BINARY_TYPE__KALLSYMS,
68         DSO_BINARY_TYPE__GUEST_KALLSYMS,
69         DSO_BINARY_TYPE__JAVA_JIT,
70         DSO_BINARY_TYPE__DEBUGLINK,
71         DSO_BINARY_TYPE__BUILD_ID_CACHE,
72         DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
73         DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
74         DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
75         DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
76         DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
77         DSO_BINARY_TYPE__GUEST_KMODULE,
78         DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
79         DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
80         DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
81         DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
82         DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
83         DSO_BINARY_TYPE__NOT_FOUND,
84 };
85
86 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
87
88 static bool symbol_type__filter(char symbol_type)
89 {
90         symbol_type = toupper(symbol_type);
91         return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
92 }
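/*
 * Illustrative sketch (symbol name and address below are made up): the
 * type letters follow the nm(1)/kallsyms convention, so this filter
 * accepts 'T'/'t' (text), 'W'/'w' (weak), 'D'/'d' (initialized data)
 * and 'B'/'b' (BSS).  A kallsyms line such as
 *
 *   ffffffff810c9000 T schedule
 *
 * is kept because of its 'T' type letter.
 */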
93
94 static int prefix_underscores_count(const char *str)
95 {
96         const char *tail = str;
97
98         while (*tail == '_')
99                 tail++;
100
101         return tail - str;
102 }
103
104 const char * __weak arch__normalize_symbol_name(const char *name)
105 {
106         return name;
107 }
108
109 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
110 {
111         return strcmp(namea, nameb);
112 }
113
114 int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
115                                         unsigned int n)
116 {
117         return strncmp(namea, nameb, n);
118 }
119
120 int __weak arch__choose_best_symbol(struct symbol *syma,
121                                     struct symbol *symb __maybe_unused)
122 {
123         /* Avoid "SyS" kernel syscall aliases */
124         if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
125                 return SYMBOL_B;
126         if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
127                 return SYMBOL_B;
128
129         return SYMBOL_A;
130 }
131
132 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
133 {
134         s64 a;
135         s64 b;
136         size_t na, nb;
137
138         /* Prefer a symbol with non zero length */
139         a = syma->end - syma->start;
140         b = symb->end - symb->start;
141         if ((b == 0) && (a > 0))
142                 return SYMBOL_A;
143         else if ((a == 0) && (b > 0))
144                 return SYMBOL_B;
145
146         /* Prefer a non weak symbol over a weak one */
147         a = syma->binding == STB_WEAK;
148         b = symb->binding == STB_WEAK;
149         if (b && !a)
150                 return SYMBOL_A;
151         if (a && !b)
152                 return SYMBOL_B;
153
154         /* Prefer a global symbol over a non global one */
155         a = syma->binding == STB_GLOBAL;
156         b = symb->binding == STB_GLOBAL;
157         if (a && !b)
158                 return SYMBOL_A;
159         if (b && !a)
160                 return SYMBOL_B;
161
162         /* Prefer a symbol with less underscores */
163         a = prefix_underscores_count(syma->name);
164         b = prefix_underscores_count(symb->name);
165         if (b > a)
166                 return SYMBOL_A;
167         else if (a > b)
168                 return SYMBOL_B;
169
170         /* Choose the symbol with the longest name */
171         na = strlen(syma->name);
172         nb = strlen(symb->name);
173         if (na > nb)
174                 return SYMBOL_A;
175         else if (na < nb)
176                 return SYMBOL_B;
177
178         return arch__choose_best_symbol(syma, symb);
179 }
180
181 void symbols__fixup_duplicate(struct rb_root_cached *symbols)
182 {
183         struct rb_node *nd;
184         struct symbol *curr, *next;
185
186         if (symbol_conf.allow_aliases)
187                 return;
188
189         nd = rb_first_cached(symbols);
190
191         while (nd) {
192                 curr = rb_entry(nd, struct symbol, rb_node);
193 again:
194                 nd = rb_next(&curr->rb_node);
195                 next = rb_entry(nd, struct symbol, rb_node);
196
197                 if (!nd)
198                         break;
199
200                 if (curr->start != next->start)
201                         continue;
202
203                 if (choose_best_symbol(curr, next) == SYMBOL_A) {
204                         rb_erase_cached(&next->rb_node, symbols);
205                         symbol__delete(next);
206                         goto again;
207                 } else {
208                         nd = rb_next(&curr->rb_node);
209                         rb_erase_cached(&curr->rb_node, symbols);
210                         symbol__delete(curr);
211                 }
212         }
213 }
214
215 /* Update zero-sized symbols using the address of the next symbol */
216 void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
217 {
218         struct rb_node *nd, *prevnd = rb_first_cached(symbols);
219         struct symbol *curr, *prev;
220
221         if (prevnd == NULL)
222                 return;
223
224         curr = rb_entry(prevnd, struct symbol, rb_node);
225
226         for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
227                 prev = curr;
228                 curr = rb_entry(nd, struct symbol, rb_node);
229
230                 /*
231                  * On some architectures the kernel text segment starts at a low
232                  * memory address, while modules are located at high memory
233                  * addresses (or vice versa).  The gap between the end of the
234                  * kernel text segment and the beginning of the first module's
235                  * text segment is very big.  Therefore do not fill this gap and
236                  * do not assign it to the kernel dso map (kallsyms).
237                  *
238                  * In kallsyms, module symbols are identified by a '[' character,
239                  * as in:
240                  *   ffffffffc1937000 T hdmi_driver_init  [snd_hda_codec_hdmi]
241                  */
242                 if (prev->end == prev->start) {
243                         /* Last kernel/module symbol mapped to end of page */
244                         if (is_kallsyms && (!strchr(prev->name, '[') !=
245                                             !strchr(curr->name, '[')))
246                                 prev->end = roundup(prev->end + 4096, 4096);
247                         else
248                                 prev->end = curr->start;
249
250                         pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
251                                   __func__, prev->name, prev->end);
252                 }
253         }
254
255         /* Last entry */
256         if (curr->end == curr->start)
257                 curr->end = roundup(curr->start, 4096) + 4096;
258 }
259
260 void maps__fixup_end(struct maps *maps)
261 {
262         struct map *prev = NULL, *curr;
263
264         down_write(&maps->lock);
265
266         maps__for_each_entry(maps, curr) {
267                 if (prev != NULL && !prev->end)
268                         prev->end = curr->start;
269
270                 prev = curr;
271         }
272
273         /*
274          * We still don't have the actual symbols, so guess the
275          * last map's final address.
276          */
277         if (curr && !curr->end)
278                 curr->end = ~0ULL;
279
280         up_write(&maps->lock);
281 }
282
283 struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
284 {
285         size_t namelen = strlen(name) + 1;
286         struct symbol *sym = calloc(1, (symbol_conf.priv_size +
287                                         sizeof(*sym) + namelen));
288         if (sym == NULL)
289                 return NULL;
290
291         if (symbol_conf.priv_size) {
292                 if (symbol_conf.init_annotation) {
293                         struct annotation *notes = (void *)sym;
294                         pthread_mutex_init(&notes->lock, NULL);
295                 }
296                 sym = ((void *)sym) + symbol_conf.priv_size;
297         }
298
299         sym->start   = start;
300         sym->end     = len ? start + len : start;
301         sym->type    = type;
302         sym->binding = binding;
303         sym->namelen = namelen - 1;
304
305         pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
306                   __func__, name, start, sym->end);
307         memcpy(sym->name, name, namelen);
308
309         return sym;
310 }
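/*
 * Layout sketch of the allocation above when symbol_conf.priv_size is
 * non-zero:
 *
 *   [ priv_size bytes of private data ][ struct symbol ][ name + '\0' ]
 *                                       ^ pointer returned to the caller
 *
 * symbol__delete() below steps the pointer back by priv_size before
 * freeing, so the whole block is released in one go.
 */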
311
312 void symbol__delete(struct symbol *sym)
313 {
314         free(((void *)sym) - symbol_conf.priv_size);
315 }
316
317 void symbols__delete(struct rb_root_cached *symbols)
318 {
319         struct symbol *pos;
320         struct rb_node *next = rb_first_cached(symbols);
321
322         while (next) {
323                 pos = rb_entry(next, struct symbol, rb_node);
324                 next = rb_next(&pos->rb_node);
325                 rb_erase_cached(&pos->rb_node, symbols);
326                 symbol__delete(pos);
327         }
328 }
329
330 void __symbols__insert(struct rb_root_cached *symbols,
331                        struct symbol *sym, bool kernel)
332 {
333         struct rb_node **p = &symbols->rb_root.rb_node;
334         struct rb_node *parent = NULL;
335         const u64 ip = sym->start;
336         struct symbol *s;
337         bool leftmost = true;
338
339         if (kernel) {
340                 const char *name = sym->name;
341                 /*
342                  * ppc64 uses function descriptors and appends a '.' to the
343                  * start of every instruction address. Remove it.
344                  */
345                 if (name[0] == '.')
346                         name++;
347                 sym->idle = symbol__is_idle(name);
348         }
349
350         while (*p != NULL) {
351                 parent = *p;
352                 s = rb_entry(parent, struct symbol, rb_node);
353                 if (ip < s->start)
354                         p = &(*p)->rb_left;
355                 else {
356                         p = &(*p)->rb_right;
357                         leftmost = false;
358                 }
359         }
360         rb_link_node(&sym->rb_node, parent, p);
361         rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
362 }
363
364 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
365 {
366         __symbols__insert(symbols, sym, false);
367 }
368
369 static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
370 {
371         struct rb_node *n;
372
373         if (symbols == NULL)
374                 return NULL;
375
376         n = symbols->rb_root.rb_node;
377
378         while (n) {
379                 struct symbol *s = rb_entry(n, struct symbol, rb_node);
380
381                 if (ip < s->start)
382                         n = n->rb_left;
383                 else if (ip > s->end || (ip == s->end && ip != s->start))
384                         n = n->rb_right;
385                 else
386                         return s;
387         }
388
389         return NULL;
390 }
391
392 static struct symbol *symbols__first(struct rb_root_cached *symbols)
393 {
394         struct rb_node *n = rb_first_cached(symbols);
395
396         if (n)
397                 return rb_entry(n, struct symbol, rb_node);
398
399         return NULL;
400 }
401
402 static struct symbol *symbols__last(struct rb_root_cached *symbols)
403 {
404         struct rb_node *n = rb_last(&symbols->rb_root);
405
406         if (n)
407                 return rb_entry(n, struct symbol, rb_node);
408
409         return NULL;
410 }
411
412 static struct symbol *symbols__next(struct symbol *sym)
413 {
414         struct rb_node *n = rb_next(&sym->rb_node);
415
416         if (n)
417                 return rb_entry(n, struct symbol, rb_node);
418
419         return NULL;
420 }
421
422 static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
423 {
424         struct rb_node **p = &symbols->rb_root.rb_node;
425         struct rb_node *parent = NULL;
426         struct symbol_name_rb_node *symn, *s;
427         bool leftmost = true;
428
429         symn = container_of(sym, struct symbol_name_rb_node, sym);
430
431         while (*p != NULL) {
432                 parent = *p;
433                 s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
434                 if (strcmp(sym->name, s->sym.name) < 0)
435                         p = &(*p)->rb_left;
436                 else {
437                         p = &(*p)->rb_right;
438                         leftmost = false;
439                 }
440         }
441         rb_link_node(&symn->rb_node, parent, p);
442         rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
443 }
444
445 static void symbols__sort_by_name(struct rb_root_cached *symbols,
446                                   struct rb_root_cached *source)
447 {
448         struct rb_node *nd;
449
450         for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
451                 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
452                 symbols__insert_by_name(symbols, pos);
453         }
454 }
455
456 int symbol__match_symbol_name(const char *name, const char *str,
457                               enum symbol_tag_include includes)
458 {
459         const char *versioning;
460
461         if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
462             (versioning = strstr(name, "@@"))) {
463                 int len = strlen(str);
464
465                 if (len < versioning - name)
466                         len = versioning - name;
467
468                 return arch__compare_symbol_names_n(name, str, len);
469         } else
470                 return arch__compare_symbol_names(name, str);
471 }
472
473 static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
474                                             const char *name,
475                                             enum symbol_tag_include includes)
476 {
477         struct rb_node *n;
478         struct symbol_name_rb_node *s = NULL;
479
480         if (symbols == NULL)
481                 return NULL;
482
483         n = symbols->rb_root.rb_node;
484
485         while (n) {
486                 int cmp;
487
488                 s = rb_entry(n, struct symbol_name_rb_node, rb_node);
489                 cmp = symbol__match_symbol_name(s->sym.name, name, includes);
490
491                 if (cmp > 0)
492                         n = n->rb_left;
493                 else if (cmp < 0)
494                         n = n->rb_right;
495                 else
496                         break;
497         }
498
499         if (n == NULL)
500                 return NULL;
501
502         if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
503                 /* return first symbol that has same name (if any) */
504                 for (n = rb_prev(n); n; n = rb_prev(n)) {
505                         struct symbol_name_rb_node *tmp;
506
507                         tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
508                         if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
509                                 break;
510
511                         s = tmp;
512                 }
513
514         return &s->sym;
515 }
516
517 void dso__reset_find_symbol_cache(struct dso *dso)
518 {
519         dso->last_find_result.addr   = 0;
520         dso->last_find_result.symbol = NULL;
521 }
522
523 void dso__insert_symbol(struct dso *dso, struct symbol *sym)
524 {
525         __symbols__insert(&dso->symbols, sym, dso->kernel);
526
527         /* update the symbol cache if necessary */
528         if (dso->last_find_result.addr >= sym->start &&
529             (dso->last_find_result.addr < sym->end ||
530             sym->start == sym->end)) {
531                 dso->last_find_result.symbol = sym;
532         }
533 }
534
535 void dso__delete_symbol(struct dso *dso, struct symbol *sym)
536 {
537         rb_erase_cached(&sym->rb_node, &dso->symbols);
538         symbol__delete(sym);
539         dso__reset_find_symbol_cache(dso);
540 }
541
542 struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
543 {
544         if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
545                 dso->last_find_result.addr   = addr;
546                 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
547         }
548
549         return dso->last_find_result.symbol;
550 }
551
552 struct symbol *dso__first_symbol(struct dso *dso)
553 {
554         return symbols__first(&dso->symbols);
555 }
556
557 struct symbol *dso__last_symbol(struct dso *dso)
558 {
559         return symbols__last(&dso->symbols);
560 }
561
562 struct symbol *dso__next_symbol(struct symbol *sym)
563 {
564         return symbols__next(sym);
565 }
566
567 struct symbol *symbol__next_by_name(struct symbol *sym)
568 {
569         struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
570         struct rb_node *n = rb_next(&s->rb_node);
571
572         return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
573 }
574
575  /*
576   * Returns the first symbol that matches @name.
577   */
578 struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
579 {
580         struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
581                                                  SYMBOL_TAG_INCLUDE__NONE);
582         if (!s)
583                 s = symbols__find_by_name(&dso->symbol_names, name,
584                                           SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
585         return s;
586 }
587
588 void dso__sort_by_name(struct dso *dso)
589 {
590         dso__set_sorted_by_name(dso);
591         return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
592 }
593
594 /*
595  * Parse the leading hex characters into *long_val.
596  * Return the number of characters processed.
597  */
598 static int hex2u64(const char *ptr, u64 *long_val)
599 {
600         char *p;
601
602         *long_val = strtoull(ptr, &p, 16);
603
604         return p - ptr;
605 }
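/*
 * Hypothetical example: hex2u64("ffffffffc1937000 T foo", &val) stores
 * 0xffffffffc1937000 in val and returns 16, the number of hex digits
 * consumed before the first non-hex character.
 */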
606
607
608 int modules__parse(const char *filename, void *arg,
609                    int (*process_module)(void *arg, const char *name,
610                                          u64 start, u64 size))
611 {
612         char *line = NULL;
613         size_t n;
614         FILE *file;
615         int err = 0;
616
617         file = fopen(filename, "r");
618         if (file == NULL)
619                 return -1;
620
621         while (1) {
622                 char name[PATH_MAX];
623                 u64 start, size;
624                 char *sep, *endptr;
625                 ssize_t line_len;
626
627                 line_len = getline(&line, &n, file);
628                 if (line_len < 0) {
629                         if (feof(file))
630                                 break;
631                         err = -1;
632                         goto out;
633                 }
634
635                 if (!line) {
636                         err = -1;
637                         goto out;
638                 }
639
640                 line[--line_len] = '\0'; /* \n */
641
642                 sep = strrchr(line, 'x');
643                 if (sep == NULL)
644                         continue;
645
646                 hex2u64(sep + 1, &start);
647
648                 sep = strchr(line, ' ');
649                 if (sep == NULL)
650                         continue;
651
652                 *sep = '\0';
653
654                 scnprintf(name, sizeof(name), "[%s]", line);
655
656                 size = strtoul(sep + 1, &endptr, 0);
657                 if (*endptr != ' ' && *endptr != '\t')
658                         continue;
659
660                 err = process_module(arg, name, start, size);
661                 if (err)
662                         break;
663         }
664 out:
665         free(line);
666         fclose(file);
667         return err;
668 }
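/*
 * Illustrative /proc/modules line (module name, size and load address
 * are made up; the remaining fields are ignored by the parser above):
 *
 *   snd_hda_codec_hdmi 77824 1 - Live 0xffffffffc1937000
 *
 * The loop takes the start address from the trailing "0x..." field,
 * the size from the second field, and passes the name to
 * process_module() wrapped in brackets, i.e. "[snd_hda_codec_hdmi]".
 */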
669
670 /*
671  * These are symbols in the kernel image, so make sure that
672  * sym is from a kernel DSO.
673  */
674 static bool symbol__is_idle(const char *name)
675 {
676         const char * const idle_symbols[] = {
677                 "acpi_idle_do_entry",
678                 "acpi_processor_ffh_cstate_enter",
679                 "arch_cpu_idle",
680                 "cpu_idle",
681                 "cpu_startup_entry",
682                 "idle_cpu",
683                 "intel_idle",
684                 "default_idle",
685                 "native_safe_halt",
686                 "enter_idle",
687                 "exit_idle",
688                 "mwait_idle",
689                 "mwait_idle_with_hints",
690                 "mwait_idle_with_hints.constprop.0",
691                 "poll_idle",
692                 "ppc64_runlatch_off",
693                 "pseries_dedicated_idle_sleep",
694                 "psw_idle",
695                 "psw_idle_exit",
696                 NULL
697         };
698         int i;
699         static struct strlist *idle_symbols_list;
700
701         if (idle_symbols_list)
702                 return strlist__has_entry(idle_symbols_list, name);
703
704         idle_symbols_list = strlist__new(NULL, NULL);
705
706         for (i = 0; idle_symbols[i]; i++)
707                 strlist__add(idle_symbols_list, idle_symbols[i]);
708
709         return strlist__has_entry(idle_symbols_list, name);
710 }
711
712 static int map__process_kallsym_symbol(void *arg, const char *name,
713                                        char type, u64 start)
714 {
715         struct symbol *sym;
716         struct dso *dso = arg;
717         struct rb_root_cached *root = &dso->symbols;
718
719         if (!symbol_type__filter(type))
720                 return 0;
721
722         /*
723          * module symbols are not sorted so we add all
724          * symbols, setting length to 0, and rely on
725          * symbols__fixup_end() to fix it up.
726          */
727         sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
728         if (sym == NULL)
729                 return -ENOMEM;
730         /*
731          * We will pass the symbols to the filter later, in
732          * map__split_kallsyms, when we have split the maps per module
733          */
734         __symbols__insert(root, sym, !strchr(name, '['));
735
736         return 0;
737 }
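/*
 * Example kallsyms input for the callback above (addresses made up;
 * the module name is tab-separated from the symbol name):
 *
 *   ffffffff810c9000 T schedule
 *   ffffffffc1937000 t hdmi_driver_init    [snd_hda_codec_hdmi]
 *
 * The "[module]" suffix stays in the symbol name at this point and is
 * only split off later, in maps__split_kallsyms().
 */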
738
739 /*
740  * Loads the function entries in /proc/kallsyms into kernel_map->dso,
741  * so that we can in the next step set the symbol ->end address and then
742  * call kernel_maps__split_kallsyms.
743  */
744 static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
745 {
746         return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
747 }
748
749 static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
750 {
751         struct map *curr_map;
752         struct symbol *pos;
753         int count = 0;
754         struct rb_root_cached old_root = dso->symbols;
755         struct rb_root_cached *root = &dso->symbols;
756         struct rb_node *next = rb_first_cached(root);
757
758         if (!kmaps)
759                 return -1;
760
761         *root = RB_ROOT_CACHED;
762
763         while (next) {
764                 char *module;
765
766                 pos = rb_entry(next, struct symbol, rb_node);
767                 next = rb_next(&pos->rb_node);
768
769                 rb_erase_cached(&pos->rb_node, &old_root);
770                 RB_CLEAR_NODE(&pos->rb_node);
771                 module = strchr(pos->name, '\t');
772                 if (module)
773                         *module = '\0';
774
775                 curr_map = maps__find(kmaps, pos->start);
776
777                 if (!curr_map) {
778                         symbol__delete(pos);
779                         continue;
780                 }
781
782                 pos->start -= curr_map->start - curr_map->pgoff;
783                 if (pos->end > curr_map->end)
784                         pos->end = curr_map->end;
785                 if (pos->end)
786                         pos->end -= curr_map->start - curr_map->pgoff;
787                 symbols__insert(&curr_map->dso->symbols, pos);
788                 ++count;
789         }
790
791         /* Symbols have been adjusted */
792         dso->adjust_symbols = 1;
793
794         return count;
795 }
796
797 /*
798  * Split the symbols into maps, making sure there are no overlaps, i.e. the
799  * kernel range is broken into several maps, named [kernel].N, as we don't have
800  * the original ELF section names that vmlinux has.
801  */
802 static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
803                                 struct map *initial_map)
804 {
805         struct machine *machine;
806         struct map *curr_map = initial_map;
807         struct symbol *pos;
808         int count = 0, moved = 0;
809         struct rb_root_cached *root = &dso->symbols;
810         struct rb_node *next = rb_first_cached(root);
811         int kernel_range = 0;
812         bool x86_64;
813
814         if (!kmaps)
815                 return -1;
816
817         machine = kmaps->machine;
818
819         x86_64 = machine__is(machine, "x86_64");
820
821         while (next) {
822                 char *module;
823
824                 pos = rb_entry(next, struct symbol, rb_node);
825                 next = rb_next(&pos->rb_node);
826
827                 module = strchr(pos->name, '\t');
828                 if (module) {
829                         if (!symbol_conf.use_modules)
830                                 goto discard_symbol;
831
832                         *module++ = '\0';
833
834                         if (strcmp(curr_map->dso->short_name, module)) {
835                                 if (curr_map != initial_map &&
836                                     dso->kernel == DSO_SPACE__KERNEL_GUEST &&
837                                     machine__is_default_guest(machine)) {
838                                         /*
839                                          * We assume all symbols of a module are
840                                          * contiguous in kallsyms, so curr_map
841                                          * points to a module and all its
842                                          * symbols are in its kmap. Mark it as
843                                          * loaded.
844                                          */
845                                         dso__set_loaded(curr_map->dso);
846                                 }
847
848                                 curr_map = maps__find_by_name(kmaps, module);
849                                 if (curr_map == NULL) {
850                                         pr_debug("%s/proc/{kallsyms,modules} "
851                                                  "inconsistency while looking "
852                                                  "for \"%s\" module!\n",
853                                                  machine->root_dir, module);
854                                         curr_map = initial_map;
855                                         goto discard_symbol;
856                                 }
857
858                                 if (curr_map->dso->loaded &&
859                                     !machine__is_default_guest(machine))
860                                         goto discard_symbol;
861                         }
862                         /*
863                          * So that we look just like we get from .ko files,
864                          * i.e. not prelinked, relative to initial_map->start.
865                          */
866                         pos->start = curr_map->map_ip(curr_map, pos->start);
867                         pos->end   = curr_map->map_ip(curr_map, pos->end);
868                 } else if (x86_64 && is_entry_trampoline(pos->name)) {
869                         /*
870                          * These symbols are not needed anymore since the
871                          * trampoline maps refer to the text section and its
872                          * symbols instead. Avoid having to deal with
873                          * relocations, and the assumption that the first symbol
874                          * is the start of kernel text, by simply removing the
875                          * symbols at this point.
876                          */
877                         goto discard_symbol;
878                 } else if (curr_map != initial_map) {
879                         char dso_name[PATH_MAX];
880                         struct dso *ndso;
881
882                         if (delta) {
883                                 /* Kernel was relocated at boot time */
884                                 pos->start -= delta;
885                                 pos->end -= delta;
886                         }
887
888                         if (count == 0) {
889                                 curr_map = initial_map;
890                                 goto add_symbol;
891                         }
892
893                         if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
894                                 snprintf(dso_name, sizeof(dso_name),
895                                         "[guest.kernel].%d",
896                                         kernel_range++);
897                         else
898                                 snprintf(dso_name, sizeof(dso_name),
899                                         "[kernel].%d",
900                                         kernel_range++);
901
902                         ndso = dso__new(dso_name);
903                         if (ndso == NULL)
904                                 return -1;
905
906                         ndso->kernel = dso->kernel;
907
908                         curr_map = map__new2(pos->start, ndso);
909                         if (curr_map == NULL) {
910                                 dso__put(ndso);
911                                 return -1;
912                         }
913
914                         curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
915                         maps__insert(kmaps, curr_map);
916                         ++kernel_range;
917                 } else if (delta) {
918                         /* Kernel was relocated at boot time */
919                         pos->start -= delta;
920                         pos->end -= delta;
921                 }
922 add_symbol:
923                 if (curr_map != initial_map) {
924                         rb_erase_cached(&pos->rb_node, root);
925                         symbols__insert(&curr_map->dso->symbols, pos);
926                         ++moved;
927                 } else
928                         ++count;
929
930                 continue;
931 discard_symbol:
932                 rb_erase_cached(&pos->rb_node, root);
933                 symbol__delete(pos);
934         }
935
936         if (curr_map != initial_map &&
937             dso->kernel == DSO_SPACE__KERNEL_GUEST &&
938             machine__is_default_guest(kmaps->machine)) {
939                 dso__set_loaded(curr_map->dso);
940         }
941
942         return count + moved;
943 }
944
945 bool symbol__restricted_filename(const char *filename,
946                                  const char *restricted_filename)
947 {
948         bool restricted = false;
949
950         if (symbol_conf.kptr_restrict) {
951                 char *r = realpath(filename, NULL);
952
953                 if (r != NULL) {
954                         restricted = strcmp(r, restricted_filename) == 0;
955                         free(r);
956                         return restricted;
957                 }
958         }
959
960         return restricted;
961 }
962
963 struct module_info {
964         struct rb_node rb_node;
965         char *name;
966         u64 start;
967 };
968
969 static void add_module(struct module_info *mi, struct rb_root *modules)
970 {
971         struct rb_node **p = &modules->rb_node;
972         struct rb_node *parent = NULL;
973         struct module_info *m;
974
975         while (*p != NULL) {
976                 parent = *p;
977                 m = rb_entry(parent, struct module_info, rb_node);
978                 if (strcmp(mi->name, m->name) < 0)
979                         p = &(*p)->rb_left;
980                 else
981                         p = &(*p)->rb_right;
982         }
983         rb_link_node(&mi->rb_node, parent, p);
984         rb_insert_color(&mi->rb_node, modules);
985 }
986
987 static void delete_modules(struct rb_root *modules)
988 {
989         struct module_info *mi;
990         struct rb_node *next = rb_first(modules);
991
992         while (next) {
993                 mi = rb_entry(next, struct module_info, rb_node);
994                 next = rb_next(&mi->rb_node);
995                 rb_erase(&mi->rb_node, modules);
996                 zfree(&mi->name);
997                 free(mi);
998         }
999 }
1000
1001 static struct module_info *find_module(const char *name,
1002                                        struct rb_root *modules)
1003 {
1004         struct rb_node *n = modules->rb_node;
1005
1006         while (n) {
1007                 struct module_info *m;
1008                 int cmp;
1009
1010                 m = rb_entry(n, struct module_info, rb_node);
1011                 cmp = strcmp(name, m->name);
1012                 if (cmp < 0)
1013                         n = n->rb_left;
1014                 else if (cmp > 0)
1015                         n = n->rb_right;
1016                 else
1017                         return m;
1018         }
1019
1020         return NULL;
1021 }
1022
1023 static int __read_proc_modules(void *arg, const char *name, u64 start,
1024                                u64 size __maybe_unused)
1025 {
1026         struct rb_root *modules = arg;
1027         struct module_info *mi;
1028
1029         mi = zalloc(sizeof(struct module_info));
1030         if (!mi)
1031                 return -ENOMEM;
1032
1033         mi->name = strdup(name);
1034         mi->start = start;
1035
1036         if (!mi->name) {
1037                 free(mi);
1038                 return -ENOMEM;
1039         }
1040
1041         add_module(mi, modules);
1042
1043         return 0;
1044 }
1045
1046 static int read_proc_modules(const char *filename, struct rb_root *modules)
1047 {
1048         if (symbol__restricted_filename(filename, "/proc/modules"))
1049                 return -1;
1050
1051         if (modules__parse(filename, modules, __read_proc_modules)) {
1052                 delete_modules(modules);
1053                 return -1;
1054         }
1055
1056         return 0;
1057 }
1058
1059 int compare_proc_modules(const char *from, const char *to)
1060 {
1061         struct rb_root from_modules = RB_ROOT;
1062         struct rb_root to_modules = RB_ROOT;
1063         struct rb_node *from_node, *to_node;
1064         struct module_info *from_m, *to_m;
1065         int ret = -1;
1066
1067         if (read_proc_modules(from, &from_modules))
1068                 return -1;
1069
1070         if (read_proc_modules(to, &to_modules))
1071                 goto out_delete_from;
1072
1073         from_node = rb_first(&from_modules);
1074         to_node = rb_first(&to_modules);
1075         while (from_node) {
1076                 if (!to_node)
1077                         break;
1078
1079                 from_m = rb_entry(from_node, struct module_info, rb_node);
1080                 to_m = rb_entry(to_node, struct module_info, rb_node);
1081
1082                 if (from_m->start != to_m->start ||
1083                     strcmp(from_m->name, to_m->name))
1084                         break;
1085
1086                 from_node = rb_next(from_node);
1087                 to_node = rb_next(to_node);
1088         }
1089
1090         if (!from_node && !to_node)
1091                 ret = 0;
1092
1093         delete_modules(&to_modules);
1094 out_delete_from:
1095         delete_modules(&from_modules);
1096
1097         return ret;
1098 }
1099
1100 static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
1101 {
1102         struct rb_root modules = RB_ROOT;
1103         struct map *old_map;
1104         int err;
1105
1106         err = read_proc_modules(filename, &modules);
1107         if (err)
1108                 return err;
1109
1110         maps__for_each_entry(kmaps, old_map) {
1111                 struct module_info *mi;
1112
1113                 if (!__map__is_kmodule(old_map)) {
1114                         continue;
1115                 }
1116
1117                 /* Module must be in memory at the same address */
1118                 mi = find_module(old_map->dso->short_name, &modules);
1119                 if (!mi || mi->start != old_map->start) {
1120                         err = -EINVAL;
1121                         goto out;
1122                 }
1123         }
1124 out:
1125         delete_modules(&modules);
1126         return err;
1127 }
1128
1129 /*
1130  * If kallsyms is referenced by name then we look for filename in the same
1131  * directory.
1132  */
1133 static bool filename_from_kallsyms_filename(char *filename,
1134                                             const char *base_name,
1135                                             const char *kallsyms_filename)
1136 {
1137         char *name;
1138
1139         strcpy(filename, kallsyms_filename);
1140         name = strrchr(filename, '/');
1141         if (!name)
1142                 return false;
1143
1144         name += 1;
1145
1146         if (!strcmp(name, "kallsyms")) {
1147                 strcpy(name, base_name);
1148                 return true;
1149         }
1150
1151         return false;
1152 }
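/*
 * Example: for kallsyms_filename "/proc/kallsyms" and base_name
 * "modules" this produces "/proc/modules"; for a hypothetical copy
 * "/tmp/kcore_dir/kallsyms" it produces "/tmp/kcore_dir/modules".
 */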
1153
1154 static int validate_kcore_modules(const char *kallsyms_filename,
1155                                   struct map *map)
1156 {
1157         struct maps *kmaps = map__kmaps(map);
1158         char modules_filename[PATH_MAX];
1159
1160         if (!kmaps)
1161                 return -EINVAL;
1162
1163         if (!filename_from_kallsyms_filename(modules_filename, "modules",
1164                                              kallsyms_filename))
1165                 return -EINVAL;
1166
1167         if (do_validate_kcore_modules(modules_filename, kmaps))
1168                 return -EINVAL;
1169
1170         return 0;
1171 }
1172
1173 static int validate_kcore_addresses(const char *kallsyms_filename,
1174                                     struct map *map)
1175 {
1176         struct kmap *kmap = map__kmap(map);
1177
1178         if (!kmap)
1179                 return -EINVAL;
1180
1181         if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1182                 u64 start;
1183
1184                 if (kallsyms__get_function_start(kallsyms_filename,
1185                                                  kmap->ref_reloc_sym->name, &start))
1186                         return -ENOENT;
1187                 if (start != kmap->ref_reloc_sym->addr)
1188                         return -EINVAL;
1189         }
1190
1191         return validate_kcore_modules(kallsyms_filename, map);
1192 }
1193
1194 struct kcore_mapfn_data {
1195         struct dso *dso;
1196         struct list_head maps;
1197 };
1198
1199 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1200 {
1201         struct kcore_mapfn_data *md = data;
1202         struct map *map;
1203
1204         map = map__new2(start, md->dso);
1205         if (map == NULL)
1206                 return -ENOMEM;
1207
1208         map->end = map->start + len;
1209         map->pgoff = pgoff;
1210
1211         list_add(&map->node, &md->maps);
1212
1213         return 0;
1214 }
1215
1216 /*
1217  * Merge new_map into kmaps, splitting it around the existing map
1218  * regions.
1219  */
1220 int maps__merge_in(struct maps *kmaps, struct map *new_map)
1221 {
1222         struct map *old_map;
1223         LIST_HEAD(merged);
1224
1225         maps__for_each_entry(kmaps, old_map) {
1226                 /* no overlap with this one */
1227                 if (new_map->end < old_map->start ||
1228                     new_map->start >= old_map->end)
1229                         continue;
1230
1231                 if (new_map->start < old_map->start) {
1232                         /*
1233                          * |new......
1234                          *       |old....
1235                          */
1236                         if (new_map->end < old_map->end) {
1237                                 /*
1238                                  * |new......|     -> |new..|
1239                                  *       |old....| ->       |old....|
1240                                  */
1241                                 new_map->end = old_map->start;
1242                         } else {
1243                                 /*
1244                                  * |new.............| -> |new..|       |new..|
1245                                  *       |old....|    ->       |old....|
1246                                  */
1247                                 struct map *m = map__clone(new_map);
1248
1249                                 if (!m)
1250                                         return -ENOMEM;
1251
1252                                 m->end = old_map->start;
1253                                 list_add_tail(&m->node, &merged);
1254                                 new_map->pgoff += old_map->end - new_map->start;
1255                                 new_map->start = old_map->end;
1256                         }
1257                 } else {
1258                         /*
1259                          *      |new......
1260                          * |old....
1261                          */
1262                         if (new_map->end < old_map->end) {
1263                                 /*
1264                                  *      |new..|   -> x
1265                                  * |old.........| -> |old.........|
1266                                  */
1267                                 map__put(new_map);
1268                                 new_map = NULL;
1269                                 break;
1270                         } else {
1271                                 /*
1272                                  *      |new......| ->         |new...|
1273                                  * |old....|        -> |old....|
1274                                  */
1275                                 new_map->pgoff += old_map->end - new_map->start;
1276                                 new_map->start = old_map->end;
1277                         }
1278                 }
1279         }
1280
1281         while (!list_empty(&merged)) {
1282                 old_map = list_entry(merged.next, struct map, node);
1283                 list_del_init(&old_map->node);
1284                 maps__insert(kmaps, old_map);
1285                 map__put(old_map);
1286         }
1287
1288         if (new_map) {
1289                 maps__insert(kmaps, new_map);
1290                 map__put(new_map);
1291         }
1292         return 0;
1293 }
1294
1295 static int dso__load_kcore(struct dso *dso, struct map *map,
1296                            const char *kallsyms_filename)
1297 {
1298         struct maps *kmaps = map__kmaps(map);
1299         struct kcore_mapfn_data md;
1300         struct map *old_map, *new_map, *replacement_map = NULL, *next;
1301         struct machine *machine;
1302         bool is_64_bit;
1303         int err, fd;
1304         char kcore_filename[PATH_MAX];
1305         u64 stext;
1306
1307         if (!kmaps)
1308                 return -EINVAL;
1309
1310         machine = kmaps->machine;
1311
1312         /* This function requires that the map is the kernel map */
1313         if (!__map__is_kernel(map))
1314                 return -EINVAL;
1315
1316         if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1317                                              kallsyms_filename))
1318                 return -EINVAL;
1319
1320         /* Modules and kernel must be present at their original addresses */
1321         if (validate_kcore_addresses(kallsyms_filename, map))
1322                 return -EINVAL;
1323
1324         md.dso = dso;
1325         INIT_LIST_HEAD(&md.maps);
1326
1327         fd = open(kcore_filename, O_RDONLY);
1328         if (fd < 0) {
1329                 pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1330                          kcore_filename);
1331                 return -EINVAL;
1332         }
1333
1334         /* Read new maps into temporary lists */
1335         err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
1336                               &is_64_bit);
1337         if (err)
1338                 goto out_err;
1339         dso->is_64_bit = is_64_bit;
1340
1341         if (list_empty(&md.maps)) {
1342                 err = -EINVAL;
1343                 goto out_err;
1344         }
1345
1346         /* Remove old maps */
1347         maps__for_each_entry_safe(kmaps, old_map, next) {
1348                 /*
1349                  * We need to preserve eBPF maps even if they are
1350                  * covered by kcore, because we need to access
1351                  * eBPF dso for source data.
1352                  */
1353                 if (old_map != map && !__map__is_bpf_prog(old_map))
1354                         maps__remove(kmaps, old_map);
1355         }
1356         machine->trampolines_mapped = false;
1357
1358         /* Find the kernel map using the '_stext' symbol */
1359         if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
1360                 list_for_each_entry(new_map, &md.maps, node) {
1361                         if (stext >= new_map->start && stext < new_map->end) {
1362                                 replacement_map = new_map;
1363                                 break;
1364                         }
1365                 }
1366         }
1367
1368         if (!replacement_map)
1369                 replacement_map = list_entry(md.maps.next, struct map, node);
1370
1371         /* Add new maps */
1372         while (!list_empty(&md.maps)) {
1373                 new_map = list_entry(md.maps.next, struct map, node);
1374                 list_del_init(&new_map->node);
1375                 if (new_map == replacement_map) {
1376                         map->start      = new_map->start;
1377                         map->end        = new_map->end;
1378                         map->pgoff      = new_map->pgoff;
1379                         map->map_ip     = new_map->map_ip;
1380                         map->unmap_ip   = new_map->unmap_ip;
1381                         /* Ensure maps are correctly ordered */
1382                         map__get(map);
1383                         maps__remove(kmaps, map);
1384                         maps__insert(kmaps, map);
1385                         map__put(map);
1386                         map__put(new_map);
1387                 } else {
1388                         /*
1389                          * Merge kcore map into existing maps,
1390                          * and ensure that current maps (eBPF)
1391                          * stay intact.
1392                          */
1393                         if (maps__merge_in(kmaps, new_map))
1394                                 goto out_err;
1395                 }
1396         }
1397
1398         if (machine__is(machine, "x86_64")) {
1399                 u64 addr;
1400
1401                 /*
1402                  * If one of the corresponding symbols is there, assume the
1403                  * entry trampoline maps are too.
1404                  */
1405                 if (!kallsyms__get_function_start(kallsyms_filename,
1406                                                   ENTRY_TRAMPOLINE_NAME,
1407                                                   &addr))
1408                         machine->trampolines_mapped = true;
1409         }
1410
1411         /*
1412          * Set the data type and long name so that kcore can be read via
1413          * dso__data_read_addr().
1414          */
1415         if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1416                 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1417         else
1418                 dso->binary_type = DSO_BINARY_TYPE__KCORE;
1419         dso__set_long_name(dso, strdup(kcore_filename), true);
1420
1421         close(fd);
1422
1423         if (map->prot & PROT_EXEC)
1424                 pr_debug("Using %s for kernel object code\n", kcore_filename);
1425         else
1426                 pr_debug("Using %s for kernel data\n", kcore_filename);
1427
1428         return 0;
1429
1430 out_err:
1431         while (!list_empty(&md.maps)) {
1432                 map = list_entry(md.maps.next, struct map, node);
1433                 list_del_init(&map->node);
1434                 map__put(map);
1435         }
1436         close(fd);
1437         return -EINVAL;
1438 }
1439
1440 /*
1441  * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
1442  * delta based on the relocation reference symbol.
1443  */
1444 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1445 {
1446         u64 addr;
1447
1448         if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1449                 return 0;
1450
1451         if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1452                 return -1;
1453
1454         *delta = addr - kmap->ref_reloc_sym->addr;
1455         return 0;
1456 }
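/*
 * Worked example with made-up addresses: if the reference symbol was
 * recorded at 0xffffffff81000000 but the kallsyms file now reports it
 * at 0xffffffff9e000000, *delta becomes 0x1d000000 and
 * maps__split_kallsyms() subtracts it from the kernel symbol addresses.
 */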
1457
1458 int __dso__load_kallsyms(struct dso *dso, const char *filename,
1459                          struct map *map, bool no_kcore)
1460 {
1461         struct kmap *kmap = map__kmap(map);
1462         u64 delta = 0;
1463
1464         if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1465                 return -1;
1466
1467         if (!kmap || !kmap->kmaps)
1468                 return -1;
1469
1470         if (dso__load_all_kallsyms(dso, filename) < 0)
1471                 return -1;
1472
1473         if (kallsyms__delta(kmap, filename, &delta))
1474                 return -1;
1475
1476         symbols__fixup_end(&dso->symbols, true);
1477         symbols__fixup_duplicate(&dso->symbols);
1478
1479         if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1480                 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1481         else
1482                 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1483
1484         if (!no_kcore && !dso__load_kcore(dso, map, filename))
1485                 return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
1486         else
1487                 return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
1488 }
1489
1490 int dso__load_kallsyms(struct dso *dso, const char *filename,
1491                        struct map *map)
1492 {
1493         return __dso__load_kallsyms(dso, filename, map, false);
1494 }
1495
1496 static int dso__load_perf_map(const char *map_path, struct dso *dso)
1497 {
1498         char *line = NULL;
1499         size_t n;
1500         FILE *file;
1501         int nr_syms = 0;
1502
1503         file = fopen(map_path, "r");
1504         if (file == NULL)
1505                 goto out_failure;
1506
1507         while (!feof(file)) {
1508                 u64 start, size;
1509                 struct symbol *sym;
1510                 int line_len, len;
1511
1512                 line_len = getline(&line, &n, file);
1513                 if (line_len < 0)
1514                         break;
1515
1516                 if (!line)
1517                         goto out_failure;
1518
1519                 line[--line_len] = '\0'; /* \n */
1520
1521                 len = hex2u64(line, &start);
1522
1523                 len++;
1524                 if (len + 2 >= line_len)
1525                         continue;
1526
1527                 len += hex2u64(line + len, &size);
1528
1529                 len++;
1530                 if (len + 2 >= line_len)
1531                         continue;
1532
1533                 sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1534
1535                 if (sym == NULL)
1536                         goto out_delete_line;
1537
1538                 symbols__insert(&dso->symbols, sym);
1539                 nr_syms++;
1540         }
1541
1542         free(line);
1543         fclose(file);
1544
1545         return nr_syms;
1546
1547 out_delete_line:
1548         free(line);
1549 out_failure:
1550         return -1;
1551 }
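/*
 * The perf map format parsed above is one symbol per line: hex start
 * address, hex size and symbol name, separated by single spaces.  An
 * illustrative line (address and name made up):
 *
 *   7f6bf0a2c000 180 jitted_function_42
 */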
1552
1553 #ifdef HAVE_LIBBFD_SUPPORT
1554 #define PACKAGE 'perf'
1555 #include <bfd.h>
1556
1557 static int bfd_symbols__cmpvalue(const void *a, const void *b)
1558 {
1559         const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b;
1560
1561         if (bfd_asymbol_value(as) != bfd_asymbol_value(bs))
1562                 return bfd_asymbol_value(as) - bfd_asymbol_value(bs);
1563
1564         return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0];
1565 }
1566
1567 static int bfd2elf_binding(asymbol *symbol)
1568 {
1569         if (symbol->flags & BSF_WEAK)
1570                 return STB_WEAK;
1571         if (symbol->flags & BSF_GLOBAL)
1572                 return STB_GLOBAL;
1573         if (symbol->flags & BSF_LOCAL)
1574                 return STB_LOCAL;
1575         return -1;
1576 }
1577
1578 int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
1579 {
1580         int err = -1;
1581         long symbols_size, symbols_count, i;
1582         asection *section;
1583         asymbol **symbols, *sym;
1584         struct symbol *symbol;
1585         bfd *abfd;
1586         u64 start, len;
1587
1588         abfd = bfd_openr(dso->long_name, NULL);
1589         if (!abfd)
1590                 return -1;
1591
1592         if (!bfd_check_format(abfd, bfd_object)) {
1593                 pr_debug2("%s: cannot read %s bfd file.\n", __func__,
1594                           dso->long_name);
1595                 goto out_close;
1596         }
1597
1598         if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
1599                 goto out_close;
1600
1601         section = bfd_get_section_by_name(abfd, ".text");
1602         if (section)
1603                 dso->text_offset = section->vma - section->filepos;
1604
1605         bfd_close(abfd);
1606
1607         abfd = bfd_openr(debugfile, NULL);
1608         if (!abfd)
1609                 return -1;
1610
1611         if (!bfd_check_format(abfd, bfd_object)) {
1612                 pr_debug2("%s: cannot read %s bfd file.\n", __func__,
1613                           debugfile);
1614                 goto out_close;
1615         }
1616
1617         if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
1618                 goto out_close;
1619
1620         symbols_size = bfd_get_symtab_upper_bound(abfd);
1621         if (symbols_size == 0) {
1622                 bfd_close(abfd);
1623                 return 0;
1624         }
1625
1626         if (symbols_size < 0)
1627                 goto out_close;
1628
1629         symbols = malloc(symbols_size);
1630         if (!symbols)
1631                 goto out_close;
1632
1633         symbols_count = bfd_canonicalize_symtab(abfd, symbols);
1634         if (symbols_count < 0)
1635                 goto out_free;
1636
1637         qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);
1638
1639 #ifdef bfd_get_section
1640 #define bfd_asymbol_section bfd_get_section
1641 #endif
1642         for (i = 0; i < symbols_count; ++i) {
1643                 sym = symbols[i];
1644                 section = bfd_asymbol_section(sym);
1645                 if (bfd2elf_binding(sym) < 0)
1646                         continue;
1647
1648                 while (i + 1 < symbols_count &&
1649                        bfd_asymbol_section(symbols[i + 1]) == section &&
1650                        bfd2elf_binding(symbols[i + 1]) < 0)
1651                         i++;
1652
1653                 if (i + 1 < symbols_count &&
1654                     bfd_asymbol_section(symbols[i + 1]) == section)
1655                         len = symbols[i + 1]->value - sym->value;
1656                 else
1657                         len = section->size - sym->value;
1658
1659                 start = bfd_asymbol_value(sym) - dso->text_offset;
1660                 symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
1661                                      bfd_asymbol_name(sym));
1662                 if (!symbol)
1663                         goto out_free;
1664
1665                 symbols__insert(&dso->symbols, symbol);
1666         }
1667 #ifdef bfd_get_section
1668 #undef bfd_asymbol_section
1669 #endif
1670
1671         symbols__fixup_end(&dso->symbols, false);
1672         symbols__fixup_duplicate(&dso->symbols);
1673         dso->adjust_symbols = 1;
1674
1675         err = 0;
1676 out_free:
1677         free(symbols);
1678 out_close:
1679         bfd_close(abfd);
1680         return err;
1681 }
1682 #endif
1683
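/*
 * Filter the candidate binary types tried by dso__load(): user space DSOs
 * only match the userspace debuginfo variants, kernel and guest kernel DSOs
 * only match their kallsyms/vmlinux/kcore variants, and kernel modules must
 * match the symtab type recorded when the module dso was created.
 */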
1684 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1685                                            enum dso_binary_type type)
1686 {
1687         switch (type) {
1688         case DSO_BINARY_TYPE__JAVA_JIT:
1689         case DSO_BINARY_TYPE__DEBUGLINK:
1690         case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1691         case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1692         case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1693         case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
1694         case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1695         case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1696                 return !kmod && dso->kernel == DSO_SPACE__USER;
1697
1698         case DSO_BINARY_TYPE__KALLSYMS:
1699         case DSO_BINARY_TYPE__VMLINUX:
1700         case DSO_BINARY_TYPE__KCORE:
1701                 return dso->kernel == DSO_SPACE__KERNEL;
1702
1703         case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1704         case DSO_BINARY_TYPE__GUEST_VMLINUX:
1705         case DSO_BINARY_TYPE__GUEST_KCORE:
1706                 return dso->kernel == DSO_SPACE__KERNEL_GUEST;
1707
1708         case DSO_BINARY_TYPE__GUEST_KMODULE:
1709         case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1710         case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1711         case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1712                 /*
1713                  * kernel modules know their symtab type - it's set when
1714                  * creating a module dso in machine__addnew_module_map().
1715                  */
1716                 return kmod && dso->symtab_type == type;
1717
1718         case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1719         case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1720                 return true;
1721
1722         case DSO_BINARY_TYPE__BPF_PROG_INFO:
1723         case DSO_BINARY_TYPE__BPF_IMAGE:
1724         case DSO_BINARY_TYPE__OOL:
1725         case DSO_BINARY_TYPE__NOT_FOUND:
1726         default:
1727                 return false;
1728         }
1729 }
1730
1731 /* Checks for the existence of the perf-<pid>.map file in two different
1732  * locations.  First, if the process is in a separate mount namespace, check in
1733  * that namespace using the pid of the innermost pid namespace.  If it's not in a
1734  * namespace, or the file can't be found there, try in the mount namespace of
1735  * the tracing process using our view of its pid.
1736  */
1737 static int dso__find_perf_map(char *filebuf, size_t bufsz,
1738                               struct nsinfo **nsip)
1739 {
1740         struct nscookie nsc;
1741         struct nsinfo *nsi;
1742         struct nsinfo *nnsi;
1743         int rc = -1;
1744
1745         nsi = *nsip;
1746
1747         if (nsi->need_setns) {
1748                 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
1749                 nsinfo__mountns_enter(nsi, &nsc);
1750                 rc = access(filebuf, R_OK);
1751                 nsinfo__mountns_exit(&nsc);
1752                 if (rc == 0)
1753                         return rc;
1754         }
1755
1756         nnsi = nsinfo__copy(nsi);
1757         if (nnsi) {
1758                 nsinfo__put(nsi);
1759
1760                 nnsi->need_setns = false;
1761                 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
1762                 *nsip = nnsi;
1763                 rc = 0;
1764         }
1765
1766         return rc;
1767 }
1768
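/*
 * Main entry point for loading a DSO's symbol table.  perf map files and
 * kernel/guest kernel DSOs take dedicated paths; everything else iterates
 * over binary_type_symtab[] collecting up to two symbol sources - one with a
 * symtab for symbol names and one "runtime" image for things like PLT entry
 * synthesis - and then hands them to dso__load_sym().
 */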
1769 int dso__load(struct dso *dso, struct map *map)
1770 {
1771         char *name;
1772         int ret = -1;
1773         u_int i;
1774         struct machine *machine = NULL;
1775         char *root_dir = (char *) "";
1776         int ss_pos = 0;
1777         struct symsrc ss_[2];
1778         struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1779         bool kmod;
1780         bool perfmap;
1781         struct build_id bid;
1782         struct nscookie nsc;
1783         char newmapname[PATH_MAX];
1784         const char *map_path = dso->long_name;
1785
1786         perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
1787         if (perfmap) {
1788                 if (dso->nsinfo && (dso__find_perf_map(newmapname,
1789                     sizeof(newmapname), &dso->nsinfo) == 0)) {
1790                         map_path = newmapname;
1791                 }
1792         }
1793
1794         nsinfo__mountns_enter(dso->nsinfo, &nsc);
1795         pthread_mutex_lock(&dso->lock);
1796
1797         /* check again under the dso->lock */
1798         if (dso__loaded(dso)) {
1799                 ret = 1;
1800                 goto out;
1801         }
1802
1803         kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1804                 dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1805                 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1806                 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1807
1808         if (dso->kernel && !kmod) {
1809                 if (dso->kernel == DSO_SPACE__KERNEL)
1810                         ret = dso__load_kernel_sym(dso, map);
1811                 else if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1812                         ret = dso__load_guest_kernel_sym(dso, map);
1813
1814                 machine = map__kmaps(map)->machine;
1815                 if (machine__is(machine, "x86_64"))
1816                         machine__map_x86_64_entry_trampolines(machine, dso);
1817                 goto out;
1818         }
1819
1820         dso->adjust_symbols = 0;
1821
1822         if (perfmap) {
1823                 ret = dso__load_perf_map(map_path, dso);
1824                 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1825                                              DSO_BINARY_TYPE__NOT_FOUND;
1826                 goto out;
1827         }
1828
1829         if (machine)
1830                 root_dir = machine->root_dir;
1831
1832         name = malloc(PATH_MAX);
1833         if (!name)
1834                 goto out;
1835
1836         /*
1837          * Read the build id if possible. This is required for
1838          * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
1839          */
1840         if (!dso->has_build_id &&
1841             is_regular_file(dso->long_name)) {
1842                 __symbol__join_symfs(name, PATH_MAX, dso->long_name);
1843                 if (filename__read_build_id(name, &bid) > 0)
1844                         dso__set_build_id(dso, &bid);
1845         }
1846
1847         /*
1848          * Iterate over candidate debug images.
1849          * Keep track of "interesting" ones (those which have a symtab, dynsym,
1850          * and/or opd section) for processing.
1851          */
1852         for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1853                 struct symsrc *ss = &ss_[ss_pos];
1854                 bool next_slot = false;
1855                 bool is_reg;
1856                 bool nsexit;
1857                 int bfdrc = -1;
1858                 int sirc = -1;
1859
1860                 enum dso_binary_type symtab_type = binary_type_symtab[i];
1861
1862                 nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
1863                     symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);
1864
1865                 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1866                         continue;
1867
1868                 if (dso__read_binary_type_filename(dso, symtab_type,
1869                                                    root_dir, name, PATH_MAX))
1870                         continue;
1871
1872                 if (nsexit)
1873                         nsinfo__mountns_exit(&nsc);
1874
1875                 is_reg = is_regular_file(name);
1876 #ifdef HAVE_LIBBFD_SUPPORT
1877                 if (is_reg)
1878                         bfdrc = dso__load_bfd_symbols(dso, name);
1879 #endif
1880                 if (is_reg && bfdrc < 0)
1881                         sirc = symsrc__init(ss, dso, name, symtab_type);
1882
1883                 if (nsexit)
1884                         nsinfo__mountns_enter(dso->nsinfo, &nsc);
1885
1886                 if (bfdrc == 0) {
1887                         ret = 0;
1888                         break;
1889                 }
1890
1891                 if (!is_reg || sirc < 0)
1892                         continue;
1893
1894                 if (!syms_ss && symsrc__has_symtab(ss)) {
1895                         syms_ss = ss;
1896                         next_slot = true;
1897                         if (!dso->symsrc_filename)
1898                                 dso->symsrc_filename = strdup(name);
1899                 }
1900
1901                 if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1902                         runtime_ss = ss;
1903                         next_slot = true;
1904                 }
1905
1906                 if (next_slot) {
1907                         ss_pos++;
1908
1909                         if (syms_ss && runtime_ss)
1910                                 break;
1911                 } else {
1912                         symsrc__destroy(ss);
1913                 }
1914
1915         }
1916
1917         if (!runtime_ss && !syms_ss)
1918                 goto out_free;
1919
1920         if (runtime_ss && !syms_ss) {
1921                 syms_ss = runtime_ss;
1922         }
1923
1924         /* We'll have to hope for the best */
1925         if (!runtime_ss && syms_ss)
1926                 runtime_ss = syms_ss;
1927
1928         if (syms_ss)
1929                 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1930         else
1931                 ret = -1;
1932
1933         if (ret > 0) {
1934                 int nr_plt;
1935
1936                 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
1937                 if (nr_plt > 0)
1938                         ret += nr_plt;
1939         }
1940
1941         for (; ss_pos > 0; ss_pos--)
1942                 symsrc__destroy(&ss_[ss_pos - 1]);
1943 out_free:
1944         free(name);
1945         if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1946                 ret = 0;
1947 out:
1948         dso__set_loaded(dso);
1949         pthread_mutex_unlock(&dso->lock);
1950         nsinfo__mountns_exit(&nsc);
1951
1952         return ret;
1953 }
1954
1955 static int map__strcmp(const void *a, const void *b)
1956 {
1957         const struct map *ma = *(const struct map **)a, *mb = *(const struct map **)b;
1958         return strcmp(ma->dso->short_name, mb->dso->short_name);
1959 }
1960
1961 static int map__strcmp_name(const void *name, const void *b)
1962 {
1963         const struct map *map = *(const struct map **)b;
1964         return strcmp(name, map->dso->short_name);
1965 }
1966
1967 void __maps__sort_by_name(struct maps *maps)
1968 {
1969         qsort(maps->maps_by_name, maps->nr_maps, sizeof(struct map *), map__strcmp);
1970 }
1971
1972 static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
1973 {
1974         struct map *map;
1975         struct map **maps_by_name = realloc(maps->maps_by_name, maps->nr_maps * sizeof(map));
1976         int i = 0;
1977
1978         if (maps_by_name == NULL)
1979                 return -1;
1980
1981         maps->maps_by_name = maps_by_name;
1982         maps->nr_maps_allocated = maps->nr_maps;
1983
1984         maps__for_each_entry(maps, map)
1985                 maps_by_name[i++] = map;
1986
1987         __maps__sort_by_name(maps);
1988         return 0;
1989 }
1990
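/*
 * Lookups by name use an array sorted by dso short name, built lazily from
 * the rbtree on the first call and then kept in maps->maps_by_name, so the
 * search itself is a simple bsearch().
 */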
1991 static struct map *__maps__find_by_name(struct maps *maps, const char *name)
1992 {
1993         struct map **mapp;
1994
1995         if (maps->maps_by_name == NULL &&
1996             map__groups__sort_by_name_from_rbtree(maps))
1997                 return NULL;
1998
1999         mapp = bsearch(name, maps->maps_by_name, maps->nr_maps, sizeof(*mapp), map__strcmp_name);
2000         if (mapp)
2001                 return *mapp;
2002         return NULL;
2003 }
2004
2005 struct map *maps__find_by_name(struct maps *maps, const char *name)
2006 {
2007         struct map *map;
2008
2009         down_read(&maps->lock);
2010
2011         if (maps->last_search_by_name && strcmp(maps->last_search_by_name->dso->short_name, name) == 0) {
2012                 map = maps->last_search_by_name;
2013                 goto out_unlock;
2014         }
2015         /*
2016          * If maps->maps_by_name exists and the lookup below fails, the name
2017          * isn't in the rbtree either, as maps->maps_by_name mirrors the rbtree
2018          * once lookups by name are made.
2019          */
2020         map = __maps__find_by_name(maps, name);
2021         if (map || maps->maps_by_name != NULL)
2022                 goto out_unlock;
2023
2024         /* Fallback to traversing the rbtree... */
2025         maps__for_each_entry(maps, map)
2026                 if (strcmp(map->dso->short_name, name) == 0) {
2027                         maps->last_search_by_name = map;
2028                         goto out_unlock;
2029                 }
2030
2031         map = NULL;
2032
2033 out_unlock:
2034         up_read(&maps->lock);
2035         return map;
2036 }
2037
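/*
 * Load kernel symbols from a specific vmlinux image.  Absolute paths are used
 * as-is, relative ones are looked up under the symfs prefix; on success the
 * dso's long name and binary type are updated to reflect the file used.
 */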
2038 int dso__load_vmlinux(struct dso *dso, struct map *map,
2039                       const char *vmlinux, bool vmlinux_allocated)
2040 {
2041         int err = -1;
2042         struct symsrc ss;
2043         char symfs_vmlinux[PATH_MAX];
2044         enum dso_binary_type symtab_type;
2045
2046         if (vmlinux[0] == '/')
2047                 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
2048         else
2049                 symbol__join_symfs(symfs_vmlinux, vmlinux);
2050
2051         if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
2052                 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
2053         else
2054                 symtab_type = DSO_BINARY_TYPE__VMLINUX;
2055
2056         if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
2057                 return -1;
2058
2059         err = dso__load_sym(dso, map, &ss, &ss, 0);
2060         symsrc__destroy(&ss);
2061
2062         if (err > 0) {
2063                 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
2064                         dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
2065                 else
2066                         dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
2067                 dso__set_long_name(dso, vmlinux, vmlinux_allocated);
2068                 dso__set_loaded(dso);
2069                 pr_debug("Using %s for symbols\n", symfs_vmlinux);
2070         }
2071
2072         return err;
2073 }
2074
2075 int dso__load_vmlinux_path(struct dso *dso, struct map *map)
2076 {
2077         int i, err = 0;
2078         char *filename = NULL;
2079
2080         pr_debug("Looking at the vmlinux_path (%d entries long)\n",
2081                  vmlinux_path__nr_entries + 1);
2082
2083         for (i = 0; i < vmlinux_path__nr_entries; ++i) {
2084                 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
2085                 if (err > 0)
2086                         goto out;
2087         }
2088
2089         if (!symbol_conf.ignore_vmlinux_buildid)
2090                 filename = dso__build_id_filename(dso, NULL, 0, false);
2091         if (filename != NULL) {
2092                 err = dso__load_vmlinux(dso, map, filename, true);
2093                 if (err > 0)
2094                         goto out;
2095                 free(filename);
2096         }
2097 out:
2098         return err;
2099 }
2100
2101 static bool visible_dir_filter(const char *name, struct dirent *d)
2102 {
2103         if (d->d_type != DT_DIR)
2104                 return false;
2105         return lsdir_no_dot_filter(name, d);
2106 }
2107
2108 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
2109 {
2110         char kallsyms_filename[PATH_MAX];
2111         int ret = -1;
2112         struct strlist *dirs;
2113         struct str_node *nd;
2114
2115         dirs = lsdir(dir, visible_dir_filter);
2116         if (!dirs)
2117                 return -1;
2118
2119         strlist__for_each_entry(nd, dirs) {
2120                 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
2121                           "%s/%s/kallsyms", dir, nd->s);
2122                 if (!validate_kcore_addresses(kallsyms_filename, map)) {
2123                         strlcpy(dir, kallsyms_filename, dir_sz);
2124                         ret = 0;
2125                         break;
2126                 }
2127         }
2128
2129         strlist__delete(dirs);
2130
2131         return ret;
2132 }
2133
2134 /*
2135  * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
2136  * since access(R_OK) only checks the real UID/GID, but open() uses the effective
2137  * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
2138  */
2139 static bool filename__readable(const char *file)
2140 {
2141         int fd = open(file, O_RDONLY);
2142         if (fd < 0)
2143                 return false;
2144         close(fd);
2145         return true;
2146 }
2147
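/*
 * Pick a kallsyms file for the kernel dso: /proc/kallsyms if there is no
 * build-id, or if the build-id matches the running kernel and /proc/kcore is
 * usable; otherwise prefer a matching kcore directory from the build-id
 * cache, fall back to /proc/kallsyms for the host kernel, and finally to a
 * kallsyms copy cached by build-id.
 */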
2148 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
2149 {
2150         struct build_id bid;
2151         char sbuild_id[SBUILD_ID_SIZE];
2152         bool is_host = false;
2153         char path[PATH_MAX];
2154
2155         if (!dso->has_build_id) {
2156                 /*
2157                  * Last resort, if we don't have a build-id and couldn't find
2158                  * any vmlinux file, try the running kernel kallsyms table.
2159                  */
2160                 goto proc_kallsyms;
2161         }
2162
2163         if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0)
2164                 is_host = dso__build_id_equal(dso, &bid);
2165
2166         /* Try a fast path for /proc/kallsyms if possible */
2167         if (is_host) {
2168                 /*
2169                  * Do not check the build-id cache, unless we know we cannot use
2170                  * /proc/kcore or the module maps don't match /proc/kallsyms.
2171                  * To check readability of /proc/kcore, do not use access(R_OK)
2172                  * since /proc/kcore requires CAP_SYS_RAWIO to read and access()
2173                  * cannot check that.
2174                  */
2175                 if (filename__readable("/proc/kcore") &&
2176                     !validate_kcore_addresses("/proc/kallsyms", map))
2177                         goto proc_kallsyms;
2178         }
2179
2180         build_id__sprintf(&dso->bid, sbuild_id);
2181
2182         /* Find kallsyms in build-id cache with kcore */
2183         scnprintf(path, sizeof(path), "%s/%s/%s",
2184                   buildid_dir, DSO__NAME_KCORE, sbuild_id);
2185
2186         if (!find_matching_kcore(map, path, sizeof(path)))
2187                 return strdup(path);
2188
2189         /* Use current /proc/kallsyms if possible */
2190         if (is_host) {
2191 proc_kallsyms:
2192                 return strdup("/proc/kallsyms");
2193         }
2194
2195         /* Finally, find a cache of kallsyms */
2196         if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
2197                 pr_err("No kallsyms or vmlinux with build-id %s was found\n",
2198                        sbuild_id);
2199                 return NULL;
2200         }
2201
2202         return strdup(path);
2203 }
2204
2205 static int dso__load_kernel_sym(struct dso *dso, struct map *map)
2206 {
2207         int err;
2208         const char *kallsyms_filename = NULL;
2209         char *kallsyms_allocated_filename = NULL;
2210         /*
2211          * Step 1: if the user specified a kallsyms or vmlinux filename, use
2212          * it and only it, reporting errors to the user if it cannot be used.
2213          *
2214          * For instance, try to analyse an ARM perf.data file _without_ a
2215          * build-id, or if the user specifies the wrong path to the right
2216          * vmlinux file, obviously we can't fall back to another vmlinux (an
2217          * x86_64 one, on the machine where analysis is being performed, say),
2218          * or worse, /proc/kallsyms.
2219          *
2220          * If the specified file _has_ a build-id and there is a build-id
2221          * section in the perf.data file, we will still do the expected
2222          * validation in dso__load_vmlinux and will bail out if they don't
2223          * match.
2224          */
2225         if (symbol_conf.kallsyms_name != NULL) {
2226                 kallsyms_filename = symbol_conf.kallsyms_name;
2227                 goto do_kallsyms;
2228         }
2229
2230         if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
2231                 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
2232         }
2233
2234         if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
2235                 err = dso__load_vmlinux_path(dso, map);
2236                 if (err > 0)
2237                         return err;
2238         }
2239
2240         /* do not try local files if a symfs was given */
2241         if (symbol_conf.symfs[0] != 0)
2242                 return -1;
2243
2244         kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
2245         if (!kallsyms_allocated_filename)
2246                 return -1;
2247
2248         kallsyms_filename = kallsyms_allocated_filename;
2249
2250 do_kallsyms:
2251         err = dso__load_kallsyms(dso, kallsyms_filename, map);
2252         if (err > 0)
2253                 pr_debug("Using %s for symbols\n", kallsyms_filename);
2254         free(kallsyms_allocated_filename);
2255
2256         if (err > 0 && !dso__is_kcore(dso)) {
2257                 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
2258                 dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
2259                 map__fixup_start(map);
2260                 map__fixup_end(map);
2261         }
2262
2263         return err;
2264 }
2265
2266 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
2267 {
2268         int err;
2269         const char *kallsyms_filename = NULL;
2270         struct machine *machine = map__kmaps(map)->machine;
2271         char path[PATH_MAX];
2272
2273         if (machine__is_default_guest(machine)) {
2274                 /*
2275                  * If the user specified a vmlinux filename, use it and only
2276                  * it, reporting errors to the user if it cannot be used.
2277                  * Otherwise use the guest kallsyms file given on the command line.
2278                  */
2279                 if (symbol_conf.default_guest_vmlinux_name != NULL) {
2280                         err = dso__load_vmlinux(dso, map,
2281                                                 symbol_conf.default_guest_vmlinux_name,
2282                                                 false);
2283                         return err;
2284                 }
2285
2286                 kallsyms_filename = symbol_conf.default_guest_kallsyms;
2287                 if (!kallsyms_filename)
2288                         return -1;
2289         } else {
2290                 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
2291                 kallsyms_filename = path;
2292         }
2293
2294         err = dso__load_kallsyms(dso, kallsyms_filename, map);
2295         if (err > 0)
2296                 pr_debug("Using %s for symbols\n", kallsyms_filename);
2297         if (err > 0 && !dso__is_kcore(dso)) {
2298                 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
2299                 dso__set_long_name(dso, machine->mmap_name, false);
2300                 map__fixup_start(map);
2301                 map__fixup_end(map);
2302         }
2303
2304         return err;
2305 }
2306
2307 static void vmlinux_path__exit(void)
2308 {
2309         while (--vmlinux_path__nr_entries >= 0)
2310                 zfree(&vmlinux_path[vmlinux_path__nr_entries]);
2311         vmlinux_path__nr_entries = 0;
2312
2313         zfree(&vmlinux_path);
2314 }
2315
2316 static const char * const vmlinux_paths[] = {
2317         "vmlinux",
2318         "/boot/vmlinux"
2319 };
2320
2321 static const char * const vmlinux_paths_upd[] = {
2322         "/boot/vmlinux-%s",
2323         "/usr/lib/debug/boot/vmlinux-%s",
2324         "/lib/modules/%s/build/vmlinux",
2325         "/usr/lib/debug/lib/modules/%s/vmlinux",
2326         "/usr/lib/debug/boot/vmlinux-%s.debug"
2327 };
2328
2329 static int vmlinux_path__add(const char *new_entry)
2330 {
2331         vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
2332         if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2333                 return -1;
2334         ++vmlinux_path__nr_entries;
2335
2336         return 0;
2337 }
2338
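/*
 * Build the list of vmlinux candidates: the fixed entries in vmlinux_paths[],
 * plus the per kernel version templates in vmlinux_paths_upd[] expanded with
 * the os_release from the perf_env (typically the perf.data header) or from
 * uname() when none is available.  The version dependent entries are skipped
 * when a symfs is in use.
 */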
2339 static int vmlinux_path__init(struct perf_env *env)
2340 {
2341         struct utsname uts;
2342         char bf[PATH_MAX];
2343         char *kernel_version;
2344         unsigned int i;
2345
2346         vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
2347                               ARRAY_SIZE(vmlinux_paths_upd)));
2348         if (vmlinux_path == NULL)
2349                 return -1;
2350
2351         for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
2352                 if (vmlinux_path__add(vmlinux_paths[i]) < 0)
2353                         goto out_fail;
2354
2355         /* only try kernel version if no symfs was given */
2356         if (symbol_conf.symfs[0] != 0)
2357                 return 0;
2358
2359         if (env) {
2360                 kernel_version = env->os_release;
2361         } else {
2362                 if (uname(&uts) < 0)
2363                         goto out_fail;
2364
2365                 kernel_version = uts.release;
2366         }
2367
2368         for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
2369                 snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
2370                 if (vmlinux_path__add(bf) < 0)
2371                         goto out_fail;
2372         }
2373
2374         return 0;
2375
2376 out_fail:
2377         vmlinux_path__exit();
2378         return -1;
2379 }
2380
2381 int setup_list(struct strlist **list, const char *list_str,
2382                       const char *list_name)
2383 {
2384         if (list_str == NULL)
2385                 return 0;
2386
2387         *list = strlist__new(list_str, NULL);
2388         if (!*list) {
2389                 pr_err("problems parsing %s list\n", list_name);
2390                 return -1;
2391         }
2392
2393         symbol_conf.has_filter = true;
2394         return 0;
2395 }
2396
2397 int setup_intlist(struct intlist **list, const char *list_str,
2398                   const char *list_name)
2399 {
2400         if (list_str == NULL)
2401                 return 0;
2402
2403         *list = intlist__new(list_str);
2404         if (!*list) {
2405                 pr_err("problems parsing %s list\n", list_name);
2406                 return -1;
2407         }
2408         return 0;
2409 }
2410
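/*
 * Decide whether kernel addresses are restricted for this user: with
 * CAP_SYSLOG only kptr_restrict >= 2 hides them, without it any non-zero
 * kptr_restrict does, and perf_event_paranoid > 1 without CAP_SYSLOG
 * restricts them as well.
 */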
2411 static bool symbol__read_kptr_restrict(void)
2412 {
2413         bool value = false;
2414         FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
2415
2416         if (fp != NULL) {
2417                 char line[8];
2418
2419                 if (fgets(line, sizeof(line), fp) != NULL)
2420                         value = perf_cap__capable(CAP_SYSLOG) ?
2421                                         (atoi(line) >= 2) :
2422                                         (atoi(line) != 0);
2423
2424                 fclose(fp);
2425         }
2426
2427         /* Per kernel/kallsyms.c:
2428          * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
2429          */
2430         if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
2431                 value = true;
2432
2433         return value;
2434 }
2435
2436 int symbol__annotation_init(void)
2437 {
2438         if (symbol_conf.init_annotation)
2439                 return 0;
2440
2441         if (symbol_conf.initialized) {
2442                 pr_err("Annotation needs to be initialized before symbol__init()\n");
2443                 return -1;
2444         }
2445
2446         symbol_conf.priv_size += sizeof(struct annotation);
2447         symbol_conf.init_annotation = true;
2448         return 0;
2449 }
2450
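/*
 * One-time symbol subsystem setup: align the per-symbol private area, build
 * the vmlinux search path, parse the dso/comm/pid/tid/symbol filter lists and
 * normalize the symfs setting before reading kptr_restrict.
 */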
2451 int symbol__init(struct perf_env *env)
2452 {
2453         const char *symfs;
2454
2455         if (symbol_conf.initialized)
2456                 return 0;
2457
2458         symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
2459
2460         symbol__elf_init();
2461
2462         if (symbol_conf.sort_by_name)
2463                 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
2464                                           sizeof(struct symbol));
2465
2466         if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
2467                 return -1;
2468
2469         if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2470                 pr_err("'.' is the only non-valid --field-separator argument\n");
2471                 return -1;
2472         }
2473
2474         if (setup_list(&symbol_conf.dso_list,
2475                        symbol_conf.dso_list_str, "dso") < 0)
2476                 return -1;
2477
2478         if (setup_list(&symbol_conf.comm_list,
2479                        symbol_conf.comm_list_str, "comm") < 0)
2480                 goto out_free_dso_list;
2481
2482         if (setup_intlist(&symbol_conf.pid_list,
2483                        symbol_conf.pid_list_str, "pid") < 0)
2484                 goto out_free_comm_list;
2485
2486         if (setup_intlist(&symbol_conf.tid_list,
2487                        symbol_conf.tid_list_str, "tid") < 0)
2488                 goto out_free_pid_list;
2489
2490         if (setup_list(&symbol_conf.sym_list,
2491                        symbol_conf.sym_list_str, "symbol") < 0)
2492                 goto out_free_tid_list;
2493
2494         if (setup_list(&symbol_conf.bt_stop_list,
2495                        symbol_conf.bt_stop_list_str, "symbol") < 0)
2496                 goto out_free_sym_list;
2497
2498         /*
2499          * A path to symbols of "/" is identical to "",
2500          * so reset it here for simplicity.
2501          */
2502         symfs = realpath(symbol_conf.symfs, NULL);
2503         if (symfs == NULL)
2504                 symfs = symbol_conf.symfs;
2505         if (strcmp(symfs, "/") == 0)
2506                 symbol_conf.symfs = "";
2507         if (symfs != symbol_conf.symfs)
2508                 free((void *)symfs);
2509
2510         symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2511
2512         symbol_conf.initialized = true;
2513         return 0;
2514
2515 out_free_sym_list:
2516         strlist__delete(symbol_conf.sym_list);
2517 out_free_tid_list:
2518         intlist__delete(symbol_conf.tid_list);
2519 out_free_pid_list:
2520         intlist__delete(symbol_conf.pid_list);
2521 out_free_comm_list:
2522         strlist__delete(symbol_conf.comm_list);
2523 out_free_dso_list:
2524         strlist__delete(symbol_conf.dso_list);
2525         return -1;
2526 }
2527
2528 void symbol__exit(void)
2529 {
2530         if (!symbol_conf.initialized)
2531                 return;
2532         strlist__delete(symbol_conf.bt_stop_list);
2533         strlist__delete(symbol_conf.sym_list);
2534         strlist__delete(symbol_conf.dso_list);
2535         strlist__delete(symbol_conf.comm_list);
2536         intlist__delete(symbol_conf.tid_list);
2537         intlist__delete(symbol_conf.pid_list);
2538         vmlinux_path__exit();
2539         symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2540         symbol_conf.bt_stop_list = NULL;
2541         symbol_conf.initialized = false;
2542 }
2543
2544 int symbol__config_symfs(const struct option *opt __maybe_unused,
2545                          const char *dir, int unset __maybe_unused)
2546 {
2547         char *bf = NULL;
2548         int ret;
2549
2550         symbol_conf.symfs = strdup(dir);
2551         if (symbol_conf.symfs == NULL)
2552                 return -ENOMEM;
2553
2554         /* Skip the locally configured cache if a symfs is given, and
2555          * point the buildid dir at symfs/.debug instead.
2556          */
2557         ret = asprintf(&bf, "%s/%s", dir, ".debug");
2558         if (ret < 0)
2559                 return -ENOMEM;
2560
2561         set_buildid_dir(bf);
2562
2563         free(bf);
2564         return 0;
2565 }
2566
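/* Reference counting helpers for struct mem_info. */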
2567 struct mem_info *mem_info__get(struct mem_info *mi)
2568 {
2569         if (mi)
2570                 refcount_inc(&mi->refcnt);
2571         return mi;
2572 }
2573
2574 void mem_info__put(struct mem_info *mi)
2575 {
2576         if (mi && refcount_dec_and_test(&mi->refcnt))
2577                 free(mi);
2578 }
2579
2580 struct mem_info *mem_info__new(void)
2581 {
2582         struct mem_info *mi = zalloc(sizeof(*mi));
2583
2584         if (mi)
2585                 refcount_set(&mi->refcnt, 1);
2586         return mi;
2587 }