11 #include <symbol/kallsyms.h>
15 #define EM_AARCH64 183 /* ARM 64 bit */
/* Demangling support: with libiberty, wrap cplus_demangle(); otherwise a
 * stub (or bfd.h, which requires PACKAGE to be defined first) is used. */
19 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
20 extern char *cplus_demangle(const char *, int);
22 static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
24 return cplus_demangle(c, i);
/* Fallback stub when no demangler support is compiled in. */
28 static inline char *bfd_demangle(void __maybe_unused *v,
29 const char __maybe_unused *c,
/* PACKAGE must be defined before including bfd.h. */
35 #define PACKAGE 'perf'
40 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
/* Fallback for libelf versions that lack elf_getphdrnum(): read the
 * program-header count out of the ELF header instead. */
41 static int elf_getphdrnum(Elf *elf, size_t *dst)
46 ehdr = gelf_getehdr(elf, &gehdr);
/* ELF note type for the GNU build-id; value fixed by the GNU ABI. */
56 #ifndef NT_GNU_BUILD_ID
57 #define NT_GNU_BUILD_ID 3
61 * elf_symtab__for_each_symbol - iterate thru all the symbols
63 * @syms: struct elf_symtab instance to iterate
65 * @sym: GElf_Sym iterator
/* Iterates by index, refreshing @sym via gelf_getsym() on each step. */
67 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
68 for (idx = 0, gelf_getsym(syms, idx, &sym);\
70 idx++, gelf_getsym(syms, idx, &sym))
72 static inline uint8_t elf_sym__type(const GElf_Sym *sym)
74 return GELF_ST_TYPE(sym->st_info);
77 static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
79 return GELF_ST_VISIBILITY(sym->st_other);
/* STT_GNU_IFUNC may be missing from older elf.h headers. */
83 #define STT_GNU_IFUNC 10
/* Function symbols: STT_FUNC or GNU indirect functions, and defined
 * (st_shndx != SHN_UNDEF). */
86 static inline int elf_sym__is_function(const GElf_Sym *sym)
88 return (elf_sym__type(sym) == STT_FUNC ||
89 elf_sym__type(sym) == STT_GNU_IFUNC) &&
91 sym->st_shndx != SHN_UNDEF;
/* Data symbols: STT_OBJECT and defined (st_shndx != SHN_UNDEF). */
94 static inline bool elf_sym__is_object(const GElf_Sym *sym)
96 return elf_sym__type(sym) == STT_OBJECT &&
98 sym->st_shndx != SHN_UNDEF;
/* Labels: untyped (STT_NOTYPE), defined, non-absolute symbols that are
 * externally visible (not STV_HIDDEN/STV_INTERNAL). */
101 static inline int elf_sym__is_label(const GElf_Sym *sym)
103 return elf_sym__type(sym) == STT_NOTYPE &&
105 sym->st_shndx != SHN_UNDEF &&
106 sym->st_shndx != SHN_ABS &&
107 elf_sym__visibility(sym) != STV_HIDDEN &&
108 elf_sym__visibility(sym) != STV_INTERNAL;
/* Does this symbol belong in a map of kind @type (function vs. variable)? */
111 static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
115 return elf_sym__is_function(sym);
117 return elf_sym__is_object(sym);
123 static inline const char *elf_sym__name(const GElf_Sym *sym,
124 const Elf_Data *symstrs)
126 return symstrs->d_buf + sym->st_name;
129 static inline const char *elf_sec__name(const GElf_Shdr *shdr,
130 const Elf_Data *secstrs)
132 return secstrs->d_buf + shdr->sh_name;
135 static inline int elf_sec__is_text(const GElf_Shdr *shdr,
136 const Elf_Data *secstrs)
138 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
141 static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
142 const Elf_Data *secstrs)
144 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
/* Does section @shdr hold the kind of content that map @type tracks? */
147 static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
152 return elf_sec__is_text(shdr, secstrs);
154 return elf_sec__is_data(shdr, secstrs);
/* Walk all sections and return the index of the one whose
 * [sh_addr, sh_addr + sh_size) range contains @addr. */
160 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
166 while ((sec = elf_nextscn(elf, sec)) != NULL) {
167 gelf_getshdr(sec, &shdr);
169 if ((addr >= shdr.sh_addr) &&
170 (addr < (shdr.sh_addr + shdr.sh_size)))
/* Find a section by name, filling @shp with its header and (optionally)
 * @idx with its index. */
179 Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
180 GElf_Shdr *shp, const char *name, size_t *idx)
185 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
186 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
189 while ((sec = elf_nextscn(elf, sec)) != NULL) {
192 gelf_getshdr(sec, shp);
193 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
194 if (str && !strcmp(name, str)) {
/* Iterate the REL entries of @reldata, refreshing @pos each step. */
205 #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
206 for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
208 ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
/* Same, but for RELA (relocations with explicit addends). */
210 #define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
211 for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
213 ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
216 * We need to check if we have a .dynsym, so that we can handle the
217 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
218 * .dynsym or .symtab).
219 * And always look at the original dso, not at debuginfo packages, that
220 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
222 int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
223 symbol_filter_t filter)
225 uint32_t nr_rel_entries, idx;
230 GElf_Shdr shdr_rel_plt, shdr_dynsym;
231 Elf_Data *reldata, *syms, *symstrs;
232 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
235 char sympltname[1024];
237 int nr = 0, symidx, err = 0;
/* Dynamic symbol info was cached by symsrc__init(). */
245 scn_dynsym = ss->dynsym;
246 shdr_dynsym = ss->dynshdr;
247 dynsym_idx = ss->dynsym_idx;
249 if (scn_dynsym == NULL)
/* The PLT relocations live in .rela.plt or, failing that, .rel.plt. */
252 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
254 if (scn_plt_rel == NULL) {
255 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
257 if (scn_plt_rel == NULL)
/* The relocation section must reference .dynsym. */
263 if (shdr_rel_plt.sh_link != dynsym_idx)
266 if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
270 * Fetch the relocation section to find the idxes to the GOT
271 * and the symbols in the .dynsym they refer to.
273 reldata = elf_getdata(scn_plt_rel, NULL);
277 syms = elf_getdata(scn_dynsym, NULL);
/* .dynsym's sh_link points at its string table (.dynstr). */
281 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
282 if (scn_symstrs == NULL)
285 symstrs = elf_getdata(scn_symstrs, NULL);
289 if (symstrs->d_size == 0)
292 nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
293 plt_offset = shdr_plt.sh_offset;
/* One PLT slot per relocation entry; synthesize a "name@plt" symbol
 * for each, sized by the PLT entry size. */
295 if (shdr_rel_plt.sh_type == SHT_RELA) {
296 GElf_Rela pos_mem, *pos;
298 elf_section__for_each_rela(reldata, pos, pos_mem, idx,
300 symidx = GELF_R_SYM(pos->r_info);
301 plt_offset += shdr_plt.sh_entsize;
302 gelf_getsym(syms, symidx, &sym);
303 snprintf(sympltname, sizeof(sympltname),
304 "%s@plt", elf_sym__name(&sym, symstrs));
306 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
307 STB_GLOBAL, sympltname);
311 if (filter && filter(map, f))
314 symbols__insert(&dso->symbols[map->type], f);
/* Same walk for the REL (no addend) flavour. */
318 } else if (shdr_rel_plt.sh_type == SHT_REL) {
319 GElf_Rel pos_mem, *pos;
320 elf_section__for_each_rel(reldata, pos, pos_mem, idx,
322 symidx = GELF_R_SYM(pos->r_info);
323 plt_offset += shdr_plt.sh_entsize;
324 gelf_getsym(syms, symidx, &sym);
325 snprintf(sympltname, sizeof(sympltname),
326 "%s@plt", elf_sym__name(&sym, symstrs));
328 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
329 STB_GLOBAL, sympltname);
333 if (filter && filter(map, f))
336 symbols__insert(&dso->symbols[map->type], f);
346 pr_debug("%s: problems reading %s PLT info.\n",
347 __func__, dso->long_name);
/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 * Mask with ~3UL rather than -4U so the mask is at least as wide as
 * unsigned long and cannot truncate a 64-bit operand to 32 bits.
 */
#define NOTE_ALIGN(n) (((n) + 3) & ~3UL)
/* Scan the known note sections for an NT_GNU_BUILD_ID note and copy its
 * descriptor (the build-id bytes) into @bf, zero-padding up to @size. */
356 static int elf_read_build_id(Elf *elf, void *bf, size_t size)
366 if (size < BUILD_ID_SIZE)
373 if (gelf_getehdr(elf, &ehdr) == NULL) {
374 pr_err("%s: cannot get elf header.\n", __func__);
379 * Check following sections for notes:
380 * '.note.gnu.build-id'
382 * '.note' (VDSO specific)
/* Try each candidate note section name in turn. */
385 sec = elf_section_by_name(elf, &ehdr, &shdr,
386 ".note.gnu.build-id", NULL);
390 sec = elf_section_by_name(elf, &ehdr, &shdr,
395 sec = elf_section_by_name(elf, &ehdr, &shdr,
404 data = elf_getdata(sec, NULL);
/* Walk note records: header, then 4-byte-aligned name and descriptor. */
409 while (ptr < (data->d_buf + data->d_size)) {
410 GElf_Nhdr *nhdr = ptr;
411 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
412 descsz = NOTE_ALIGN(nhdr->n_descsz);
415 ptr += sizeof(*nhdr);
418 if (nhdr->n_type == NT_GNU_BUILD_ID &&
419 nhdr->n_namesz == sizeof("GNU")) {
420 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
421 size_t sz = min(size, descsz);
423 memset(bf + sz, 0, size - sz);
/* Open @filename as an ELF file and extract its build-id into @bf. */
435 int filename__read_build_id(const char *filename, void *bf, size_t size)
440 if (size < BUILD_ID_SIZE)
443 fd = open(filename, O_RDONLY)?
447 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
449 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
453 err = elf_read_build_id(elf, bf, size);
/* Read a build-id from a sysfs notes file (raw note stream, no ELF
 * container), e.g. /sys/kernel/notes; note records are parsed by hand. */
462 int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
466 if (size < BUILD_ID_SIZE)
469 fd = open(filename, O_RDONLY);
476 size_t namesz, descsz;
478 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
/* Name and descriptor are 4-byte aligned in the stream. */
481 namesz = NOTE_ALIGN(nhdr.n_namesz);
482 descsz = NOTE_ALIGN(nhdr.n_descsz);
483 if (nhdr.n_type == NT_GNU_BUILD_ID &&
484 nhdr.n_namesz == sizeof("GNU")) {
485 if (read(fd, bf, namesz) != (ssize_t)namesz)
487 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
488 size_t sz = min(descsz, size);
489 if (read(fd, build_id, sz) == (ssize_t)sz) {
490 memset(build_id + sz, 0, size - sz);
/* Not the GNU build-id note: skip over the descriptor. */
494 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
497 int n = namesz + descsz;
499 if (n > (int)sizeof(bf)) {
501 pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
502 __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
504 if (read(fd, bf, n) != n)
/* Read the debug-info file name out of @filename's .gnu_debuglink
 * section into @debuglink. */
513 int filename__read_debuglink(const char *filename, char *debuglink,
524 fd = open(filename, O_RDONLY);
528 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
530 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
538 if (gelf_getehdr(elf, &ehdr) == NULL) {
539 pr_err("%s: cannot get elf header.\n", __func__);
543 sec = elf_section_by_name(elf, &ehdr, &shdr,
544 ".gnu_debuglink", NULL);
548 data = elf_getdata(sec, NULL);
552 /* the start of this section is a zero-terminated string */
/* NOTE(review): strncpy() does not NUL-terminate if the name fills
 * @size exactly — confirm callers pass a zeroed/oversized buffer. */
553 strncpy(debuglink, data->d_buf, size);
/* Decide whether DSO data needs byte-swapping by comparing the DSO's
 * EI_DATA encoding against the host endianness (probed via @endian). */
565 static int dso__swap_init(struct dso *dso, unsigned char eidata)
567 static unsigned int const endian = 1;
569 dso->needs_swap = DSO_SWAP__NO;
573 /* We are big endian, DSO is little endian. */
574 if (*(unsigned char const *)&endian != 1)
575 dso->needs_swap = DSO_SWAP__YES;
579 /* We are little endian, DSO is big endian. */
580 if (*(unsigned char const *)&endian != 0)
581 dso->needs_swap = DSO_SWAP__YES;
585 pr_err("unrecognized DSO data encoding %d\n", eidata);
/* Decompress a compressed kernel module image into a temp file and
 * return an fd to it (or a negative error, with dso->load_errno set). */
592 static int decompress_kmodule(struct dso *dso, const char *name,
593 enum dso_binary_type type)
596 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
/* Only compressed-module binary types (or build-id cache entries,
 * which may point at one) are eligible. */
599 if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
600 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
601 type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
604 if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
605 name = dso->long_name;
607 if (kmod_path__parse_ext(&m, name) || !m.comp)
610 fd = mkstemp(tmpbuf);
612 dso->load_errno = errno;
616 if (!decompress_to_file(m.ext, name, fd)) {
617 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
629 bool symsrc__possibly_runtime(struct symsrc *ss)
631 return ss->dynsym || ss->opdsec;
634 bool symsrc__has_symtab(struct symsrc *ss)
636 return ss->symtab != NULL;
/* Tear down @ss — counterpart to symsrc__init(). */
639 void symsrc__destroy(struct symsrc *ss)
646 bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
648 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
/* Open @name as an ELF symbol source for @dso: validate the header and
 * build-id, then cache the .symtab/.dynsym/.opd sections in @ss.
 * On failure dso->load_errno records why. */
651 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
652 enum dso_binary_type type)
659 if (dso__needs_decompress(dso)) {
660 fd = decompress_kmodule(dso, name, type);
664 fd = open(name, O_RDONLY);
666 dso->load_errno = errno;
671 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
673 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
674 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
678 if (gelf_getehdr(elf, &ehdr) == NULL) {
679 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
680 pr_debug("%s: cannot get elf header.\n", __func__);
684 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
685 dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
689 /* Always reject images with a mismatched build-id: */
690 if (dso->has_build_id) {
691 u8 build_id[BUILD_ID_SIZE];
693 if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
694 dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
698 if (!dso__build_id_equal(dso, build_id)) {
699 pr_debug("%s: build id mismatch for %s.\n", __func__, name);
700 dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
705 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
/* Cache the symbol-bearing sections; each must have the expected type. */
707 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
709 if (ss->symshdr.sh_type != SHT_SYMTAB)
713 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
715 if (ss->dynshdr.sh_type != SHT_DYNSYM)
719 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
721 if (ss->opdshdr.sh_type != SHT_PROGBITS)
/* User-space DSOs get an explicit adjust check; others defer to the
 * (possibly arch-overridden) elf__needs_adjust_symbols(). */
724 if (dso->kernel == DSO_TYPE_USER) {
726 ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
727 ehdr.e_type == ET_REL ||
729 elf_section_by_name(elf, &ehdr, &shdr,
733 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
736 ss->name = strdup(name);
738 dso->load_errno = errno;
757 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
758 * @kmap: kernel maps and relocation reference symbol
760 * This function returns %true if we are dealing with the kernel maps and the
761 * relocation reference symbol has not yet been found. Otherwise %false is
764 static bool ref_reloc_sym_not_found(struct kmap *kmap)
766 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
767 !kmap->ref_reloc_sym->unrelocated_addr;
771 * ref_reloc - kernel relocation offset.
772 * @kmap: kernel maps and relocation reference symbol
774 * This function returns the offset of kernel addresses as determined by using
775 * the relocation reference symbol i.e. if the kernel has not been relocated
776 * then the return value is zero.
778 static u64 ref_reloc(struct kmap *kmap)
780 if (kmap && kmap->ref_reloc_sym &&
781 kmap->ref_reloc_sym->unrelocated_addr)
782 return kmap->ref_reloc_sym->addr -
783 kmap->ref_reloc_sym->unrelocated_addr;
787 static bool want_demangle(bool is_kernel_sym)
789 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
/* Weak default: no per-architecture symbol fixup; arches may override. */
792 void __weak arch__elf_sym_adjust(GElf_Sym *sym __maybe_unused) { }
/* Load symbols for @dso from @syms_ss (symbol table source), resolving
 * section data against @runtime_ss (the image actually mapped). Handles
 * kernel/module remapping, ARM mapping symbols, PPC .opd indirection,
 * and optional C++ demangling. Returns the number of symbols added. */
794 int dso__load_sym(struct dso *dso, struct map *map,
795 struct symsrc *syms_ss, struct symsrc *runtime_ss,
796 symbol_filter_t filter, int kmodule)
798 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
799 struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
800 struct map *curr_map = map;
801 struct dso *curr_dso = dso;
802 Elf_Data *symstrs, *secstrs;
808 Elf_Data *syms, *opddata = NULL;
810 Elf_Scn *sec, *sec_strndx;
813 bool remap_kernel = false, adjust_kernel_syms = false;
818 dso->symtab_type = syms_ss->type;
819 dso->is_64_bit = syms_ss->is_64_bit;
820 dso->rel = syms_ss->ehdr.e_type == ET_REL;
823 * Modules may already have symbols from kallsyms, but those symbols
824 * have the wrong values for the dso maps, so remove them.
826 if (kmodule && syms_ss->symtab)
827 symbols__delete(&dso->symbols[map->type]);
829 if (!syms_ss->symtab) {
831 * If the vmlinux is stripped, fail so we will fall back
832 * to using kallsyms. The vmlinux runtime symbols aren't
/* Fall back to .dynsym when there is no .symtab. */
838 syms_ss->symtab = syms_ss->dynsym;
839 syms_ss->symshdr = syms_ss->dynshdr;
843 ehdr = syms_ss->ehdr;
844 sec = syms_ss->symtab;
845 shdr = syms_ss->symshdr;
847 if (runtime_ss->opdsec)
848 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
850 syms = elf_getdata(sec, NULL);
854 sec = elf_getscn(elf, shdr.sh_link);
858 symstrs = elf_getdata(sec, NULL);
862 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
863 if (sec_strndx == NULL)
866 secstrs = elf_getdata(sec_strndx, NULL);
870 nr_syms = shdr.sh_size / shdr.sh_entsize;
872 memset(&sym, 0, sizeof(sym));
875 * The kernel relocation symbol is needed in advance in order to adjust
876 * kernel maps correctly.
878 if (ref_reloc_sym_not_found(kmap)) {
879 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
880 const char *elf_name = elf_sym__name(&sym, symstrs);
882 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
884 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
885 map->reloc = kmap->ref_reloc_sym->addr -
886 kmap->ref_reloc_sym->unrelocated_addr;
892 * Handle any relocation of vdso necessary because older kernels
893 * attempted to prelink vdso to its virtual address.
895 if (dso__is_vdso(dso)) {
898 if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
899 map->reloc = map->start - tshdr.sh_addr + tshdr.sh_offset;
902 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
904 * Initial kernel and module mappings do not map to the dso. For
905 * function mappings, flag the fixups.
907 if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
909 adjust_kernel_syms = dso->adjust_symbols;
/* Main pass: walk every symbol in the table. */
911 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
913 const char *elf_name = elf_sym__name(&sym, symstrs);
914 char *demangled = NULL;
915 int is_label = elf_sym__is_label(&sym);
916 const char *section_name;
917 bool used_opd = false;
919 if (!is_label && !elf_sym__is_a(&sym, map->type))
922 /* Reject ARM ELF "mapping symbols": these aren't unique and
923 * don't identify functions, so will confuse the profile
925 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
926 if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
927 && (elf_name[2] == '\0' || elf_name[2] == '.'))
/* PPC64 .opd: the symbol value points at a function descriptor;
 * dereference it to get the real entry address. */
931 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
932 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
933 u64 *opd = opddata->d_buf + offset;
934 sym.st_value = DSO__SWAP(dso, u64, *opd);
935 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
940 * When loading symbols in a data mapping, ABS symbols (which
941 * has a value of SHN_ABS in its st_shndx) failed at
942 * elf_getscn(). And it marks the loading as a failure so
943 * already loaded symbols cannot be fixed up.
945 * I'm not sure what should be done. Just ignore them for now.
948 if (sym.st_shndx == SHN_ABS)
951 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
955 gelf_getshdr(sec, &shdr);
957 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
960 section_name = elf_sec__name(&shdr, secstrs);
962 /* On ARM, symbols for thumb functions have 1 added to
963 * the symbol address as a flag - remove it */
964 if ((ehdr.e_machine == EM_ARM) &&
965 (map->type == MAP__FUNCTION) &&
969 arch__elf_sym_adjust(&sym);
971 if (dso->kernel || kmodule) {
972 char dso_name[PATH_MAX];
974 /* Adjust symbol to map to file offset */
975 if (adjust_kernel_syms)
976 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
978 if (strcmp(section_name,
979 (curr_dso->short_name +
980 dso->short_name_len)) == 0)
983 if (strcmp(section_name, ".text") == 0) {
985 * The initial kernel mapping is based on
986 * kallsyms and identity maps. Overwrite it to
987 * map to the kernel dso.
989 if (remap_kernel && dso->kernel) {
990 remap_kernel = false;
991 map->start = shdr.sh_addr +
993 map->end = map->start + shdr.sh_size;
994 map->pgoff = shdr.sh_offset;
995 map->map_ip = map__map_ip;
996 map->unmap_ip = map__unmap_ip;
997 /* Ensure maps are correctly ordered */
1000 map_groups__remove(kmaps, map);
1001 map_groups__insert(kmaps, map);
1007 * The initial module mapping is based on
1008 * /proc/modules mapped to offset zero.
1009 * Overwrite it to map to the module dso.
1011 if (remap_kernel && kmodule) {
1012 remap_kernel = false;
1013 map->pgoff = shdr.sh_offset;
/* Non-.text kernel sections get their own "<dso><section>" map/dso. */
1024 snprintf(dso_name, sizeof(dso_name),
1025 "%s%s", dso->short_name, section_name);
1027 curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
1028 if (curr_map == NULL) {
1029 u64 start = sym.st_value;
1032 start += map->start + shdr.sh_offset;
1034 curr_dso = dso__new(dso_name);
1035 if (curr_dso == NULL)
1037 curr_dso->kernel = dso->kernel;
1038 curr_dso->long_name = dso->long_name;
1039 curr_dso->long_name_len = dso->long_name_len;
1040 curr_map = map__new2(start, curr_dso,
1042 if (curr_map == NULL) {
1046 if (adjust_kernel_syms) {
1047 curr_map->start = shdr.sh_addr +
1049 curr_map->end = curr_map->start +
1051 curr_map->pgoff = shdr.sh_offset;
1053 curr_map->map_ip = identity__map_ip;
1054 curr_map->unmap_ip = identity__map_ip;
1056 curr_dso->symtab_type = dso->symtab_type;
1057 map_groups__insert(kmaps, curr_map);
1058 dsos__add(&map->groups->machine->dsos, curr_dso);
1059 dso__set_loaded(curr_dso, map->type);
1061 curr_dso = curr_map->dso;
/* Adjust user-space symbol values from virtual address to file offset
 * when the relevant source says so. */
1066 if ((used_opd && runtime_ss->adjust_symbols)
1067 || (!used_opd && syms_ss->adjust_symbols)) {
1068 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1069 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
1070 (u64)sym.st_value, (u64)shdr.sh_addr,
1071 (u64)shdr.sh_offset);
1072 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1076 * We need to figure out if the object was created from C++ sources
1077 * DWARF DW_compile_unit has this, but we don't always have access
1080 if (want_demangle(dso->kernel || kmodule)) {
1081 int demangle_flags = DMGL_NO_OPTS;
1083 demangle_flags = DMGL_PARAMS | DMGL_ANSI;
1085 demangled = bfd_demangle(NULL, elf_name, demangle_flags);
1086 if (demangled != NULL)
1087 elf_name = demangled;
1089 f = symbol__new(sym.st_value, sym.st_size,
1090 GELF_ST_BIND(sym.st_info), elf_name);
1095 if (filter && filter(curr_map, f))
1098 symbols__insert(&curr_dso->symbols[curr_map->type], f);
1104 * For misannotated, zeroed, ASM function sizes.
1107 symbols__fixup_end(&dso->symbols[map->type]);
1108 symbols__fixup_duplicate(&dso->symbols[map->type]);
1111 * We need to fixup this here too because we create new
1112 * maps here, for things like vsyscall sections.
1114 __map_groups__fixup_end(kmaps, map->type);
/* Walk the program headers and invoke @mapfn for each loadable segment
 * of interest (executable segments for @exe, readable otherwise). */
1122 static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1129 if (elf_getphdrnum(elf, &phdrnum))
1132 for (i = 0; i < phdrnum; i++) {
1133 if (gelf_getphdr(elf, i, &phdr) == NULL)
1135 if (phdr.p_type != PT_LOAD)
1138 if (!(phdr.p_flags & PF_X))
1141 if (!(phdr.p_flags & PF_R))
/* File-backed size only: don't report .bss-style tail. */
1144 sz = min(phdr.p_memsz, phdr.p_filesz);
1147 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
/* Open @fd as ELF, report its word size via @is_64_bit, and iterate its
 * load maps through elf_read_maps(). */
1154 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1160 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1165 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1167 err = elf_read_maps(elf, exe, mapfn, data);
/* Classify the ELF behind @fd: 64-bit, x32 (EM_X86_64 in an ELFCLASS32
 * container), 32-bit, or unknown when it isn't a readable ELF. */
1173 enum dso_type dso__type_fd(int fd)
1175 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1180 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1185 if (ek != ELF_K_ELF)
1188 if (gelf_getclass(elf) == ELFCLASS64) {
1189 dso_type = DSO__TYPE_64BIT;
1193 if (gelf_getehdr(elf, &ehdr) == NULL)
1196 if (ehdr.e_machine == EM_X86_64)
1197 dso_type = DSO__TYPE_X32BIT;
1199 dso_type = DSO__TYPE_32BIT;
/* Copy @len bytes from @from at @from_offs to @to at @to_offs, one page
 * at a time through a bounce buffer. */
1206 static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1211 char *buf = malloc(page_size);
1216 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1219 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1226 /* Use read because mmap won't work on proc files */
1227 r = read(from, buf, n);
1233 r = write(to, buf, n);
/* Open an existing kcore file read-only and cache its fd, Elf handle,
 * ELF class and header in @kcore. */
1254 static int kcore__open(struct kcore *kcore, const char *filename)
1258 kcore->fd = open(filename, O_RDONLY);
1259 if (kcore->fd == -1)
1262 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
1266 kcore->elfclass = gelf_getclass(kcore->elf);
1267 if (kcore->elfclass == ELFCLASSNONE)
1270 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
1277 elf_end(kcore->elf);
/* Create a new kcore file for writing (either via mkstemp, mutating
 * @filename, or by exclusive create) and start a fresh ELF header. */
1283 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
1286 kcore->elfclass = elfclass;
1289 kcore->fd = mkstemp(filename);
1291 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
1292 if (kcore->fd == -1)
1295 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
1299 if (!gelf_newehdr(kcore->elf, elfclass))
1302 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
1307 elf_end(kcore->elf);
/* Release the resources held by @kcore. */
1314 static void kcore__close(struct kcore *kcore)
1316 elf_end(kcore->elf);
/* Build @to's ELF header from @from's: copy identity/machine fields,
 * set @count program headers, and drop all section-header info. */
1320 static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
1322 GElf_Ehdr *ehdr = &to->ehdr;
1323 GElf_Ehdr *kehdr = &from->ehdr;
1325 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
1326 ehdr->e_type = kehdr->e_type;
1327 ehdr->e_machine = kehdr->e_machine;
1328 ehdr->e_version = kehdr->e_version;
1331 ehdr->e_flags = kehdr->e_flags;
1332 ehdr->e_phnum = count;
1333 ehdr->e_shentsize = 0;
1335 ehdr->e_shstrndx = 0;
/* Header and program-header sizes depend on the ELF class. */
1337 if (from->elfclass == ELFCLASS32) {
1338 ehdr->e_phoff = sizeof(Elf32_Ehdr);
1339 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
1340 ehdr->e_phentsize = sizeof(Elf32_Phdr);
1342 ehdr->e_phoff = sizeof(Elf64_Ehdr);
1343 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
1344 ehdr->e_phentsize = sizeof(Elf64_Phdr);
1347 if (!gelf_update_ehdr(to->elf, ehdr))
1350 if (!gelf_newphdr(to->elf, count))
1356 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
1361 .p_flags = PF_R | PF_W | PF_X,
1367 .p_align = page_size,
1370 if (!gelf_update_phdr(kcore->elf, idx, &phdr))
1376 static off_t kcore__write(struct kcore *kcore)
1378 return elf_update(kcore->elf, ELF_C_WRITE);
/* Accumulated layout info while planning a kcore copy: symbol extents
 * gathered from kallsyms/modules, plus the two segments to extract. */
1387 struct kcore_copy_info {
1393 u64 first_module_symbol;
1394 u64 last_module_symbol;
1395 struct phdr_data kernel_map;
1396 struct phdr_data modules_map;
/* kallsyms__parse() callback: track lowest/highest function addresses,
 * separately for module symbols (names with '[') and kernel proper,
 * and record _stext/_etext when seen. */
1399 static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1402 struct kcore_copy_info *kci = arg;
1404 if (!symbol_type__is_a(type, MAP__FUNCTION))
1407 if (strchr(name, '[')) {
1408 if (!kci->first_module_symbol || start < kci->first_module_symbol)
1409 kci->first_module_symbol = start;
1410 if (start > kci->last_module_symbol)
1411 kci->last_module_symbol = start;
1415 if (!kci->first_symbol || start < kci->first_symbol)
1416 kci->first_symbol = start;
1418 if (!kci->last_symbol || start > kci->last_symbol)
1419 kci->last_symbol = start;
1421 if (!strcmp(name, "_stext")) {
1426 if (!strcmp(name, "_etext")) {
/* Parse <dir>/kallsyms into @kci, respecting kptr_restrict checks. */
1434 static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
1437 char kallsyms_filename[PATH_MAX];
1439 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
1441 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
1444 if (kallsyms__parse(kallsyms_filename, kci,
1445 kcore_copy__process_kallsyms) < 0)
/* modules__parse() callback: remember the lowest module load address. */
1451 static int kcore_copy__process_modules(void *arg,
1452 const char *name __maybe_unused,
1455 struct kcore_copy_info *kci = arg;
1457 if (!kci->first_module || start < kci->first_module)
1458 kci->first_module = start;
/* Parse <dir>/modules into @kci, respecting restricted-file checks. */
1463 static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1466 char modules_filename[PATH_MAX];
1468 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
1470 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
1473 if (modules__parse(modules_filename, kci,
1474 kcore_copy__process_modules) < 0)
/* If the wanted range [s, e) begins inside segment [start, end) and @p
 * is not yet set, record its address/offset/length in @p. */
1480 static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
1483 if (p->addr || s < start || s >= end)
1487 p->offset = (s - start) + pgoff;
1488 p->len = e < end ? e - s : end - s;
/* elf_read_maps() callback: try to place both the kernel-text range and
 * the modules range inside this load segment. */
1491 static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1493 struct kcore_copy_info *kci = data;
1494 u64 end = start + len;
1496 kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
1499 kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
1500 kci->last_module_symbol);
/* Fill kernel_map/modules_map by scanning kcore's program headers. */
1505 static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1507 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
/* Compute the copy plan: derive page-aligned kernel text and module
 * ranges from kallsyms/modules, then locate them inside kcore. */
1513 static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1516 if (kcore_copy__parse_kallsyms(kci, dir))
1519 if (kcore_copy__parse_modules(kci, dir))
/* Kernel text starts at _stext, or the lowest symbol otherwise. */
1523 kci->stext = round_down(kci->stext, page_size);
1525 kci->stext = round_down(kci->first_symbol, page_size);
/* Kernel text ends at _etext, or one page past the highest symbol. */
1528 kci->etext = round_up(kci->etext, page_size);
1529 } else if (kci->last_symbol) {
1530 kci->etext = round_up(kci->last_symbol, page_size);
1531 kci->etext += page_size;
1534 if (kci->first_module_symbol &&
1535 (!kci->first_module || kci->first_module_symbol < kci->first_module))
1536 kci->first_module = kci->first_module_symbol;
1538 kci->first_module = round_down(kci->first_module, page_size);
1540 if (kci->last_module_symbol) {
1541 kci->last_module_symbol = round_up(kci->last_module_symbol,
1543 kci->last_module_symbol += page_size;
1546 if (!kci->stext || !kci->etext)
1549 if (kci->first_module && !kci->last_module_symbol)
1552 return kcore_copy__read_maps(kci, elf);
/* Copy <from_dir>/<name> to <to_dir>/<name> with mode 0400. */
1555 static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
1558 char from_filename[PATH_MAX];
1559 char to_filename[PATH_MAX];
1561 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1562 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1564 return copyfile_mode(from_filename, to_filename, 0400);
/* Remove <dir>/<name>. */
1567 static int kcore_copy__unlink(const char *dir, const char *name)
1569 char filename[PATH_MAX];
1571 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
1573 return unlink(filename);
/* Compare the contents behind two fds page by page; 0 if identical. */
1576 static int kcore_copy__compare_fds(int from, int to)
1584 buf_from = malloc(page_size);
1585 buf_to = malloc(page_size);
1586 if (!buf_from || !buf_to)
1590 /* Use read because mmap won't work on proc files */
1591 ret = read(from, buf_from, page_size);
1600 if (readn(to, buf_to, len) != (int)len)
1603 if (memcmp(buf_from, buf_to, len))
/* Open both paths and compare their contents via the fd comparator. */
1614 static int kcore_copy__compare_files(const char *from_filename,
1615 const char *to_filename)
1617 int from, to, err = -1;
1619 from = open(from_filename, O_RDONLY);
1623 to = open(to_filename, O_RDONLY);
1625 goto out_close_from;
1627 err = kcore_copy__compare_fds(from, to);
/* Compare <from_dir>/<name> against <to_dir>/<name>; 0 if identical. */
1635 static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
1638 char from_filename[PATH_MAX];
1639 char to_filename[PATH_MAX];
1641 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1642 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1644 return kcore_copy__compare_files(from_filename, to_filename);
1648 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1649 * @from_dir: from directory
1650 * @to_dir: to directory
1652 * This function copies kallsyms, modules and kcore files from one directory to
1653 * another. kallsyms and modules are copied entirely. Only code segments are
1654 * copied from kcore. It is assumed that two segments suffice: one for the
1655 * kernel proper and one for all the modules. The code segments are determined
1656 * from kallsyms and modules files. The kernel map starts at _stext or the
1657 * lowest function symbol, and ends at _etext or the highest function symbol.
1658 * The module map starts at the lowest module address and ends at the highest
1659 * module symbol. Start addresses are rounded down to the nearest page. End
1660 * addresses are rounded up to the nearest page. An extra page is added to the
1661 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1662 * symbol too. Because it contains only code sections, the resulting kcore is
1663 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1664 * is not the same for the kernel map and the modules map. That happens because
1665 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1666 * kallsyms and modules files are compared with their copies to check that
1667 * modules have not been loaded or unloaded while the copies were taking place.
1669 * Return: %0 on success, %-1 on failure.
1671 int kcore_copy(const char *from_dir, const char *to_dir)
1674 struct kcore extract;
1676 int idx = 0, err = -1;
1677 off_t offset = page_size, sz, modules_offset = 0;
1678 struct kcore_copy_info kci = { .stext = 0, };
1679 char kcore_filename[PATH_MAX];
1680 char extract_filename[PATH_MAX];
/* Copy the text companions first; they are re-verified at the end. */
1682 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
1685 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
1686 goto out_unlink_kallsyms;
1688 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
1689 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
1691 if (kcore__open(&kcore, kcore_filename))
1692 goto out_unlink_modules;
1694 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
1695 goto out_kcore_close;
1697 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
1698 goto out_kcore_close;
1700 if (!kci.modules_map.addr)
1703 if (kcore__copy_hdr(&kcore, &extract, count))
1704 goto out_extract_close;
/* Segment 0: kernel text, written at @offset. */
1706 if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
1707 kci.kernel_map.len))
1708 goto out_extract_close;
/* Segment 1 (optional): modules, packed directly after the kernel. */
1710 if (kci.modules_map.addr) {
1711 modules_offset = offset + kci.kernel_map.len;
1712 if (kcore__add_phdr(&extract, idx, modules_offset,
1713 kci.modules_map.addr, kci.modules_map.len))
1714 goto out_extract_close;
1717 sz = kcore__write(&extract);
1718 if (sz < 0 || sz > offset)
1719 goto out_extract_close;
1721 if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
1722 kci.kernel_map.len))
1723 goto out_extract_close;
1725 if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
1726 extract.fd, modules_offset,
1727 kci.modules_map.len))
1728 goto out_extract_close;
/* Guard against module load/unload racing the copy. */
1730 if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
1731 goto out_extract_close;
1733 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
1734 goto out_extract_close;
1739 kcore__close(&extract);
1741 unlink(extract_filename);
1743 kcore__close(&kcore);
1746 kcore_copy__unlink(to_dir, "modules");
1747 out_unlink_kallsyms:
1749 kcore_copy__unlink(to_dir, "kallsyms");
/* Extract a single range (@kce->offs/@kce->len at @kce->addr) from
 * kcore into a temporary one-segment ELF file for later use. */
1754 int kcore_extract__create(struct kcore_extract *kce)
1757 struct kcore extract;
1759 int idx = 0, err = -1;
1760 off_t offset = page_size, sz;
1762 if (kcore__open(&kcore, kce->kcore_filename))
1765 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
1766 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
1767 goto out_kcore_close;
1769 if (kcore__copy_hdr(&kcore, &extract, count))
1770 goto out_extract_close;
1772 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
1773 goto out_extract_close;
1775 sz = kcore__write(&extract);
1776 if (sz < 0 || sz > offset)
1777 goto out_extract_close;
1779 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
1780 goto out_extract_close;
1785 kcore__close(&extract);
1787 unlink(kce->extract_filename);
1789 kcore__close(&kcore);
/* Remove the temporary file created by kcore_extract__create(). */
1794 void kcore_extract__delete(struct kcore_extract *kce)
1796 unlink(kce->extract_filename);
/* One-time libelf initialization; must precede other elf_* calls. */
1799 void symbol__elf_init(void)
/* NOTE(review): elf_version()'s return is ignored here — EV_NONE would
 * indicate a libelf version mismatch; confirm that is acceptable. */
1801 elf_version(EV_CURRENT);