1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
12 #include <sys/utsname.h>
13 #include <sys/param.h>
15 #include <linux/kernel.h>
16 #include <linux/err.h>
17 #include <linux/btf.h>
22 #include "libbpf_internal.h"
26 #define BTF_MAX_NR_TYPES 0x7fffffffU
27 #define BTF_MAX_STR_OFFSET 0x7fffffffU
29 static struct btf_type btf_void;
32 /* raw BTF data in native endianness */
34 /* raw BTF data in non-native endianness */
35 void *raw_data_swapped;
37 /* whether target endianness differs from the native one */
41 * When BTF is loaded from an ELF or raw memory it is stored
42 * in a contiguous memory block. The hdr, types_data, and strs_data
43 * point inside that memory region to their respective parts of BTF
46 * +--------------------------------+
47 * | Header | Types | Strings |
48 * +--------------------------------+
53 * strs_data------------+
55 * If BTF data is later modified, e.g., due to types added or
56 * removed, BTF deduplication performed, etc, this contiguous
57 * representation is broken up into three independently allocated
58 * memory regions to be able to modify them independently.
59 * raw_data is nulled out at that point, but can be later allocated
60 * and cached again if user calls btf__raw_data(), at which point
61 * raw_data will contain a contiguous copy of header, types, and
64 * +----------+ +---------+ +-----------+
65 * | Header | | Types | | Strings |
66 * +----------+ +---------+ +-----------+
71 * strset__data(strs_set)-----+
73 * +----------+---------+-----------+
74 * | Header | Types | Strings |
75 * raw_data----->+----------+---------+-----------+
77 struct btf_header *hdr;
80 size_t types_data_cap; /* used size stored in hdr->type_len */
82 /* type ID to `struct btf_type *` lookup index
83 * type_offs[0] corresponds to the first non-VOID type:
84 * - for base BTF it's type [1];
85 * - for split BTF it's the first non-base BTF type.
89 /* number of types in this BTF instance:
90 * - doesn't include special [0] void type;
91 * - for split BTF counts number of types added on top of base BTF.
94 /* if not NULL, points to the base BTF on top of which the current
98 /* BTF type ID of the first type in this BTF instance:
99 * - for base BTF it's equal to 1;
100 * - for split BTF it's equal to the biggest type ID of base BTF plus 1.
103 /* logical string offset of this BTF instance:
104 * - for base BTF it's equal to 0;
105 * - for split BTF it's equal to the total size of base BTF's string section.
109 /* only one of strs_data or strs_set can be non-NULL, depending on
110 * whether BTF is in a modifiable state (strs_set is used) or not
111 * (strs_data points inside raw_data)
114 /* a set of unique strings */
115 struct strset *strs_set;
116 /* whether strings are already deduplicated */
119 /* BTF object FD, if loaded into kernel */
122 /* Pointer size (in bytes) for the target architecture of this BTF */
126 static inline __u64 ptr_to_u64(const void *ptr)
128 return (__u64) (unsigned long) ptr;
131 /* Ensure given dynamically allocated memory region pointed to by *data* with
132 * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
133 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
134 * are already used. At most *max_cnt* elements can ever be allocated.
135 * If necessary, memory is reallocated and all existing data is copied over,
136 * new pointer to the memory region is stored at *data, new memory region
137 * capacity (in number of elements) is stored in *cap_cnt*.
138 * On success, a pointer to the beginning of the unused memory is returned.
139 * On error, NULL is returned.
141 void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
142 size_t cur_cnt, size_t max_cnt, size_t add_cnt)
147 if (cur_cnt + add_cnt <= *cap_cnt)
148 return *data + cur_cnt * elem_sz;
150 /* requested more than the set limit */
151 if (cur_cnt + add_cnt > max_cnt)
155 new_cnt += new_cnt / 4; /* expand by 25% */
156 if (new_cnt < 16) /* but at least 16 elements */
158 if (new_cnt > max_cnt) /* but not exceeding a set limit */
160 if (new_cnt < cur_cnt + add_cnt) /* also ensure we have enough memory */
161 new_cnt = cur_cnt + add_cnt;
163 new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
167 /* zero out newly allocated portion of memory */
168 memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);
172 return new_data + cur_cnt * elem_sz;
175 /* Ensure given dynamically allocated memory region has enough allocated space
176 * to accommodate *need_cnt* elements of size *elem_sz* bytes each
178 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
182 if (need_cnt <= *cap_cnt)
185 p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
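/* Illustrative sketch (not part of libbpf): these internal helpers are meant
 * for maintaining growable arrays. Appending one element to a hypothetical
 * ids/ids_cap/ids_cnt triple (a __u32 array, its capacity, and its used
 * count) could look like:
 *
 *	__u32 *p = libbpf_add_mem((void **)&ids, &ids_cap, sizeof(__u32),
 *				  ids_cnt, SIZE_MAX, 1);
 *	if (!p)
 *		return -ENOMEM;
 *	*p = 42;
 *	ids_cnt++;
 */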
192 static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
194 return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
195 btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
198 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
202 p = btf_add_type_offs_mem(btf, 1);
210 static void btf_bswap_hdr(struct btf_header *h)
212 h->magic = bswap_16(h->magic);
213 h->hdr_len = bswap_32(h->hdr_len);
214 h->type_off = bswap_32(h->type_off);
215 h->type_len = bswap_32(h->type_len);
216 h->str_off = bswap_32(h->str_off);
217 h->str_len = bswap_32(h->str_len);
220 static int btf_parse_hdr(struct btf *btf)
222 struct btf_header *hdr = btf->hdr;
225 if (btf->raw_size < sizeof(struct btf_header)) {
226 pr_debug("BTF header not found\n");
230 if (hdr->magic == bswap_16(BTF_MAGIC)) {
231 btf->swapped_endian = true;
232 if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
233 pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
234 bswap_32(hdr->hdr_len));
238 } else if (hdr->magic != BTF_MAGIC) {
239 pr_debug("Invalid BTF magic: %x\n", hdr->magic);
243 if (btf->raw_size < hdr->hdr_len) {
244 pr_debug("BTF header len %u larger than data size %u\n",
245 hdr->hdr_len, btf->raw_size);
249 meta_left = btf->raw_size - hdr->hdr_len;
250 if (meta_left < (long long)hdr->str_off + hdr->str_len) {
251 pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
255 if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
256 pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
257 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
261 if (hdr->type_off % 4) {
262 pr_debug("BTF type section is not aligned to 4 bytes\n");
269 static int btf_parse_str_sec(struct btf *btf)
271 const struct btf_header *hdr = btf->hdr;
272 const char *start = btf->strs_data;
273 const char *end = start + btf->hdr->str_len;
275 if (btf->base_btf && hdr->str_len == 0)
277 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
278 pr_debug("Invalid BTF string section\n");
281 if (!btf->base_btf && start[0]) {
282 pr_debug("Invalid BTF string section\n");
288 static int btf_type_size(const struct btf_type *t)
290 const int base_size = sizeof(struct btf_type);
291 __u16 vlen = btf_vlen(t);
293 switch (btf_kind(t)) {
296 case BTF_KIND_VOLATILE:
297 case BTF_KIND_RESTRICT:
299 case BTF_KIND_TYPEDEF:
302 case BTF_KIND_TYPE_TAG:
305 return base_size + sizeof(__u32);
307 return base_size + vlen * sizeof(struct btf_enum);
308 case BTF_KIND_ENUM64:
309 return base_size + vlen * sizeof(struct btf_enum64);
311 return base_size + sizeof(struct btf_array);
312 case BTF_KIND_STRUCT:
314 return base_size + vlen * sizeof(struct btf_member);
315 case BTF_KIND_FUNC_PROTO:
316 return base_size + vlen * sizeof(struct btf_param);
318 return base_size + sizeof(struct btf_var);
319 case BTF_KIND_DATASEC:
320 return base_size + vlen * sizeof(struct btf_var_secinfo);
321 case BTF_KIND_DECL_TAG:
322 return base_size + sizeof(struct btf_decl_tag);
324 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
329 static void btf_bswap_type_base(struct btf_type *t)
331 t->name_off = bswap_32(t->name_off);
332 t->info = bswap_32(t->info);
333 t->type = bswap_32(t->type);
336 static int btf_bswap_type_rest(struct btf_type *t)
338 struct btf_var_secinfo *v;
339 struct btf_enum64 *e64;
340 struct btf_member *m;
344 __u16 vlen = btf_vlen(t);
347 switch (btf_kind(t)) {
350 case BTF_KIND_VOLATILE:
351 case BTF_KIND_RESTRICT:
353 case BTF_KIND_TYPEDEF:
356 case BTF_KIND_TYPE_TAG:
359 *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
362 for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
363 e->name_off = bswap_32(e->name_off);
364 e->val = bswap_32(e->val);
367 case BTF_KIND_ENUM64:
368 for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
369 e64->name_off = bswap_32(e64->name_off);
370 e64->val_lo32 = bswap_32(e64->val_lo32);
371 e64->val_hi32 = bswap_32(e64->val_hi32);
376 a->type = bswap_32(a->type);
377 a->index_type = bswap_32(a->index_type);
378 a->nelems = bswap_32(a->nelems);
380 case BTF_KIND_STRUCT:
382 for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
383 m->name_off = bswap_32(m->name_off);
384 m->type = bswap_32(m->type);
385 m->offset = bswap_32(m->offset);
388 case BTF_KIND_FUNC_PROTO:
389 for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
390 p->name_off = bswap_32(p->name_off);
391 p->type = bswap_32(p->type);
395 btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
397 case BTF_KIND_DATASEC:
398 for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
399 v->type = bswap_32(v->type);
400 v->offset = bswap_32(v->offset);
401 v->size = bswap_32(v->size);
404 case BTF_KIND_DECL_TAG:
405 btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
408 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
413 static int btf_parse_type_sec(struct btf *btf)
415 struct btf_header *hdr = btf->hdr;
416 void *next_type = btf->types_data;
417 void *end_type = next_type + hdr->type_len;
420 while (next_type + sizeof(struct btf_type) <= end_type) {
421 if (btf->swapped_endian)
422 btf_bswap_type_base(next_type);
424 type_size = btf_type_size(next_type);
427 if (next_type + type_size > end_type) {
428 pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
432 if (btf->swapped_endian && btf_bswap_type_rest(next_type))
435 err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
439 next_type += type_size;
443 if (next_type != end_type) {
444 pr_warn("BTF types data is malformed\n");
451 static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
455 s = btf__str_by_offset(btf, str_off);
457 pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
464 static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
466 const struct btf_type *t;
468 t = btf__type_by_id(btf, id);
470 pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
477 static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
479 __u32 kind = btf_kind(t);
482 err = btf_validate_str(btf, t->name_off, "type name", id);
493 case BTF_KIND_TYPEDEF:
494 case BTF_KIND_VOLATILE:
496 case BTF_KIND_RESTRICT:
498 case BTF_KIND_DECL_TAG:
499 case BTF_KIND_TYPE_TAG:
500 err = btf_validate_id(btf, t->type, id);
504 case BTF_KIND_ARRAY: {
505 const struct btf_array *a = btf_array(t);
507 err = btf_validate_id(btf, a->type, id);
508 err = err ?: btf_validate_id(btf, a->index_type, id);
513 case BTF_KIND_STRUCT:
514 case BTF_KIND_UNION: {
515 const struct btf_member *m = btf_members(t);
518 for (i = 0; i < n; i++, m++) {
519 err = btf_validate_str(btf, m->name_off, "field name", id);
520 err = err ?: btf_validate_id(btf, m->type, id);
526 case BTF_KIND_ENUM: {
527 const struct btf_enum *m = btf_enum(t);
530 for (i = 0; i < n; i++, m++) {
531 err = btf_validate_str(btf, m->name_off, "enum name", id);
537 case BTF_KIND_ENUM64: {
538 const struct btf_enum64 *m = btf_enum64(t);
541 for (i = 0; i < n; i++, m++) {
542 err = btf_validate_str(btf, m->name_off, "enum name", id);
548 case BTF_KIND_FUNC: {
549 const struct btf_type *ft;
551 err = btf_validate_id(btf, t->type, id);
554 ft = btf__type_by_id(btf, t->type);
555 if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
556 pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
561 case BTF_KIND_FUNC_PROTO: {
562 const struct btf_param *m = btf_params(t);
565 for (i = 0; i < n; i++, m++) {
566 err = btf_validate_str(btf, m->name_off, "param name", id);
567 err = err ?: btf_validate_id(btf, m->type, id);
573 case BTF_KIND_DATASEC: {
574 const struct btf_var_secinfo *m = btf_var_secinfos(t);
577 for (i = 0; i < n; i++, m++) {
578 err = btf_validate_id(btf, m->type, id);
585 pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
591 /* Validate basic sanity of BTF. It's intentionally less thorough than
592 * kernel's validation and validates only properties of BTF that libbpf relies
593 * on to be correct (e.g., valid type IDs, valid string offsets, etc)
595 static int btf_sanity_check(const struct btf *btf)
597 const struct btf_type *t;
598 __u32 i, n = btf__type_cnt(btf);
601 for (i = 1; i < n; i++) {
602 t = btf_type_by_id(btf, i);
603 err = btf_validate_type(btf, t, i);
610 __u32 btf__type_cnt(const struct btf *btf)
612 return btf->start_id + btf->nr_types;
615 const struct btf *btf__base_btf(const struct btf *btf)
617 return btf->base_btf;
620 /* internal helper returning non-const pointer to a type */
621 struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
625 if (type_id < btf->start_id)
626 return btf_type_by_id(btf->base_btf, type_id);
627 return btf->types_data + btf->type_offs[type_id - btf->start_id];
630 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
632 if (type_id >= btf->start_id + btf->nr_types)
633 return errno = EINVAL, NULL;
634 return btf_type_by_id((struct btf *)btf, type_id);
637 static int determine_ptr_size(const struct btf *btf)
639 static const char * const long_aliases[] = {
652 const struct btf_type *t;
656 if (btf->base_btf && btf->base_btf->ptr_sz > 0)
657 return btf->base_btf->ptr_sz;
659 n = btf__type_cnt(btf);
660 for (i = 1; i < n; i++) {
661 t = btf__type_by_id(btf, i);
665 if (t->size != 4 && t->size != 8)
668 name = btf__name_by_offset(btf, t->name_off);
672 for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
673 if (strcmp(name, long_aliases[j]) == 0)
681 static size_t btf_ptr_sz(const struct btf *btf)
684 ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
685 return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
688 /* Return pointer size this BTF instance assumes. The size is heuristically
689 * determined by looking for 'long' or 'unsigned long' integer type and
690 * recording its size in bytes. If BTF type information doesn't have any such
691 * type, this function returns 0. In the latter case, the native architecture's
692 * pointer size is assumed, i.e., either 4 or 8, depending on the
693 * architecture that libbpf was compiled for. It's possible to override the
694 * guessed value by using the btf__set_pointer_size() API.
696 size_t btf__pointer_size(const struct btf *btf)
699 ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
702 /* not enough BTF type info to guess */
708 /* Override or set pointer size in bytes. Only values of 4 and 8 are
711 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
713 if (ptr_sz != 4 && ptr_sz != 8)
714 return libbpf_err(-EINVAL);
715 btf->ptr_sz = ptr_sz;
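/* Illustrative sketch (not part of libbpf): when generating BTF for a 32-bit
 * target on a 64-bit host, the guessed pointer size can be checked and
 * overridden explicitly (*btf* is assumed to be a valid object):
 *
 *	if (btf__pointer_size(btf) != 4) {
 *		int err = btf__set_pointer_size(btf, 4);
 *
 *		if (err)
 *			return err;
 *	}
 */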
719 static bool is_host_big_endian(void)
721 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
723 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
726 # error "Unrecognized __BYTE_ORDER__"
730 enum btf_endianness btf__endianness(const struct btf *btf)
732 if (is_host_big_endian())
733 return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
735 return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
738 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
740 if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
741 return libbpf_err(-EINVAL);
743 btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
744 if (!btf->swapped_endian) {
745 free(btf->raw_data_swapped);
746 btf->raw_data_swapped = NULL;
751 static bool btf_type_is_void(const struct btf_type *t)
753 return t == &btf_void || btf_is_fwd(t);
756 static bool btf_type_is_void_or_null(const struct btf_type *t)
758 return !t || btf_type_is_void(t);
761 #define MAX_RESOLVE_DEPTH 32
763 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
765 const struct btf_array *array;
766 const struct btf_type *t;
771 t = btf__type_by_id(btf, type_id);
772 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
773 switch (btf_kind(t)) {
775 case BTF_KIND_STRUCT:
778 case BTF_KIND_ENUM64:
779 case BTF_KIND_DATASEC:
784 size = btf_ptr_sz(btf);
786 case BTF_KIND_TYPEDEF:
787 case BTF_KIND_VOLATILE:
789 case BTF_KIND_RESTRICT:
791 case BTF_KIND_DECL_TAG:
792 case BTF_KIND_TYPE_TAG:
796 array = btf_array(t);
797 if (nelems && array->nelems > UINT32_MAX / nelems)
798 return libbpf_err(-E2BIG);
799 nelems *= array->nelems;
800 type_id = array->type;
803 return libbpf_err(-EINVAL);
806 t = btf__type_by_id(btf, type_id);
811 return libbpf_err(-EINVAL);
812 if (nelems && size > UINT32_MAX / nelems)
813 return libbpf_err(-E2BIG);
815 return nelems * size;
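/* Illustrative sketch (not part of libbpf): resolving the byte size of an
 * arbitrary type ID, following modifiers, typedefs, and arrays (*btf* and
 * *id* are assumed to be valid):
 *
 *	__s64 sz = btf__resolve_size(btf, id);
 *
 *	if (sz < 0)
 *		return sz;
 */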
818 int btf__align_of(const struct btf *btf, __u32 id)
820 const struct btf_type *t = btf__type_by_id(btf, id);
821 __u16 kind = btf_kind(t);
826 case BTF_KIND_ENUM64:
828 return min(btf_ptr_sz(btf), (size_t)t->size);
830 return btf_ptr_sz(btf);
831 case BTF_KIND_TYPEDEF:
832 case BTF_KIND_VOLATILE:
834 case BTF_KIND_RESTRICT:
835 case BTF_KIND_TYPE_TAG:
836 return btf__align_of(btf, t->type);
838 return btf__align_of(btf, btf_array(t)->type);
839 case BTF_KIND_STRUCT:
840 case BTF_KIND_UNION: {
841 const struct btf_member *m = btf_members(t);
842 __u16 vlen = btf_vlen(t);
843 int i, max_align = 1, align;
845 for (i = 0; i < vlen; i++, m++) {
846 align = btf__align_of(btf, m->type);
848 return libbpf_err(align);
849 max_align = max(max_align, align);
851 /* if field offset isn't aligned according to field
852 * type's alignment, then struct must be packed
854 if (btf_member_bitfield_size(t, i) == 0 &&
855 (m->offset % (8 * align)) != 0)
859 /* if struct/union size isn't a multiple of its alignment,
860 * then struct must be packed
862 if ((t->size % max_align) != 0)
868 pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
869 return errno = EINVAL, 0;
873 int btf__resolve_type(const struct btf *btf, __u32 type_id)
875 const struct btf_type *t;
878 t = btf__type_by_id(btf, type_id);
879 while (depth < MAX_RESOLVE_DEPTH &&
880 !btf_type_is_void_or_null(t) &&
881 (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
883 t = btf__type_by_id(btf, type_id);
887 if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
888 return libbpf_err(-EINVAL);
893 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
895 __u32 i, nr_types = btf__type_cnt(btf);
897 if (!strcmp(type_name, "void"))
900 for (i = 1; i < nr_types; i++) {
901 const struct btf_type *t = btf__type_by_id(btf, i);
902 const char *name = btf__name_by_offset(btf, t->name_off);
904 if (name && !strcmp(type_name, name))
908 return libbpf_err(-ENOENT);
911 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
912 const char *type_name, __u32 kind)
914 __u32 i, nr_types = btf__type_cnt(btf);
916 if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
919 for (i = start_id; i < nr_types; i++) {
920 const struct btf_type *t = btf__type_by_id(btf, i);
923 if (btf_kind(t) != kind)
925 name = btf__name_by_offset(btf, t->name_off);
926 if (name && !strcmp(type_name, name))
930 return libbpf_err(-ENOENT);
933 __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
936 return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
939 __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
942 return btf_find_by_name_kind(btf, 1, type_name, kind);
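/* Illustrative sketch (not part of libbpf): looking up a struct by name in
 * kernel BTF (*vmlinux_btf* is assumed to be obtained elsewhere, e.g. with
 * btf__load_vmlinux_btf()):
 *
 *	__s32 id = btf__find_by_name_kind(vmlinux_btf, "task_struct",
 *					  BTF_KIND_STRUCT);
 *	if (id < 0)
 *		return id;
 */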
945 static bool btf_is_modifiable(const struct btf *btf)
947 return (void *)btf->hdr != btf->raw_data;
950 void btf__free(struct btf *btf)
952 if (IS_ERR_OR_NULL(btf))
958 if (btf_is_modifiable(btf)) {
959 /* if BTF was modified after loading, it will have a split
960 * in-memory representation for header, types, and strings
961 * sections, so we need to free all of them individually. It
962 * might still have a cached contiguous raw data present,
963 * which will be unconditionally freed below.
966 free(btf->types_data);
967 strset__free(btf->strs_set);
970 free(btf->raw_data_swapped);
971 free(btf->type_offs);
975 static struct btf *btf_new_empty(struct btf *base_btf)
979 btf = calloc(1, sizeof(*btf));
981 return ERR_PTR(-ENOMEM);
985 btf->start_str_off = 0;
987 btf->ptr_sz = sizeof(void *);
988 btf->swapped_endian = false;
991 btf->base_btf = base_btf;
992 btf->start_id = btf__type_cnt(base_btf);
993 btf->start_str_off = base_btf->hdr->str_len;
996 /* +1 for empty string at offset 0 */
997 btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
998 btf->raw_data = calloc(1, btf->raw_size);
999 if (!btf->raw_data) {
1001 return ERR_PTR(-ENOMEM);
1004 btf->hdr = btf->raw_data;
1005 btf->hdr->hdr_len = sizeof(struct btf_header);
1006 btf->hdr->magic = BTF_MAGIC;
1007 btf->hdr->version = BTF_VERSION;
1009 btf->types_data = btf->raw_data + btf->hdr->hdr_len;
1010 btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
1011 btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */
1016 struct btf *btf__new_empty(void)
1018 return libbpf_ptr(btf_new_empty(NULL));
1021 struct btf *btf__new_empty_split(struct btf *base_btf)
1023 return libbpf_ptr(btf_new_empty(base_btf));
1026 static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
1031 btf = calloc(1, sizeof(struct btf));
1033 return ERR_PTR(-ENOMEM);
1037 btf->start_str_off = 0;
1041 btf->base_btf = base_btf;
1042 btf->start_id = btf__type_cnt(base_btf);
1043 btf->start_str_off = base_btf->hdr->str_len;
1046 btf->raw_data = malloc(size);
1047 if (!btf->raw_data) {
1051 memcpy(btf->raw_data, data, size);
1052 btf->raw_size = size;
1054 btf->hdr = btf->raw_data;
1055 err = btf_parse_hdr(btf);
1059 btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
1060 btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;
1062 err = btf_parse_str_sec(btf);
1063 err = err ?: btf_parse_type_sec(btf);
1064 err = err ?: btf_sanity_check(btf);
1071 return ERR_PTR(err);
1077 struct btf *btf__new(const void *data, __u32 size)
1079 return libbpf_ptr(btf_new(data, size, NULL));
1082 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
1083 struct btf_ext **btf_ext)
1085 Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
1086 int err = 0, fd = -1, idx = 0;
1087 struct btf *btf = NULL;
1088 Elf_Scn *scn = NULL;
1093 if (elf_version(EV_CURRENT) == EV_NONE) {
1094 pr_warn("failed to init libelf for %s\n", path);
1095 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1098 fd = open(path, O_RDONLY | O_CLOEXEC);
1101 pr_warn("failed to open %s: %s\n", path, strerror(errno));
1102 return ERR_PTR(err);
1105 err = -LIBBPF_ERRNO__FORMAT;
1107 elf = elf_begin(fd, ELF_C_READ, NULL);
1109 pr_warn("failed to open %s as ELF file\n", path);
1112 if (!gelf_getehdr(elf, &ehdr)) {
1113 pr_warn("failed to get EHDR from %s\n", path);
1117 if (elf_getshdrstrndx(elf, &shstrndx)) {
1118 pr_warn("failed to get section names section index for %s\n",
1123 if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
1124 pr_warn("failed to get e_shstrndx from %s\n", path);
1128 while ((scn = elf_nextscn(elf, scn)) != NULL) {
1133 if (gelf_getshdr(scn, &sh) != &sh) {
1134 pr_warn("failed to get section(%d) header from %s\n",
1138 name = elf_strptr(elf, shstrndx, sh.sh_name);
1140 pr_warn("failed to get section(%d) name from %s\n",
1144 if (strcmp(name, BTF_ELF_SEC) == 0) {
1145 btf_data = elf_getdata(scn, 0);
1147 pr_warn("failed to get section(%d, %s) data from %s\n",
1152 } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
1153 btf_ext_data = elf_getdata(scn, 0);
1154 if (!btf_ext_data) {
1155 pr_warn("failed to get section(%d, %s) data from %s\n",
1164 pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
1168 btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
1169 err = libbpf_get_error(btf);
1173 switch (gelf_getclass(elf)) {
1175 btf__set_pointer_size(btf, 4);
1178 btf__set_pointer_size(btf, 8);
1181 pr_warn("failed to get ELF class (bitness) for %s\n", path);
1185 if (btf_ext && btf_ext_data) {
1186 *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
1187 err = libbpf_get_error(*btf_ext);
1190 } else if (btf_ext) {
1202 btf_ext__free(*btf_ext);
1205 return ERR_PTR(err);
1208 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
1210 return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
1213 struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
1215 return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
1218 static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
1220 struct btf *btf = NULL;
1227 f = fopen(path, "rbe");
1233 /* check BTF magic */
1234 if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
1238 if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
1239 /* definitely not a raw BTF */
1245 if (fseek(f, 0, SEEK_END)) {
1254 /* rewind to the start */
1255 if (fseek(f, 0, SEEK_SET)) {
1260 /* pre-alloc memory and read all of BTF data */
1266 if (fread(data, 1, sz, f) < sz) {
1271 /* finally parse BTF data */
1272 btf = btf_new(data, sz, base_btf);
1278 return err ? ERR_PTR(err) : btf;
1281 struct btf *btf__parse_raw(const char *path)
1283 return libbpf_ptr(btf_parse_raw(path, NULL));
1286 struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
1288 return libbpf_ptr(btf_parse_raw(path, base_btf));
1291 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1299 btf = btf_parse_raw(path, base_btf);
1300 err = libbpf_get_error(btf);
1304 return ERR_PTR(err);
1305 return btf_parse_elf(path, base_btf, btf_ext);
1308 struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
1310 return libbpf_ptr(btf_parse(path, NULL, btf_ext));
1313 struct btf *btf__parse_split(const char *path, struct btf *base_btf)
1315 return libbpf_ptr(btf_parse(path, base_btf, NULL));
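/* Illustrative sketch (not part of libbpf): parsing BTF from a file path,
 * whether it is an ELF object or raw BTF (the "prog.bpf.o" path is purely
 * hypothetical):
 *
 *	struct btf *btf = btf__parse("prog.bpf.o", NULL);
 *
 *	if (!btf)
 *		return -errno;
 */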
1318 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1320 int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
1322 LIBBPF_OPTS(bpf_btf_load_opts, opts);
1323 __u32 buf_sz = 0, raw_size;
1324 char *buf = NULL, *tmp;
1329 return libbpf_err(-EEXIST);
1330 if (log_sz && !log_buf)
1331 return libbpf_err(-EINVAL);
1333 /* cache native raw data representation */
1334 raw_data = btf_get_raw_data(btf, &raw_size, false);
1339 btf->raw_size = raw_size;
1340 btf->raw_data = raw_data;
1343 /* if log_level is 0, we won't provide log_buf/log_size to the kernel,
1344 * initially. Only if BTF loading fails do we bump log_level to 1 and
1345 * retry, using either an auto-allocated or the custom log_buf. This way a
1346 * non-NULL custom log_buf provides a buffer just in case, but we hope
1347 * for a successful load and no need for log_buf.
1350 /* if caller didn't provide custom log_buf, we'll keep
1351 * allocating our own progressively bigger buffers for BTF
1355 buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
1356 tmp = realloc(buf, buf_sz);
1365 opts.log_buf = log_buf ? log_buf : buf;
1366 opts.log_size = log_buf ? log_sz : buf_sz;
1367 opts.log_level = log_level;
1370 btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
1372 /* time to turn on verbose mode and try again */
1373 if (log_level == 0) {
1377 /* only retry if caller didn't provide custom log_buf, but
1378 * make sure we can never overflow buf_sz
1380 if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
1384 pr_warn("BTF loading error: %d\n", err);
1385 /* don't print out contents of custom log_buf */
1386 if (!log_buf && buf[0])
1387 pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
1392 return libbpf_err(err);
1395 int btf__load_into_kernel(struct btf *btf)
1397 return btf_load_into_kernel(btf, NULL, 0, 0);
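/* Illustrative sketch (not part of libbpf): after BTF is constructed or
 * parsed, it can be loaded into the kernel and its FD fetched (*btf* is
 * assumed to be valid, *btf_fd* is a hypothetical local):
 *
 *	int err = btf__load_into_kernel(btf);
 *
 *	if (err)
 *		return err;
 *	btf_fd = btf__fd(btf);
 */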
1400 int btf__fd(const struct btf *btf)
1405 void btf__set_fd(struct btf *btf, int fd)
1410 static const void *btf_strs_data(const struct btf *btf)
1412 return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1415 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
1417 struct btf_header *hdr = btf->hdr;
1423 data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
1425 *size = btf->raw_size;
1429 data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
1430 data = calloc(1, data_sz);
1435 memcpy(p, hdr, hdr->hdr_len);
1440 memcpy(p, btf->types_data, hdr->type_len);
1442 for (i = 0; i < btf->nr_types; i++) {
1443 t = p + btf->type_offs[i];
1444 /* btf_bswap_type_rest() relies on native t->info, so
1445 * we swap base type info after we swapped all the
1446 * additional information
1448 if (btf_bswap_type_rest(t))
1450 btf_bswap_type_base(t);
1455 memcpy(p, btf_strs_data(btf), hdr->str_len);
1465 const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
1467 struct btf *btf = (struct btf *)btf_ro;
1471 data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
1473 return errno = ENOMEM, NULL;
1475 btf->raw_size = data_sz;
1476 if (btf->swapped_endian)
1477 btf->raw_data_swapped = data;
1479 btf->raw_data = data;
1484 __attribute__((alias("btf__raw_data")))
1485 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
1487 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1489 if (offset < btf->start_str_off)
1490 return btf__str_by_offset(btf->base_btf, offset);
1491 else if (offset - btf->start_str_off < btf->hdr->str_len)
1492 return btf_strs_data(btf) + (offset - btf->start_str_off);
1494 return errno = EINVAL, NULL;
1497 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
1499 return btf__str_by_offset(btf, offset);
1502 struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
1504 struct bpf_btf_info btf_info;
1505 __u32 len = sizeof(btf_info);
1511 /* we won't know btf_size until we call bpf_btf_get_info_by_fd(), so
1512 * let's start with a sane default - 4KiB here - and resize it only if
1513 * bpf_btf_get_info_by_fd() needs a bigger buffer.
1516 ptr = malloc(last_size);
1518 return ERR_PTR(-ENOMEM);
1520 memset(&btf_info, 0, sizeof(btf_info));
1521 btf_info.btf = ptr_to_u64(ptr);
1522 btf_info.btf_size = last_size;
1523 err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
1525 if (!err && btf_info.btf_size > last_size) {
1528 last_size = btf_info.btf_size;
1529 temp_ptr = realloc(ptr, last_size);
1531 btf = ERR_PTR(-ENOMEM);
1536 len = sizeof(btf_info);
1537 memset(&btf_info, 0, sizeof(btf_info));
1538 btf_info.btf = ptr_to_u64(ptr);
1539 btf_info.btf_size = last_size;
1541 err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
1544 if (err || btf_info.btf_size > last_size) {
1545 btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
1549 btf = btf_new(ptr, btf_info.btf_size, base_btf);
1556 struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
1561 btf_fd = bpf_btf_get_fd_by_id(id);
1563 return libbpf_err_ptr(-errno);
1565 btf = btf_get_from_fd(btf_fd, base_btf);
1568 return libbpf_ptr(btf);
1571 struct btf *btf__load_from_kernel_by_id(__u32 id)
1573 return btf__load_from_kernel_by_id_split(id, NULL);
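/* Illustrative sketch (not part of libbpf): fetching BTF of a loaded kernel
 * object by its BTF object ID (*btf_id* is assumed to come from e.g.
 * struct bpf_map_info::btf_id):
 *
 *	struct btf *btf = btf__load_from_kernel_by_id(btf_id);
 *
 *	if (!btf)
 *		return -errno;
 */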
1576 static void btf_invalidate_raw_data(struct btf *btf)
1578 if (btf->raw_data) {
1579 free(btf->raw_data);
1580 btf->raw_data = NULL;
1582 if (btf->raw_data_swapped) {
1583 free(btf->raw_data_swapped);
1584 btf->raw_data_swapped = NULL;
1588 /* Ensure BTF is ready to be modified (by splitting into three memory
1589 * regions for header, types, and strings). Also invalidate cached
1592 static int btf_ensure_modifiable(struct btf *btf)
1595 struct strset *set = NULL;
1598 if (btf_is_modifiable(btf)) {
1599 /* any BTF modification invalidates raw_data */
1600 btf_invalidate_raw_data(btf);
1604 /* split raw data into three memory regions */
1605 hdr = malloc(btf->hdr->hdr_len);
1606 types = malloc(btf->hdr->type_len);
1610 memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
1611 memcpy(types, btf->types_data, btf->hdr->type_len);
1613 /* build lookup index for all strings */
1614 set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
1620 /* only when everything was successful, update internal state */
1622 btf->types_data = types;
1623 btf->types_data_cap = btf->hdr->type_len;
1624 btf->strs_data = NULL;
1625 btf->strs_set = set;
1626 /* if BTF was created from scratch, all strings are guaranteed to be
1627 * unique and deduplicated
1629 if (btf->hdr->str_len == 0)
1630 btf->strs_deduped = true;
1631 if (!btf->base_btf && btf->hdr->str_len == 1)
1632 btf->strs_deduped = true;
1634 /* invalidate raw_data representation */
1635 btf_invalidate_raw_data(btf);
1646 /* Find an offset in BTF string section that corresponds to a given string *s*.
1648 * - >0 offset into string section, if string is found;
1649 * - -ENOENT, if string is not in the string section;
1650 * - <0, on any other error.
1652 int btf__find_str(struct btf *btf, const char *s)
1656 if (btf->base_btf) {
1657 off = btf__find_str(btf->base_btf, s);
1662 /* BTF needs to be in a modifiable state to build string lookup index */
1663 if (btf_ensure_modifiable(btf))
1664 return libbpf_err(-ENOMEM);
1666 off = strset__find_str(btf->strs_set, s);
1668 return libbpf_err(off);
1670 return btf->start_str_off + off;
1673 /* Add a string s to the BTF string section.
1675 * - > 0 offset into string section, on success;
1678 int btf__add_str(struct btf *btf, const char *s)
1682 if (btf->base_btf) {
1683 off = btf__find_str(btf->base_btf, s);
1688 if (btf_ensure_modifiable(btf))
1689 return libbpf_err(-ENOMEM);
1691 off = strset__add_str(btf->strs_set, s);
1693 return libbpf_err(off);
1695 btf->hdr->str_len = strset__data_size(btf->strs_set);
1697 return btf->start_str_off + off;
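/* Illustrative sketch (not part of libbpf): interning a string and reading
 * it back by its offset (*off* and *name* are hypothetical locals):
 *
 *	int off = btf__add_str(btf, "my_type");
 *	const char *name;
 *
 *	if (off < 0)
 *		return off;
 *	name = btf__str_by_offset(btf, off);
 */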
1700 static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
1702 return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
1703 btf->hdr->type_len, UINT_MAX, add_sz);
1706 static void btf_type_inc_vlen(struct btf_type *t)
1708 t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
1711 static int btf_commit_type(struct btf *btf, int data_sz)
1715 err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
1717 return libbpf_err(err);
1719 btf->hdr->type_len += data_sz;
1720 btf->hdr->str_off += data_sz;
1722 return btf->start_id + btf->nr_types - 1;
1726 const struct btf *src;
1728 struct hashmap *str_off_map; /* map string offsets from src to dst */
1731 static int btf_rewrite_str(__u32 *str_off, void *ctx)
1733 struct btf_pipe *p = ctx;
1737 if (!*str_off) /* nothing to do for empty strings */
1740 if (p->str_off_map &&
1741 hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
1742 *str_off = mapped_off;
1746 off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
1750 /* Remember string mapping from src to dst. It avoids
1751 * performing expensive string comparisons.
1753 if (p->str_off_map) {
1754 err = hashmap__append(p->str_off_map, *str_off, off);
1763 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
1765 struct btf_pipe p = { .src = src_btf, .dst = btf };
1769 sz = btf_type_size(src_type);
1771 return libbpf_err(sz);
1773 /* deconstruct BTF, if necessary, and invalidate raw_data */
1774 if (btf_ensure_modifiable(btf))
1775 return libbpf_err(-ENOMEM);
1777 t = btf_add_type_mem(btf, sz);
1779 return libbpf_err(-ENOMEM);
1781 memcpy(t, src_type, sz);
1783 err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
1785 return libbpf_err(err);
1787 return btf_commit_type(btf, sz);
1790 static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
1792 struct btf *btf = ctx;
1794 if (!*type_id) /* nothing to do for VOID references */
1797 /* we haven't updated btf's type count yet, so
1798 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
1799 * add to all newly added BTF types
1801 *type_id += btf->start_id + btf->nr_types - 1;
1805 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
1806 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
1808 int btf__add_btf(struct btf *btf, const struct btf *src_btf)
1810 struct btf_pipe p = { .src = src_btf, .dst = btf };
1811 int data_sz, sz, cnt, i, err, old_strs_len;
1815 /* appending split BTF isn't supported yet */
1816 if (src_btf->base_btf)
1817 return libbpf_err(-ENOTSUP);
1819 /* deconstruct BTF, if necessary, and invalidate raw_data */
1820 if (btf_ensure_modifiable(btf))
1821 return libbpf_err(-ENOMEM);
1823 /* remember original strings section size if we have to roll back
1824 * partial strings section changes
1826 old_strs_len = btf->hdr->str_len;
1828 data_sz = src_btf->hdr->type_len;
1829 cnt = btf__type_cnt(src_btf) - 1;
1831 /* pre-allocate enough memory for new types */
1832 t = btf_add_type_mem(btf, data_sz);
1834 return libbpf_err(-ENOMEM);
1836 /* pre-allocate enough memory for type offset index for new types */
1837 off = btf_add_type_offs_mem(btf, cnt);
1839 return libbpf_err(-ENOMEM);
1841 /* Map the string offsets from src_btf to the offsets from btf to improve performance */
1842 p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
1843 if (IS_ERR(p.str_off_map))
1844 return libbpf_err(-ENOMEM);
1846 /* bulk copy types data for all types from src_btf */
1847 memcpy(t, src_btf->types_data, data_sz);
1849 for (i = 0; i < cnt; i++) {
1850 sz = btf_type_size(t);
1852 /* unlikely, has to be corrupted src_btf */
1857 /* fill out type ID to type offset mapping for lookups by type ID */
1858 *off = t - btf->types_data;
1860 /* add, dedup, and remap strings referenced by this BTF type */
1861 err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
1865 /* remap all type IDs referenced from this BTF type */
1866 err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
1870 /* go to next type data and type offset index entry */
1875 /* Up until now any of the copied type data was effectively invisible,
1876 * so if we exited early before this point due to error, BTF would be
1877 * effectively unmodified. There would be extra internal memory
1878 * pre-allocated, but it would not be available for querying. But now
1879 * that we've copied and rewritten all the data successfully, we can
1880 * update type count and various internal offsets and sizes to
1881 * "commit" the changes and made them visible to the outside world.
1883 btf->hdr->type_len += data_sz;
1884 btf->hdr->str_off += data_sz;
1885 btf->nr_types += cnt;
1887 hashmap__free(p.str_off_map);
1889 /* return type ID of the first added BTF type */
1890 return btf->start_id + btf->nr_types - cnt;
1892 /* zero out preallocated memory as if it was just allocated with
1895 memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
1896 memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
1898 /* and now restore original strings section size; types data size
1899 * wasn't modified, so doesn't need restoring, see big comment above
1901 btf->hdr->str_len = old_strs_len;
1903 hashmap__free(p.str_off_map);
1905 return libbpf_err(err);
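/* Illustrative sketch (not part of libbpf): merging all types of one BTF
 * object into another, e.g. during static linking (*dst* and *src* are
 * assumed to be valid struct btf pointers):
 *
 *	int first_id = btf__add_btf(dst, src);
 *
 *	if (first_id < 0)
 *		return first_id;
 */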
1909 * Append new BTF_KIND_INT type with:
1910 * - *name* - non-empty, non-NULL type name;
1911 * - *byte_sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
1912 * - *encoding* - a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
1914 * - >0, type ID of newly added BTF type;
1917 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
1922 /* non-empty name */
1923 if (!name || !name[0])
1924 return libbpf_err(-EINVAL);
1925 /* byte_sz must be power of 2 */
1926 if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
1927 return libbpf_err(-EINVAL);
1928 if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
1929 return libbpf_err(-EINVAL);
1931 /* deconstruct BTF, if necessary, and invalidate raw_data */
1932 if (btf_ensure_modifiable(btf))
1933 return libbpf_err(-ENOMEM);
1935 sz = sizeof(struct btf_type) + sizeof(int);
1936 t = btf_add_type_mem(btf, sz);
1938 return libbpf_err(-ENOMEM);
1940 /* if something goes wrong later, we might end up with an extra string,
1941 * but that shouldn't be a problem, because BTF can't be constructed
1942 * completely anyway and will most probably be just discarded
1944 name_off = btf__add_str(btf, name);
1948 t->name_off = name_off;
1949 t->info = btf_type_info(BTF_KIND_INT, 0, 0);
1951 /* set INT info, we don't allow setting legacy bit offset/size */
1952 *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
1954 return btf_commit_type(btf, sz);
1958 * Append new BTF_KIND_FLOAT type with:
1959 * - *name* - non-empty, non-NULL type name;
1960 * - *byte_sz* - size of the type, in bytes;
1962 * - >0, type ID of newly added BTF type;
1965 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
1970 /* non-empty name */
1971 if (!name || !name[0])
1972 return libbpf_err(-EINVAL);
1974 /* byte_sz must be one of the explicitly allowed values */
1975 if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
1977 return libbpf_err(-EINVAL);
1979 if (btf_ensure_modifiable(btf))
1980 return libbpf_err(-ENOMEM);
1982 sz = sizeof(struct btf_type);
1983 t = btf_add_type_mem(btf, sz);
1985 return libbpf_err(-ENOMEM);
1987 name_off = btf__add_str(btf, name);
1991 t->name_off = name_off;
1992 t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
1995 return btf_commit_type(btf, sz);
1998 /* it's completely legal to append BTF types with type IDs pointing forward to
1999 * types that haven't been appended yet, so we only make sure that id looks
2000 * sane; we can't guarantee that ID will always be valid
2002 static int validate_type_id(int id)
2004 if (id < 0 || id > BTF_MAX_NR_TYPES)
2009 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
2010 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
2013 int sz, name_off = 0;
2015 if (validate_type_id(ref_type_id))
2016 return libbpf_err(-EINVAL);
2018 if (btf_ensure_modifiable(btf))
2019 return libbpf_err(-ENOMEM);
2021 sz = sizeof(struct btf_type);
2022 t = btf_add_type_mem(btf, sz);
2024 return libbpf_err(-ENOMEM);
2026 if (name && name[0]) {
2027 name_off = btf__add_str(btf, name);
2032 t->name_off = name_off;
2033 t->info = btf_type_info(kind, 0, 0);
2034 t->type = ref_type_id;
2036 return btf_commit_type(btf, sz);
2040 * Append new BTF_KIND_PTR type with:
2041 * - *ref_type_id* - referenced type ID, it might not exist yet;
2043 * - >0, type ID of newly added BTF type;
2046 int btf__add_ptr(struct btf *btf, int ref_type_id)
2048 return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
2052 * Append new BTF_KIND_ARRAY type with:
2053 * - *index_type_id* - type ID of the type describing array index;
2054 * - *elem_type_id* - type ID of the type describing array element;
2055 * - *nr_elems* - the number of elements in the array;
2057 * - >0, type ID of newly added BTF type;
2060 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2063 struct btf_array *a;
2066 if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2067 return libbpf_err(-EINVAL);
2069 if (btf_ensure_modifiable(btf))
2070 return libbpf_err(-ENOMEM);
2072 sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2073 t = btf_add_type_mem(btf, sz);
2075 return libbpf_err(-ENOMEM);
2078 t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2082 a->type = elem_type_id;
2083 a->index_type = index_type_id;
2084 a->nelems = nr_elems;
2086 return btf_commit_type(btf, sz);
2089 /* generic STRUCT/UNION append function */
2090 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
2093 int sz, name_off = 0;
2095 if (btf_ensure_modifiable(btf))
2096 return libbpf_err(-ENOMEM);
2098 sz = sizeof(struct btf_type);
2099 t = btf_add_type_mem(btf, sz);
2101 return libbpf_err(-ENOMEM);
2103 if (name && name[0]) {
2104 name_off = btf__add_str(btf, name);
2109 /* start out with vlen=0 and no kflag; this will be adjusted when
2110 * adding each member
2112 t->name_off = name_off;
2113 t->info = btf_type_info(kind, 0, 0);
2116 return btf_commit_type(btf, sz);
2120 * Append new BTF_KIND_STRUCT type with:
2121 * - *name* - name of the struct, can be NULL or empty for anonymous structs;
2122 * - *byte_sz* - size of the struct, in bytes;
2124 * Struct initially has no fields in it. Fields can be added by
2125 * btf__add_field() right after btf__add_struct() succeeds.
2128 * - >0, type ID of newly added BTF type;
2131 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2133 return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2137 * Append new BTF_KIND_UNION type with:
2138 * - *name* - name of the union, can be NULL or empty for anonymous union;
2139 * - *byte_sz* - size of the union, in bytes;
2141 * Union initially has no fields in it. Fields can be added by
2142 * btf__add_field() right after btf__add_union() succeeds. All fields
2143 * should have *bit_offset* of 0.
2146 * - >0, type ID of newly added BTF type;
2149 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2151 return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2154 static struct btf_type *btf_last_type(struct btf *btf)
2156 return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
2160 * Append new field for the current STRUCT/UNION type with:
2161 * - *name* - name of the field, can be NULL or empty for anonymous field;
2162 * - *type_id* - type ID for the type describing field type;
2163 * - *bit_offset* - bit offset of the start of the field within struct/union;
2164 * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2169 int btf__add_field(struct btf *btf, const char *name, int type_id,
2170 __u32 bit_offset, __u32 bit_size)
2173 struct btf_member *m;
2175 int sz, name_off = 0;
2177 /* last type should be union/struct */
2178 if (btf->nr_types == 0)
2179 return libbpf_err(-EINVAL);
2180 t = btf_last_type(btf);
2181 if (!btf_is_composite(t))
2182 return libbpf_err(-EINVAL);
2184 if (validate_type_id(type_id))
2185 return libbpf_err(-EINVAL);
2186 /* best-effort bit field offset/size enforcement */
2187 is_bitfield = bit_size || (bit_offset % 8 != 0);
2188 if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
2189 return libbpf_err(-EINVAL);
2191 /* only offset 0 is allowed for unions */
2192 if (btf_is_union(t) && bit_offset)
2193 return libbpf_err(-EINVAL);
2195 /* decompose and invalidate raw data */
2196 if (btf_ensure_modifiable(btf))
2197 return libbpf_err(-ENOMEM);
2199 sz = sizeof(struct btf_member);
2200 m = btf_add_type_mem(btf, sz);
2202 return libbpf_err(-ENOMEM);
2204 if (name && name[0]) {
2205 name_off = btf__add_str(btf, name);
2210 m->name_off = name_off;
2212 m->offset = bit_offset | (bit_size << 24);
2214 /* btf_add_type_mem can invalidate t pointer */
2215 t = btf_last_type(btf);
2216 /* update parent type's vlen and kflag */
2217 t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
2219 btf->hdr->type_len += sz;
2220 btf->hdr->str_off += sz;
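/* Illustrative sketch (not part of libbpf): building "struct pair { int a;
 * int b; };" from scratch with the write APIs above (error handling
 * omitted; all variables are hypothetical):
 *
 *	struct btf *btf = btf__new_empty();
 *	int int_id, pair_id;
 *
 *	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 *	pair_id = btf__add_struct(btf, "pair", 8);
 *	btf__add_field(btf, "a", int_id, 0, 0);
 *	btf__add_field(btf, "b", int_id, 32, 0);
 */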
2224 static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
2225 bool is_signed, __u8 kind)
2228 int sz, name_off = 0;
2230 /* byte_sz must be power of 2 */
2231 if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2232 return libbpf_err(-EINVAL);
2234 if (btf_ensure_modifiable(btf))
2235 return libbpf_err(-ENOMEM);
2237 sz = sizeof(struct btf_type);
2238 t = btf_add_type_mem(btf, sz);
2240 return libbpf_err(-ENOMEM);
2242 if (name && name[0]) {
2243 name_off = btf__add_str(btf, name);
2248 /* start out with vlen=0; it will be adjusted when adding enum values */
2249 t->name_off = name_off;
2250 t->info = btf_type_info(kind, 0, is_signed);
2253 return btf_commit_type(btf, sz);
2257 * Append new BTF_KIND_ENUM type with:
2258 * - *name* - name of the enum, can be NULL or empty for anonymous enums;
2259 * - *byte_sz* - size of the enum, in bytes.
2261 * Enum initially has no enum values in it (and corresponds to enum forward
2262 * declaration). Enumerator values can be added by btf__add_enum_value()
2263 * immediately after btf__add_enum() succeeds.
2266 * - >0, type ID of newly added BTF type;
2269 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2272 * set the signedness to be unsigned; it will change to signed
2273 * if any later enumerator is negative.
2275 return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2279 * Append new enum value for the current ENUM type with:
2280 * - *name* - name of the enumerator value, can't be NULL or empty;
2281 * - *value* - integer value corresponding to enum value *name*;
2286 int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
2292 /* last type should be BTF_KIND_ENUM */
2293 if (btf->nr_types == 0)
2294 return libbpf_err(-EINVAL);
2295 t = btf_last_type(btf);
2296 if (!btf_is_enum(t))
2297 return libbpf_err(-EINVAL);
2299 /* non-empty name */
2300 if (!name || !name[0])
2301 return libbpf_err(-EINVAL);
2302 if (value < INT_MIN || value > UINT_MAX)
2303 return libbpf_err(-E2BIG);
2305 /* decompose and invalidate raw data */
2306 if (btf_ensure_modifiable(btf))
2307 return libbpf_err(-ENOMEM);
2309 sz = sizeof(struct btf_enum);
2310 v = btf_add_type_mem(btf, sz);
2312 return libbpf_err(-ENOMEM);
2314 name_off = btf__add_str(btf, name);
2318 v->name_off = name_off;
2321 /* update parent type's vlen */
2322 t = btf_last_type(btf);
2323 btf_type_inc_vlen(t);
2325 /* if negative value, set signedness to signed */
2327 t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
2329 btf->hdr->type_len += sz;
2330 btf->hdr->str_off += sz;
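/* Illustrative sketch (not part of libbpf): appending "enum state { OFF, ON };"
 * with the enum APIs above (error handling omitted, *btf* assumed valid):
 *
 *	int id = btf__add_enum(btf, "state", 4);
 *
 *	btf__add_enum_value(btf, "OFF", 0);
 *	btf__add_enum_value(btf, "ON", 1);
 */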
2335 * Append new BTF_KIND_ENUM64 type with:
2336 * - *name* - name of the enum, can be NULL or empty for anonymous enums;
2337 * - *byte_sz* - size of the enum, in bytes.
2338 * - *is_signed* - whether the enum values are signed or not;
2340 * Enum initially has no enum values in it (and corresponds to enum forward
2341 * declaration). Enumerator values can be added by btf__add_enum64_value()
2342 * immediately after btf__add_enum64() succeeds.
2345 * - >0, type ID of newly added BTF type;
2348 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2351 return btf_add_enum_common(btf, name, byte_sz, is_signed,
2356 * Append new enum value for the current ENUM64 type with:
2357 * - *name* - name of the enumerator value, can't be NULL or empty;
2358 * - *value* - integer value corresponding to enum value *name*;
2363 int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
2365 struct btf_enum64 *v;
2369 /* last type should be BTF_KIND_ENUM64 */
2370 if (btf->nr_types == 0)
2371 return libbpf_err(-EINVAL);
2372 t = btf_last_type(btf);
2373 if (!btf_is_enum64(t))
2374 return libbpf_err(-EINVAL);
2376 /* non-empty name */
2377 if (!name || !name[0])
2378 return libbpf_err(-EINVAL);
2380 /* decompose and invalidate raw data */
2381 if (btf_ensure_modifiable(btf))
2382 return libbpf_err(-ENOMEM);
2384 sz = sizeof(struct btf_enum64);
2385 v = btf_add_type_mem(btf, sz);
2387 return libbpf_err(-ENOMEM);
2389 name_off = btf__add_str(btf, name);
2393 v->name_off = name_off;
2394 v->val_lo32 = (__u32)value;
2395 v->val_hi32 = value >> 32;
2397 /* update parent type's vlen */
2398 t = btf_last_type(btf);
2399 btf_type_inc_vlen(t);
2401 btf->hdr->type_len += sz;
2402 btf->hdr->str_off += sz;
2407 * Append new BTF_KIND_FWD type with:
2408 * - *name*, non-empty/non-NULL name;
2409 * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2410 * BTF_FWD_UNION, or BTF_FWD_ENUM;
2412 * - >0, type ID of newly added BTF type;
2415 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2417 if (!name || !name[0])
2418 return libbpf_err(-EINVAL);
2421 case BTF_FWD_STRUCT:
2422 case BTF_FWD_UNION: {
2426 id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2429 t = btf_type_by_id(btf, id);
2430 t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2434 /* enum forward in BTF currently is just an enum with no enum
2435 * values; we also assume a standard 4-byte size for it
2437 return btf__add_enum(btf, name, sizeof(int));
2439 return libbpf_err(-EINVAL);
2444 * Append new BTF_KIND_TYPEDEF type with:
2445 * - *name*, non-empty/non-NULL name;
2446 * - *ref_type_id* - referenced type ID, it might not exist yet;
2448 * - >0, type ID of newly added BTF type;
2451 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2453 if (!name || !name[0])
2454 return libbpf_err(-EINVAL);
2456 return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2460 * Append new BTF_KIND_VOLATILE type with:
2461 * - *ref_type_id* - referenced type ID, it might not exist yet;
2463 * - >0, type ID of newly added BTF type;
2466 int btf__add_volatile(struct btf *btf, int ref_type_id)
2468 return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2472 * Append new BTF_KIND_CONST type with:
2473 * - *ref_type_id* - referenced type ID, it might not exist yet;
2475 * - >0, type ID of newly added BTF type;
2478 int btf__add_const(struct btf *btf, int ref_type_id)
2480 return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2484 * Append new BTF_KIND_RESTRICT type with:
2485 * - *ref_type_id* - referenced type ID, it might not exist yet;
2487 * - >0, type ID of newly added BTF type;
2490 int btf__add_restrict(struct btf *btf, int ref_type_id)
2492 return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2496 * Append new BTF_KIND_TYPE_TAG type with:
2497 * - *value*, non-empty/non-NULL tag value;
2498 * - *ref_type_id* - referenced type ID, it might not exist yet;
2500 * - >0, type ID of newly added BTF type;
2503 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2505 if (!value || !value[0])
2506 return libbpf_err(-EINVAL);
2508 return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
2512 * Append new BTF_KIND_FUNC type with:
2513 * - *name*, non-empty/non-NULL name;
2514 * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2516 * - >0, type ID of newly added BTF type;
2519 int btf__add_func(struct btf *btf, const char *name,
2520 enum btf_func_linkage linkage, int proto_type_id)
2524 if (!name || !name[0])
2525 return libbpf_err(-EINVAL);
2526 if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2527 linkage != BTF_FUNC_EXTERN)
2528 return libbpf_err(-EINVAL);
2530 id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2532 struct btf_type *t = btf_type_by_id(btf, id);
2534 t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2536 return libbpf_err(id);
2540 * Append new BTF_KIND_FUNC_PROTO with:
2541 * - *ret_type_id* - type ID for return result of a function.
2543 * Function prototype initially has no arguments, but they can be added by
2544 * btf__add_func_param() one by one, immediately after
2545 * btf__add_func_proto() succeeded.
2548 * - >0, type ID of newly added BTF type;
2551 int btf__add_func_proto(struct btf *btf, int ret_type_id)
2556 if (validate_type_id(ret_type_id))
2557 return libbpf_err(-EINVAL);
2559 if (btf_ensure_modifiable(btf))
2560 return libbpf_err(-ENOMEM);
2562 sz = sizeof(struct btf_type);
2563 t = btf_add_type_mem(btf, sz);
2565 return libbpf_err(-ENOMEM);
2567 /* start out with vlen=0; this will be adjusted when adding function
2568 * parameters, if necessary
2571 t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
2572 t->type = ret_type_id;
2574 return btf_commit_type(btf, sz);
2578 * Append new function parameter for current FUNC_PROTO type with:
2579 * - *name* - parameter name, can be NULL or empty;
2580 * - *type_id* - type ID describing the type of the parameter.
2585 int btf__add_func_param(struct btf *btf, const char *name, int type_id)
2588 struct btf_param *p;
2589 int sz, name_off = 0;
2591 if (validate_type_id(type_id))
2592 return libbpf_err(-EINVAL);
2594 /* last type should be BTF_KIND_FUNC_PROTO */
2595 if (btf->nr_types == 0)
2596 return libbpf_err(-EINVAL);
2597 t = btf_last_type(btf);
2598 if (!btf_is_func_proto(t))
2599 return libbpf_err(-EINVAL);
2601 /* decompose and invalidate raw data */
2602 if (btf_ensure_modifiable(btf))
2603 return libbpf_err(-ENOMEM);
2605 sz = sizeof(struct btf_param);
2606 p = btf_add_type_mem(btf, sz);
2608 return libbpf_err(-ENOMEM);
2610 if (name && name[0]) {
2611 name_off = btf__add_str(btf, name);
2616 p->name_off = name_off;
2619 /* update parent type's vlen */
2620 t = btf_last_type(btf);
2621 btf_type_inc_vlen(t);
2623 btf->hdr->type_len += sz;
2624 btf->hdr->str_off += sz;
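/* Usage sketch (illustrative only, not part of libbpf): a function like
 * `int sum(int a, int b)` is described by appending a FUNC_PROTO, then its
 * parameters, then a FUNC referencing that prototype. Error handling is
 * omitted for brevity.
 *
 *	int int_id, proto_id, func_id;
 *
 *	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 *	proto_id = btf__add_func_proto(btf, int_id);	// returns int
 *	btf__add_func_param(btf, "a", int_id);		// int a
 *	btf__add_func_param(btf, "b", int_id);		// int b
 *	func_id = btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);
 */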
2629 * Append new BTF_KIND_VAR type with:
2630 * - *name* - non-empty/non-NULL name;
2631 * - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2632 * BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2633 * - *type_id* - type ID of the type describing the type of the variable.
2635 * - >0, type ID of newly added BTF type;
2638 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
2644 /* non-empty name */
2645 if (!name || !name[0])
2646 return libbpf_err(-EINVAL);
2647 if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2648 linkage != BTF_VAR_GLOBAL_EXTERN)
2649 return libbpf_err(-EINVAL);
2650 if (validate_type_id(type_id))
2651 return libbpf_err(-EINVAL);
2653 /* deconstruct BTF, if necessary, and invalidate raw_data */
2654 if (btf_ensure_modifiable(btf))
2655 return libbpf_err(-ENOMEM);
2657 sz = sizeof(struct btf_type) + sizeof(struct btf_var);
2658 t = btf_add_type_mem(btf, sz);
2660 return libbpf_err(-ENOMEM);
2662 name_off = btf__add_str(btf, name);
2666 t->name_off = name_off;
2667 t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
2671 v->linkage = linkage;
2673 return btf_commit_type(btf, sz);
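/* Usage sketch (illustrative only, not part of libbpf): describing a global
 * variable `int my_counter;` takes two steps: add the variable's type, then
 * the VAR itself. Error handling is omitted for brevity.
 *
 *	int int_id, var_id;
 *
 *	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
 *	var_id = btf__add_var(btf, "my_counter", BTF_VAR_GLOBAL_ALLOCATED, int_id);
 */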
2677 * Append new BTF_KIND_DATASEC type with:
2678 * - *name* - non-empty/non-NULL name;
2679 * - *byte_sz* - data section size, in bytes.
2681 * Data section is initially empty. Variables info can be added with
2682 * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2685 * - >0, type ID of newly added BTF type;
2688 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
2693 /* non-empty name */
2694 if (!name || !name[0])
2695 return libbpf_err(-EINVAL);
2697 if (btf_ensure_modifiable(btf))
2698 return libbpf_err(-ENOMEM);
2700 sz = sizeof(struct btf_type);
2701 t = btf_add_type_mem(btf, sz);
2703 return libbpf_err(-ENOMEM);
2705 name_off = btf__add_str(btf, name);
2709 /* start with vlen=0, which will be updated as var_secinfos are added */
2710 t->name_off = name_off;
2711 t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
2714 return btf_commit_type(btf, sz);
2718 * Append new data section variable information entry for current DATASEC type:
2719 * - *var_type_id* - type ID, describing type of the variable;
2720 * - *offset* - variable offset within data section, in bytes;
2721 * - *byte_sz* - variable size, in bytes.
2727 int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
2730 struct btf_var_secinfo *v;
2733 /* last type should be BTF_KIND_DATASEC */
2734 if (btf->nr_types == 0)
2735 return libbpf_err(-EINVAL);
2736 t = btf_last_type(btf);
2737 if (!btf_is_datasec(t))
2738 return libbpf_err(-EINVAL);
2740 if (validate_type_id(var_type_id))
2741 return libbpf_err(-EINVAL);
2743 /* decompose and invalidate raw data */
2744 if (btf_ensure_modifiable(btf))
2745 return libbpf_err(-ENOMEM);
2747 sz = sizeof(struct btf_var_secinfo);
2748 v = btf_add_type_mem(btf, sz);
2750 return libbpf_err(-ENOMEM);
2752 v->type = var_type_id;
2756 /* update parent type's vlen */
2757 t = btf_last_type(btf);
2758 btf_type_inc_vlen(t);
2760 btf->hdr->type_len += sz;
2761 btf->hdr->str_off += sz;
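/* Usage sketch (illustrative only, not part of libbpf): placing the VAR from
 * the example above into a 4-byte ".data" section at offset 0. `var_id` is
 * assumed to come from an earlier btf__add_var() call; error handling is
 * omitted for brevity.
 *
 *	int sec_id;
 *
 *	sec_id = btf__add_datasec(btf, ".data", 4);
 *	btf__add_datasec_var_info(btf, var_id, 0, 4);
 */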
2766 * Append new BTF_KIND_DECL_TAG type with:
2767 * - *value* - non-empty/non-NULL string;
2768 * - *ref_type_id* - referenced type ID, it might not exist yet;
2769 * - *component_idx* - -1 for tagging reference type, otherwise struct/union
2770 * member or function argument index;
2772 * - >0, type ID of newly added BTF type;
2775 int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
2781 if (!value || !value[0] || component_idx < -1)
2782 return libbpf_err(-EINVAL);
2784 if (validate_type_id(ref_type_id))
2785 return libbpf_err(-EINVAL);
2787 if (btf_ensure_modifiable(btf))
2788 return libbpf_err(-ENOMEM);
2790 sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
2791 t = btf_add_type_mem(btf, sz);
2793 return libbpf_err(-ENOMEM);
2795 value_off = btf__add_str(btf, value);
2799 t->name_off = value_off;
2800 t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
2801 t->type = ref_type_id;
2802 btf_decl_tag(t)->component_idx = component_idx;
2804 return btf_commit_type(btf, sz);
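/* Usage sketch (illustrative only, not part of libbpf): attaching a
 * declaration tag to a variable and to the first member of some struct.
 * `var_id` and `struct_id` are assumed to come from earlier btf__add_*()
 * calls; error handling is omitted for brevity.
 *
 *	btf__add_decl_tag(btf, "my_tag", var_id, -1);	// tags the VAR itself
 *	btf__add_decl_tag(btf, "my_tag", struct_id, 0);	// tags member #0
 */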
2807 struct btf_ext_sec_setup_param {
2811 struct btf_ext_info *ext_info;
2815 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2816 struct btf_ext_sec_setup_param *ext_sec)
2818 const struct btf_ext_info_sec *sinfo;
2819 struct btf_ext_info *ext_info;
2820 __u32 info_left, record_size;
2822 /* The start of the info sec (including the __u32 record_size). */
2825 if (ext_sec->len == 0)
2828 if (ext_sec->off & 0x03) {
2829 pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2834 info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2835 info_left = ext_sec->len;
2837 if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2838 pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2839 ext_sec->desc, ext_sec->off, ext_sec->len);
2843 /* At least a record size */
2844 if (info_left < sizeof(__u32)) {
2845 pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2849 /* The record size needs to meet the minimum standard */
2850 record_size = *(__u32 *)info;
2851 if (record_size < ext_sec->min_rec_size ||
2852 record_size & 0x03) {
2853 pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2854 ext_sec->desc, record_size);
2858 sinfo = info + sizeof(__u32);
2859 info_left -= sizeof(__u32);
2861 /* If no records, return failure now so .BTF.ext won't be used. */
2863 pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
2868 unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2869 __u64 total_record_size;
2872 if (info_left < sec_hdrlen) {
2873 pr_debug("%s section header is not found in .BTF.ext\n",
2878 num_records = sinfo->num_info;
2879 if (num_records == 0) {
2880 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2885 total_record_size = sec_hdrlen + (__u64)num_records * record_size;
2886 if (info_left < total_record_size) {
2887 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2892 info_left -= total_record_size;
2893 sinfo = (void *)sinfo + total_record_size;
2897 ext_info = ext_sec->ext_info;
2898 ext_info->len = ext_sec->len - sizeof(__u32);
2899 ext_info->rec_size = record_size;
2900 ext_info->info = info + sizeof(__u32);
2901 ext_info->sec_cnt = sec_cnt;
2906 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
2908 struct btf_ext_sec_setup_param param = {
2909 .off = btf_ext->hdr->func_info_off,
2910 .len = btf_ext->hdr->func_info_len,
2911 .min_rec_size = sizeof(struct bpf_func_info_min),
2912 .ext_info = &btf_ext->func_info,
2916 return btf_ext_setup_info(btf_ext, &param);
2919 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
2921 struct btf_ext_sec_setup_param param = {
2922 .off = btf_ext->hdr->line_info_off,
2923 .len = btf_ext->hdr->line_info_len,
2924 .min_rec_size = sizeof(struct bpf_line_info_min),
2925 .ext_info = &btf_ext->line_info,
2926 .desc = "line_info",
2929 return btf_ext_setup_info(btf_ext, &param);
2932 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
2934 struct btf_ext_sec_setup_param param = {
2935 .off = btf_ext->hdr->core_relo_off,
2936 .len = btf_ext->hdr->core_relo_len,
2937 .min_rec_size = sizeof(struct bpf_core_relo),
2938 .ext_info = &btf_ext->core_relo_info,
2939 .desc = "core_relo",
2942 return btf_ext_setup_info(btf_ext, &param);
2945 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
2947 const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
2949 if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
2950 data_size < hdr->hdr_len) {
2951 pr_debug("BTF.ext header not found\n");
2955 if (hdr->magic == bswap_16(BTF_MAGIC)) {
2956 pr_warn("BTF.ext in non-native endianness is not supported\n");
2958 } else if (hdr->magic != BTF_MAGIC) {
2959 pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
2963 if (hdr->version != BTF_VERSION) {
2964 pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
2969 pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
2973 if (data_size == hdr->hdr_len) {
2974 pr_debug("BTF.ext has no data\n");
2981 void btf_ext__free(struct btf_ext *btf_ext)
2983 if (IS_ERR_OR_NULL(btf_ext))
2985 free(btf_ext->func_info.sec_idxs);
2986 free(btf_ext->line_info.sec_idxs);
2987 free(btf_ext->core_relo_info.sec_idxs);
2988 free(btf_ext->data);
2992 struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
2994 struct btf_ext *btf_ext;
2997 btf_ext = calloc(1, sizeof(struct btf_ext));
2999 return libbpf_err_ptr(-ENOMEM);
3001 btf_ext->data_size = size;
3002 btf_ext->data = malloc(size);
3003 if (!btf_ext->data) {
3007 memcpy(btf_ext->data, data, size);
3009 err = btf_ext_parse_hdr(btf_ext->data, size);
3013 if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
3018 err = btf_ext_setup_func_info(btf_ext);
3022 err = btf_ext_setup_line_info(btf_ext);
3026 if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
3027 goto done; /* skip core relos parsing */
3029 err = btf_ext_setup_core_relos(btf_ext);
3035 btf_ext__free(btf_ext);
3036 return libbpf_err_ptr(err);
3042 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
3044 *size = btf_ext->data_size;
3045 return btf_ext->data;
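/* Usage sketch (illustrative only, not part of libbpf): given the raw bytes
 * of an ELF .BTF.ext section (obtaining them is out of scope here), the
 * typical lifecycle is parse, inspect, free. `sec_data` and `sec_size` are
 * assumed to be provided by the caller.
 *
 *	struct btf_ext *ext;
 *	const void *raw;
 *	__u32 raw_sz;
 *
 *	ext = btf_ext__new(sec_data, sec_size);
 *	if (!ext)
 *		return -errno;		// error code is reported via errno
 *	raw = btf_ext__get_raw_data(ext, &raw_sz);
 *	// ... use raw/raw_sz ...
 *	btf_ext__free(ext);
 */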
3050 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3051 static void btf_dedup_free(struct btf_dedup *d);
3052 static int btf_dedup_prep(struct btf_dedup *d);
3053 static int btf_dedup_strings(struct btf_dedup *d);
3054 static int btf_dedup_prim_types(struct btf_dedup *d);
3055 static int btf_dedup_struct_types(struct btf_dedup *d);
3056 static int btf_dedup_ref_types(struct btf_dedup *d);
3057 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3058 static int btf_dedup_compact_types(struct btf_dedup *d);
3059 static int btf_dedup_remap_types(struct btf_dedup *d);
3062 * Deduplicate BTF types and strings.
3064 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
3065 * section with all BTF type descriptors and string data. It overwrites that
3066 * memory in-place with deduplicated types and strings without any loss of
3067 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
3068 * is provided, all the strings referenced from .BTF.ext section are honored
3069 * and updated to point to the right offsets after deduplication.
3071 * If function returns with error, type/string data might be garbled and should
3074 * A more verbose and detailed description of both the problem btf_dedup is
3075 * solving and its solution can be found at:
3076 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3078 * Problem description and justification
3079 * =====================================
3081 * BTF type information is typically emitted either as a result of conversion
3082 * from DWARF to BTF or directly by compiler. In both cases, each compilation
3083 * unit contains information about a subset of all the types that are used
3084 * in an application. These subsets are frequently overlapping and contain a lot
3085 * of duplicated information when later concatenated together into a single
3086 * binary. This algorithm ensures that each unique type is represented by a
3087 * single BTF type descriptor, greatly reducing the resulting size of BTF data.
3089 * Compilation unit isolation and subsequent duplication of data is not the only
3090 * problem. The same type hierarchy (e.g., struct and all the types that struct
3091 * references) in different compilation units can be represented in BTF to
3092 * various degrees of completeness (or, rather, incompleteness) due to
3093 * struct/union forward declarations.
3095 * Let's take a look at an example that we'll use to better understand the
3096 * problem (and solution). Suppose we have two compilation units, each using
3097 * same `struct S`, but each of them having incomplete type information about
3126 * In case of CU #1, BTF data will know only that `struct B` exists (but no
3127 * more), but will know the complete type information about `struct A`. While
3128 * for CU #2, it will know full type information about `struct B`, but will
3129 * only know about forward declaration of `struct A` (in BTF terms, it will
3130 * have `BTF_KIND_FWD` type descriptor with name `A`).
3132 * This compilation unit isolation means that it's possible that there is no
3133 * single CU with complete type information describing structs `S`, `A`, and
3134 * `B`. Also, we might get tons of duplicated and redundant type information.
3136 * Additional complication we need to keep in mind comes from the fact that
3137 * types, in general, can form graphs containing cycles, not just DAGs.
3139 * While the algorithm does deduplication, it also merges and resolves type
3140 * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
3141 * E.g., in the example above with two compilation units having partial type
3142 * information for structs `A` and `B`, the output of algorithm will emit
3143 * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3144 * (as well as type information for `int` and pointers), as if they were defined
3145 * in a single compilation unit as:
3165 * Algorithm completes its work in 7 separate passes:
3167 * 1. Strings deduplication.
3168 * 2. Primitive types deduplication (int, enum, fwd).
3169 * 3. Struct/union types deduplication.
3170 * 4. Resolve unambiguous forward declarations.
3171 * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3172 * protos, and const/volatile/restrict modifiers).
3173 * 6. Types compaction.
3174 * 7. Types remapping.
3176 * Algorithm determines canonical type descriptor, which is a single
3177 * representative type for each truly unique type. This canonical type is the
3178 * one that will go into final deduplicated BTF type information. For
3179 * struct/unions, it is also the type that algorithm will merge additional type
3180 * information into (while resolving FWDs), as it discovers it from data in
3181 * other CUs. Each input BTF type eventually gets either mapped to itself, if
3182 * that type is canonical, or to some other type, if that type is equivalent
3183 * and was chosen as canonical representative. This mapping is stored in
3184 * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
3185 * FWD type got resolved to.
3187 * To facilitate fast discovery of canonical types, we also maintain canonical
3188 * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
3189 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3190 * that match that signature. With sufficiently good choice of type signature
3191 * hashing function, we can limit number of canonical types for each unique type
3192 * signature to a very small number, allowing us to find the canonical type for any
3193 * duplicated type very quickly.
3195 * Struct/union deduplication is the most critical part and algorithm for
3196 * deduplicating structs/unions is described in greater detail in comments for
3197 * `btf_dedup_is_equiv` function.
3199 int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
3201 struct btf_dedup *d;
3204 if (!OPTS_VALID(opts, btf_dedup_opts))
3205 return libbpf_err(-EINVAL);
3207 d = btf_dedup_new(btf, opts);
3209 pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
3210 return libbpf_err(-EINVAL);
3213 if (btf_ensure_modifiable(btf)) {
3218 err = btf_dedup_prep(d);
3220 pr_debug("btf_dedup_prep failed:%d\n", err);
3223 err = btf_dedup_strings(d);
3225 pr_debug("btf_dedup_strings failed:%d\n", err);
3228 err = btf_dedup_prim_types(d);
3230 pr_debug("btf_dedup_prim_types failed:%d\n", err);
3233 err = btf_dedup_struct_types(d);
3235 pr_debug("btf_dedup_struct_types failed:%d\n", err);
3238 err = btf_dedup_resolve_fwds(d);
3240 pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
3243 err = btf_dedup_ref_types(d);
3245 pr_debug("btf_dedup_ref_types failed:%d\n", err);
3248 err = btf_dedup_compact_types(d);
3250 pr_debug("btf_dedup_compact_types failed:%d\n", err);
3253 err = btf_dedup_remap_types(d);
3255 pr_debug("btf_dedup_remap_types failed:%d\n", err);
3261 return libbpf_err(err);
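/* Usage sketch (illustrative only, not part of libbpf): deduplicating a BTF
 * object together with its optional .BTF.ext data. The `btf` and `ext`
 * handles are assumed to have been created earlier (e.g., via btf__parse()
 * and btf_ext__new()).
 *
 *	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = ext);
 *	int err;
 *
 *	err = btf__dedup(btf, &opts);
 *	if (err)
 *		return err;		// negative error code
 */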
3264 #define BTF_UNPROCESSED_ID ((__u32)-1)
3265 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3268 /* .BTF section to be deduped in-place */
3271 * Optional .BTF.ext section. When provided, any strings referenced
3272 * from it will be taken into account when deduping strings
3274 struct btf_ext *btf_ext;
3276 * This is a map from any type's signature hash to a list of possible
3277 * canonical representative type candidates. Hash collisions are
3278 * ignored, so even types of various kinds can share same list of
3279 * candidates, which is fine because we rely on subsequent
3280 * btf_xxx_equal() checks to authoritatively verify type equality.
3282 struct hashmap *dedup_table;
3283 /* Canonical types map */
3285 /* Hypothetical mapping, used during type graph equivalence checks */
3290 /* Whether hypothetical mapping, if successful, would need to adjust
3291 * already canonicalized types (due to a new forward declaration to
3292 * concrete type resolution). In such case, during split BTF dedup
3293 * candidate type would still be considered as different, because base
3294 * BTF is considered to be immutable.
3296 bool hypot_adjust_canon;
3297 /* Various option modifying behavior of algorithm */
3298 struct btf_dedup_opts opts;
3299 /* temporary strings deduplication state */
3300 struct strset *strs_set;
3303 static long hash_combine(long h, long value)
3305 return h * 31 + value;
3308 #define for_each_dedup_cand(d, node, hash) \
3309 hashmap__for_each_key_entry(d->dedup_table, node, hash)
3311 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
3313 return hashmap__append(d->dedup_table, hash, type_id);
3316 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3317 __u32 from_id, __u32 to_id)
3319 if (d->hypot_cnt == d->hypot_cap) {
3322 d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3323 new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3326 d->hypot_list = new_list;
3328 d->hypot_list[d->hypot_cnt++] = from_id;
3329 d->hypot_map[from_id] = to_id;
3333 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3337 for (i = 0; i < d->hypot_cnt; i++)
3338 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3340 d->hypot_adjust_canon = false;
3343 static void btf_dedup_free(struct btf_dedup *d)
3345 hashmap__free(d->dedup_table);
3346 d->dedup_table = NULL;
3352 d->hypot_map = NULL;
3354 free(d->hypot_list);
3355 d->hypot_list = NULL;
3360 static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
3365 static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
3370 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
3375 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
3377 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
3378 hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
3379 int i, err = 0, type_cnt;
3382 return ERR_PTR(-ENOMEM);
3384 if (OPTS_GET(opts, force_collisions, false))
3385 hash_fn = btf_dedup_collision_hash_fn;
3388 d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
3390 d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
3391 if (IS_ERR(d->dedup_table)) {
3392 err = PTR_ERR(d->dedup_table);
3393 d->dedup_table = NULL;
3397 type_cnt = btf__type_cnt(btf);
3398 d->map = malloc(sizeof(__u32) * type_cnt);
3403 /* special BTF "void" type is made canonical immediately */
3405 for (i = 1; i < type_cnt; i++) {
3406 struct btf_type *t = btf_type_by_id(d->btf, i);
3408 /* VAR and DATASEC are never deduped and are self-canonical */
3409 if (btf_is_var(t) || btf_is_datasec(t))
3412 d->map[i] = BTF_UNPROCESSED_ID;
3415 d->hypot_map = malloc(sizeof(__u32) * type_cnt);
3416 if (!d->hypot_map) {
3420 for (i = 0; i < type_cnt; i++)
3421 d->hypot_map[i] = BTF_UNPROCESSED_ID;
3426 return ERR_PTR(err);
3433 * Iterate over all possible places in .BTF and .BTF.ext that can reference
3434 * string and pass pointer to it to a provided callback `fn`.
3436 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3440 for (i = 0; i < d->btf->nr_types; i++) {
3441 struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3443 r = btf_type_visit_str_offs(t, fn, ctx);
3451 r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3458 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3460 struct btf_dedup *d = ctx;
3461 __u32 str_off = *str_off_ptr;
3465 /* don't touch empty string or string in main BTF */
3466 if (str_off == 0 || str_off < d->btf->start_str_off)
3469 s = btf__str_by_offset(d->btf, str_off);
3470 if (d->btf->base_btf) {
3471 err = btf__find_str(d->btf->base_btf, s);
3480 off = strset__add_str(d->strs_set, s);
3484 *str_off_ptr = d->btf->start_str_off + off;
3489 * Dedup strings and filter out those that are not referenced from either .BTF
3490 * or .BTF.ext (if provided) sections.
3492 * This is done by building index of all strings in BTF's string section,
3493 * then iterating over all entities that can reference strings (e.g., type
3494 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3495 * strings as used. After that all used strings are deduped and compacted into
3496 * sequential blob of memory and new offsets are calculated. Then all the string
3497 * references are iterated again and rewritten using new offsets.
3499 static int btf_dedup_strings(struct btf_dedup *d)
3503 if (d->btf->strs_deduped)
3506 d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
3507 if (IS_ERR(d->strs_set)) {
3508 err = PTR_ERR(d->strs_set);
3512 if (!d->btf->base_btf) {
3513 /* insert empty string; we won't be looking it up during strings
3514 * dedup, but it's good to have it for generic BTF string lookups
3516 err = strset__add_str(d->strs_set, "");
3521 /* remap string offsets */
3522 err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
3526 /* replace BTF string data and hash with deduped ones */
3527 strset__free(d->btf->strs_set);
3528 d->btf->hdr->str_len = strset__data_size(d->strs_set);
3529 d->btf->strs_set = d->strs_set;
3531 d->btf->strs_deduped = true;
3535 strset__free(d->strs_set);
3541 static long btf_hash_common(struct btf_type *t)
3545 h = hash_combine(0, t->name_off);
3546 h = hash_combine(h, t->info);
3547 h = hash_combine(h, t->size);
3551 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
3553 return t1->name_off == t2->name_off &&
3554 t1->info == t2->info &&
3555 t1->size == t2->size;
3558 /* Calculate type signature hash of INT or TAG. */
3559 static long btf_hash_int_decl_tag(struct btf_type *t)
3561 __u32 info = *(__u32 *)(t + 1);
3564 h = btf_hash_common(t);
3565 h = hash_combine(h, info);
3569 /* Check structural equality of two INTs or TAGs. */
3570 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
3574 if (!btf_equal_common(t1, t2))
3576 info1 = *(__u32 *)(t1 + 1);
3577 info2 = *(__u32 *)(t2 + 1);
3578 return info1 == info2;
3581 /* Calculate type signature hash of ENUM/ENUM64. */
3582 static long btf_hash_enum(struct btf_type *t)
3586 /* don't hash vlen, enum members and size to support enum fwd resolving */
3587 h = hash_combine(0, t->name_off);
3591 static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
3593 const struct btf_enum *m1, *m2;
3597 vlen = btf_vlen(t1);
3600 for (i = 0; i < vlen; i++) {
3601 if (m1->name_off != m2->name_off || m1->val != m2->val)
3609 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3611 const struct btf_enum64 *m1, *m2;
3615 vlen = btf_vlen(t1);
3616 m1 = btf_enum64(t1);
3617 m2 = btf_enum64(t2);
3618 for (i = 0; i < vlen; i++) {
3619 if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3620 m1->val_hi32 != m2->val_hi32)
3628 /* Check structural equality of two ENUMs or ENUM64s. */
3629 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3631 if (!btf_equal_common(t1, t2))
3634 /* t1 & t2 kinds are identical because of btf_equal_common */
3635 if (btf_kind(t1) == BTF_KIND_ENUM)
3636 return btf_equal_enum_members(t1, t2);
3638 return btf_equal_enum64_members(t1, t2);
3641 static inline bool btf_is_enum_fwd(struct btf_type *t)
3643 return btf_is_any_enum(t) && btf_vlen(t) == 0;
3646 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
3648 if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
3649 return btf_equal_enum(t1, t2);
3650 /* At this point either t1 or t2 or both are forward declarations, thus:
3651 * - skip comparing vlen because it is zero for forward declarations;
3652 * - skip comparing size to allow enum forward declarations
3653 * to be compatible with enum64 full declarations;
3654 * - skip comparing kind for the same reason.
3656 return t1->name_off == t2->name_off &&
3657 btf_is_any_enum(t1) && btf_is_any_enum(t2);
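/* For illustration (not part of libbpf): per btf_is_enum_fwd() above, an enum
 * emitted with no enumerators (vlen == 0) acts as a forward declaration, and
 * btf_compat_enum() lets it match the full definition during dedup, e.g.:
 *
 *	enum e;				// BTF: ENUM 'e', vlen=0 (fwd)
 *	enum e { A = 1, B = 2 };	// BTF: ENUM 'e', vlen=2
 *
 * The two are compatible as long as the name matches; vlen, size, and even
 * ENUM vs ENUM64 kind are intentionally not compared for fwd declarations.
 */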
3661 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3662 * as referenced type IDs equivalence is established separately during type
3663 * graph equivalence check algorithm.
3665 static long btf_hash_struct(struct btf_type *t)
3667 const struct btf_member *member = btf_members(t);
3668 __u32 vlen = btf_vlen(t);
3669 long h = btf_hash_common(t);
3672 for (i = 0; i < vlen; i++) {
3673 h = hash_combine(h, member->name_off);
3674 h = hash_combine(h, member->offset);
3675 /* no hashing of referenced type ID, it can be unresolved yet */
3682 * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3683 * type IDs. This check is performed during type graph equivalence check and
3684 * referenced types equivalence is checked separately.
3686 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3688 const struct btf_member *m1, *m2;
3692 if (!btf_equal_common(t1, t2))
3695 vlen = btf_vlen(t1);
3696 m1 = btf_members(t1);
3697 m2 = btf_members(t2);
3698 for (i = 0; i < vlen; i++) {
3699 if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3708 * Calculate type signature hash of ARRAY, including referenced type IDs,
3709 * under assumption that they were already resolved to canonical type IDs and
3710 * are not going to change.
3712 static long btf_hash_array(struct btf_type *t)
3714 const struct btf_array *info = btf_array(t);
3715 long h = btf_hash_common(t);
3717 h = hash_combine(h, info->type);
3718 h = hash_combine(h, info->index_type);
3719 h = hash_combine(h, info->nelems);
3724 * Check exact equality of two ARRAYs, taking into account referenced
3725 * type IDs, under assumption that they were already resolved to canonical
3726 * type IDs and are not going to change.
3727 * This function is called during reference types deduplication to compare
3728 * ARRAY to potential canonical representative.
3730 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3732 const struct btf_array *info1, *info2;
3734 if (!btf_equal_common(t1, t2))
3737 info1 = btf_array(t1);
3738 info2 = btf_array(t2);
3739 return info1->type == info2->type &&
3740 info1->index_type == info2->index_type &&
3741 info1->nelems == info2->nelems;
3745 * Check structural compatibility of two ARRAYs, ignoring referenced type
3746 * IDs. This check is performed during type graph equivalence check and
3747 * referenced types equivalence is checked separately.
3749 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3751 if (!btf_equal_common(t1, t2))
3754 return btf_array(t1)->nelems == btf_array(t2)->nelems;
3758 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3759 * under assumption that they were already resolved to canonical type IDs and
3760 * are not going to change.
3762 static long btf_hash_fnproto(struct btf_type *t)
3764 const struct btf_param *member = btf_params(t);
3765 __u16 vlen = btf_vlen(t);
3766 long h = btf_hash_common(t);
3769 for (i = 0; i < vlen; i++) {
3770 h = hash_combine(h, member->name_off);
3771 h = hash_combine(h, member->type);
3778 * Check exact equality of two FUNC_PROTOs, taking into account referenced
3779 * type IDs, under assumption that they were already resolved to canonical
3780 * type IDs and are not going to change.
3781 * This function is called during reference types deduplication to compare
3782 * FUNC_PROTO to potential canonical representative.
3784 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3786 const struct btf_param *m1, *m2;
3790 if (!btf_equal_common(t1, t2))
3793 vlen = btf_vlen(t1);
3794 m1 = btf_params(t1);
3795 m2 = btf_params(t2);
3796 for (i = 0; i < vlen; i++) {
3797 if (m1->name_off != m2->name_off || m1->type != m2->type)
3806 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3807 * IDs. This check is performed during type graph equivalence check and
3808 * referenced types equivalence is checked separately.
3810 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3812 const struct btf_param *m1, *m2;
3816 /* skip return type ID */
3817 if (t1->name_off != t2->name_off || t1->info != t2->info)
3820 vlen = btf_vlen(t1);
3821 m1 = btf_params(t1);
3822 m2 = btf_params(t2);
3823 for (i = 0; i < vlen; i++) {
3824 if (m1->name_off != m2->name_off)
3832 /* Prepare split BTF for deduplication by calculating hashes of base BTF's
3833 * types and initializing the rest of the state (canonical type mapping) for
3834 * the fixed base BTF part.
3836 static int btf_dedup_prep(struct btf_dedup *d)
3842 if (!d->btf->base_btf)
3845 for (type_id = 1; type_id < d->btf->start_id; type_id++) {
3846 t = btf_type_by_id(d->btf, type_id);
3848 /* all base BTF types are self-canonical by definition */
3849 d->map[type_id] = type_id;
3851 switch (btf_kind(t)) {
3853 case BTF_KIND_DATASEC:
3854 /* VAR and DATASEC are never hash/deduplicated */
3856 case BTF_KIND_CONST:
3857 case BTF_KIND_VOLATILE:
3858 case BTF_KIND_RESTRICT:
3861 case BTF_KIND_TYPEDEF:
3863 case BTF_KIND_FLOAT:
3864 case BTF_KIND_TYPE_TAG:
3865 h = btf_hash_common(t);
3868 case BTF_KIND_DECL_TAG:
3869 h = btf_hash_int_decl_tag(t);
3872 case BTF_KIND_ENUM64:
3873 h = btf_hash_enum(t);
3875 case BTF_KIND_STRUCT:
3876 case BTF_KIND_UNION:
3877 h = btf_hash_struct(t);
3879 case BTF_KIND_ARRAY:
3880 h = btf_hash_array(t);
3882 case BTF_KIND_FUNC_PROTO:
3883 h = btf_hash_fnproto(t);
3886 pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
3889 if (btf_dedup_table_add(d, h, type_id))
3897 * Deduplicate primitive types, which can't reference other types, by calculating
3898 * their type signature hash and comparing them with any possible canonical
3899 * candidate. If no canonical candidate matches, type itself is marked as
3900 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
3902 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
3904 struct btf_type *t = btf_type_by_id(d->btf, type_id);
3905 struct hashmap_entry *hash_entry;
3906 struct btf_type *cand;
3907 /* if we don't find equivalent type, then we are canonical */
3908 __u32 new_id = type_id;
3912 switch (btf_kind(t)) {
3913 case BTF_KIND_CONST:
3914 case BTF_KIND_VOLATILE:
3915 case BTF_KIND_RESTRICT:
3917 case BTF_KIND_TYPEDEF:
3918 case BTF_KIND_ARRAY:
3919 case BTF_KIND_STRUCT:
3920 case BTF_KIND_UNION:
3922 case BTF_KIND_FUNC_PROTO:
3924 case BTF_KIND_DATASEC:
3925 case BTF_KIND_DECL_TAG:
3926 case BTF_KIND_TYPE_TAG:
3930 h = btf_hash_int_decl_tag(t);
3931 for_each_dedup_cand(d, hash_entry, h) {
3932 cand_id = hash_entry->value;
3933 cand = btf_type_by_id(d->btf, cand_id);
3934 if (btf_equal_int_tag(t, cand)) {
3942 case BTF_KIND_ENUM64:
3943 h = btf_hash_enum(t);
3944 for_each_dedup_cand(d, hash_entry, h) {
3945 cand_id = hash_entry->value;
3946 cand = btf_type_by_id(d->btf, cand_id);
3947 if (btf_equal_enum(t, cand)) {
3951 if (btf_compat_enum(t, cand)) {
3952 if (btf_is_enum_fwd(t)) {
3953 /* resolve fwd to full enum */
3957 /* resolve canonical enum fwd to full enum */
3958 d->map[cand_id] = type_id;
3964 case BTF_KIND_FLOAT:
3965 h = btf_hash_common(t);
3966 for_each_dedup_cand(d, hash_entry, h) {
3967 cand_id = hash_entry->value;
3968 cand = btf_type_by_id(d->btf, cand_id);
3969 if (btf_equal_common(t, cand)) {
3980 d->map[type_id] = new_id;
3981 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
3987 static int btf_dedup_prim_types(struct btf_dedup *d)
3991 for (i = 0; i < d->btf->nr_types; i++) {
3992 err = btf_dedup_prim_type(d, d->btf->start_id + i);
4000 * Check whether type is already mapped into canonical one (could be to itself).
4002 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4004 return d->map[type_id] <= BTF_MAX_NR_TYPES;
4008 * Resolve type ID into its canonical type ID, if any; otherwise return original
4009 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
4010 * STRUCT/UNION link and resolve it into canonical type ID as well.
4012 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4014 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4015 type_id = d->map[type_id];
4020 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4023 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4025 __u32 orig_type_id = type_id;
4027 if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4030 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4031 type_id = d->map[type_id];
4033 if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4036 return orig_type_id;
4040 static inline __u16 btf_fwd_kind(struct btf_type *t)
4042 return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
4045 /* Check if given two types are identical ARRAY definitions */
4046 static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
4048 struct btf_type *t1, *t2;
4050 t1 = btf_type_by_id(d->btf, id1);
4051 t2 = btf_type_by_id(d->btf, id2);
4052 if (!btf_is_array(t1) || !btf_is_array(t2))
4055 return btf_equal_array(t1, t2);
4058 /* Check if given two types are identical STRUCT/UNION definitions */
4059 static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
4061 const struct btf_member *m1, *m2;
4062 struct btf_type *t1, *t2;
4065 t1 = btf_type_by_id(d->btf, id1);
4066 t2 = btf_type_by_id(d->btf, id2);
4068 if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
4071 if (!btf_shallow_equal_struct(t1, t2))
4074 m1 = btf_members(t1);
4075 m2 = btf_members(t2);
4076 for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4077 if (m1->type != m2->type &&
4078 !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
4079 !btf_dedup_identical_structs(d, m1->type, m2->type))
4086 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4087 * call it "candidate graph" in this description for brevity) to a type graph
4088 * formed by (potential) canonical struct/union ("canonical graph" for brevity
4089 * here, though keep in mind that not all types in canonical graph are
4090 * necessarily canonical representatives themselves, some of them might be
4091 * duplicates or their uniqueness might not have been established yet).
4093 * - >0, if type graphs are equivalent;
4094 * - 0, if not equivalent;
4097 * Algorithm performs side-by-side DFS traversal of both type graphs and checks
4098 * equivalence of BTF types at each step. If at any point BTF types in candidate
4099 * and canonical graphs are not compatible structurally, whole graphs are
4100 * incompatible. If types are structurally equivalent (i.e., all information
4101 * except referenced type IDs is exactly the same), a mapping from `canon_id` to
4102 * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
4103 * If a type references other types, then those referenced types are checked
4104 * for equivalence recursively.
4106 * During DFS traversal, if we find that for current `canon_id` type we
4107 * already have some mapping in hypothetical map, we check for two possible
4109 * - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4110 * happen when type graphs have cycles. In this case we assume those two
4111 * types are equivalent.
4112 * - `canon_id` is mapped to different type. This is contradiction in our
4113 * hypothetical mapping, because same graph in canonical graph corresponds
4114 * to two different types in candidate graph, which for equivalent type
4115 * graphs shouldn't happen. This condition terminates equivalence check
4116 * with negative result.
4118 * If type graph traversal exhausts types to check and finds no contradiction,
4119 * then type graphs are equivalent.
4121 * When checking types for equivalence, there is one special case: FWD types.
4122 * If FWD type resolution is allowed and one of the types (either from canonical
4123 * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
4124 * flag) and their names match, hypothetical mapping is updated to point from
4125 * FWD to STRUCT/UNION. If graphs are successfully determined to be equivalent,
4126 * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
4128 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4129 * if there are two identically named (or anonymous) structs/unions that are
4130 * compatible structurally, one of which has FWD field, while other is concrete
4131 * STRUCT/UNION, but according to C sources they are different structs/unions
4132 * that are referencing different types with the same name. This is extremely
4133 * unlikely to happen, but btf_dedup API allows disabling FWD resolution if
4134 * this logic is causing problems.
4136 * Doing FWD resolution means that both candidate and/or canonical graphs can
4137 * consist of portions of the graph that come from multiple compilation units.
4138 * This is due to the fact that types within single compilation unit are always
4139 * deduplicated and FWDs are already resolved, if referenced struct/union
4140 * definition is available. So, if we had unresolved FWD and found corresponding
4141 * STRUCT/UNION, they will be from different compilation units. This
4142 * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
4143 * type graph will likely have at least two different BTF types that describe
4144 * same type (e.g., most probably there will be two different BTF types for the
4145 * same 'int' primitive type) and could even have "overlapping" parts of type
4146 * graph that describe same subset of types.
4148 * This in turn means that our assumption that each type in canonical graph
4149 * must correspond to exactly one type in candidate graph might not hold
4150 * anymore and will make it harder to detect contradictions using hypothetical
4151 * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION
4152 * resolution only in canonical graph. FWDs in candidate graphs are never
4153 * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
4155 * - Both types in canonical and candidate graphs are FWDs. If they are
4156 * structurally equivalent, then they can either be both resolved to the
4157 * same STRUCT/UNION or not resolved at all. In both cases they are
4158 * equivalent and there is no need to resolve FWD on candidate side.
4159 * - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4160 * so nothing to resolve as well, algorithm will check equivalence anyway.
4161 * - Type in canonical graph is FWD, while type in candidate is concrete
4162 * STRUCT/UNION. In this case candidate graph comes from single compilation
4163 * unit, so there is exactly one BTF type for each unique C type. After
4164 * resolving FWD into STRUCT/UNION, there might be more than one BTF type
4165 * in canonical graph mapping to single BTF type in candidate graph, but
4166 * because hypothetical mapping maps from canonical to candidate types, it's
4167 * alright, and we still maintain the property of having single `canon_id`
4168 * mapping to single `cand_id` (there could be two different `canon_id`
4169 * mapped to the same `cand_id`, but it's not contradictory).
4170 * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4171 * graph is FWD. In this case we are just going to check compatibility of
4172 * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4173 * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4174 * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4175 * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4178 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
4181 struct btf_type *cand_type;
4182 struct btf_type *canon_type;
4183 __u32 hypot_type_id;
4188 /* if both resolve to the same canonical, they must be equivalent */
4189 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
4192 canon_id = resolve_fwd_id(d, canon_id);
4194 hypot_type_id = d->hypot_map[canon_id];
4195 if (hypot_type_id <= BTF_MAX_NR_TYPES) {
4196 if (hypot_type_id == cand_id)
4198 /* In some cases compiler will generate different DWARF types
4199 * for *identical* array type definitions and use them for
4200 * different fields within the *same* struct. This breaks type
4201 * equivalence check, which makes an assumption that candidate
4202 * types sub-graph has a consistent and deduped-by-compiler
4203 * types within a single CU. So work around that by explicitly
4204 * allowing identical array types here.
4206 if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
4208 /* It turns out that similar situation can happen with
4209 * struct/union sometimes, sigh... Handle the case where
4210 * structs/unions are exactly the same, down to the referenced
4211 * type IDs. Anything more complicated (e.g., if referenced
4212 * types are different, but equivalent) is *way more*
4213 * complicated and requires a many-to-many equivalence mapping.
4215 if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
4220 if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
4223 cand_type = btf_type_by_id(d->btf, cand_id);
4224 canon_type = btf_type_by_id(d->btf, canon_id);
4225 cand_kind = btf_kind(cand_type);
4226 canon_kind = btf_kind(canon_type);
4228 if (cand_type->name_off != canon_type->name_off)
4231 /* FWD <--> STRUCT/UNION equivalence check, if enabled */
4232 if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
4233 && cand_kind != canon_kind) {
4237 if (cand_kind == BTF_KIND_FWD) {
4238 real_kind = canon_kind;
4239 fwd_kind = btf_fwd_kind(cand_type);
4241 real_kind = cand_kind;
4242 fwd_kind = btf_fwd_kind(canon_type);
4243 /* we'd need to resolve base FWD to STRUCT/UNION */
4244 if (fwd_kind == real_kind && canon_id < d->btf->start_id)
4245 d->hypot_adjust_canon = true;
4247 return fwd_kind == real_kind;
4250 if (cand_kind != canon_kind)
4253 switch (cand_kind) {
4255 return btf_equal_int_tag(cand_type, canon_type);
4258 case BTF_KIND_ENUM64:
4259 return btf_compat_enum(cand_type, canon_type);
4262 case BTF_KIND_FLOAT:
4263 return btf_equal_common(cand_type, canon_type);
4265 case BTF_KIND_CONST:
4266 case BTF_KIND_VOLATILE:
4267 case BTF_KIND_RESTRICT:
4269 case BTF_KIND_TYPEDEF:
4271 case BTF_KIND_TYPE_TAG:
4272 if (cand_type->info != canon_type->info)
4274 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4276 case BTF_KIND_ARRAY: {
4277 const struct btf_array *cand_arr, *canon_arr;
4279 if (!btf_compat_array(cand_type, canon_type))
4281 cand_arr = btf_array(cand_type);
4282 canon_arr = btf_array(canon_type);
4283 eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
4286 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
4289 case BTF_KIND_STRUCT:
4290 case BTF_KIND_UNION: {
4291 const struct btf_member *cand_m, *canon_m;
4294 if (!btf_shallow_equal_struct(cand_type, canon_type))
4296 vlen = btf_vlen(cand_type);
4297 cand_m = btf_members(cand_type);
4298 canon_m = btf_members(canon_type);
4299 for (i = 0; i < vlen; i++) {
4300 eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
4310 case BTF_KIND_FUNC_PROTO: {
4311 const struct btf_param *cand_p, *canon_p;
4314 if (!btf_compat_fnproto(cand_type, canon_type))
4316 eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
4319 vlen = btf_vlen(cand_type);
4320 cand_p = btf_params(cand_type);
4321 canon_p = btf_params(canon_type);
4322 for (i = 0; i < vlen; i++) {
4323 eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
4339 * Use hypothetical mapping, produced by successful type graph equivalence
4340 * check, to augment existing struct/union canonical mapping, where possible.
4342 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4343 * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4344 * it doesn't matter if FWD type was part of canonical graph or candidate one,
4345 * we are recording the mapping anyway. As opposed to carefulness required
4346 * for struct/union correspondence mapping (described below), for FWD resolution
4347 * it's not important, as by the time that FWD type (reference type) is
4348 * deduplicated, all structs/unions will have been deduped already anyway.
4350 * Recording STRUCT/UNION mapping is purely a performance optimization and is
4351 * not required for correctness. It needs to be done carefully to ensure that
4352 * struct/union from candidate's type graph is not mapped into corresponding
4353 * struct/union from canonical type graph that itself hasn't been resolved into
4354 * canonical representative. The only guarantee we have is that canonical
4355 * struct/union was determined as canonical and that won't change. But any
4356 * types referenced through that struct/union fields could have been not yet
4357 * resolved, so in case like that it's too early to establish any kind of
4358 * correspondence between structs/unions.
4360 * No canonical correspondence is derived for primitive types (they are
4361 * already deduplicated completely anyway) or reference types (they rely on
4362 * stability of struct/union canonical relationship for equivalence checks).
4364 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
4366 __u32 canon_type_id, targ_type_id;
4367 __u16 t_kind, c_kind;
4371 for (i = 0; i < d->hypot_cnt; i++) {
4372 canon_type_id = d->hypot_list[i];
4373 targ_type_id = d->hypot_map[canon_type_id];
4374 t_id = resolve_type_id(d, targ_type_id);
4375 c_id = resolve_type_id(d, canon_type_id);
4376 t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
4377 c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
4379 * Resolve FWD into STRUCT/UNION.
4380 * It's ok to resolve FWD into STRUCT/UNION that's not yet
4381 * mapped to canonical representative (as opposed to
4382 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
4383 * eventually that struct is going to be mapped and all resolved
4384 * FWDs will automatically resolve to correct canonical
4385 * representative. This will happen before ref type deduping,
4386 * which critically depends on stability of these mappings. This
4387 * stability is not a requirement for STRUCT/UNION equivalence
4391 /* if it's the split BTF case, we still need to point base FWD
4392 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
4393 * will be resolved against base FWD. If we don't point base
4394 * canonical FWD to the resolved STRUCT/UNION, then all the
4395 * FWDs in split BTF won't be correctly resolved to a proper
4398 if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
4399 d->map[c_id] = t_id;
4401 /* if graph equivalence determined that we'd need to adjust
4402 * base canonical types, then we need to only point base FWDs
4403 * to STRUCTs/UNIONs and do no more modifications. For all
4404 * other purposes the type graphs were not equivalent.
4406 if (d->hypot_adjust_canon)
4409 if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
4410 d->map[t_id] = c_id;
4412 if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
4413 c_kind != BTF_KIND_FWD &&
4414 is_type_mapped(d, c_id) &&
4415 !is_type_mapped(d, t_id)) {
4417 * as a perf optimization, we can map struct/union
4418 * that's part of type graph we just verified for
4419 * equivalence. We can do that for struct/union that has
4420 * canonical representative only, though.
4422 d->map[t_id] = c_id;
4428 * Deduplicate struct/union types.
4430 * For each struct/union type its type signature hash is calculated, taking
4431 * into account type's name, size, number, order and names of fields, but
4432 * ignoring type IDs referenced from fields, because they might not be deduped
4433 * completely until after reference types deduplication phase. This type hash
4434 * is used to iterate over all potential canonical types, sharing same hash.
4435 * For each canonical candidate we check whether type graphs that they form
4436 * (through referenced types in fields and so on) are equivalent using algorithm
4437 * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4438 * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4439 * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4440 * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4441 * potentially map other structs/unions to their canonical representatives,
4442 * if such relationship hasn't yet been established. This speeds up algorithm
4443 * by eliminating some of the duplicate work.
4445 * If no matching canonical representative was found, struct/union is marked
4446 * as canonical for itself and is added into btf_dedup->dedup_table hash map
4447 * for further lookups.
4449 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
4451 struct btf_type *cand_type, *t;
4452 struct hashmap_entry *hash_entry;
4453 /* if we don't find equivalent type, then we are canonical */
4454 __u32 new_id = type_id;
4458 /* already deduped or is in process of deduping (loop detected) */
4459 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4462 t = btf_type_by_id(d->btf, type_id);
4465 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4468 h = btf_hash_struct(t);
4469 for_each_dedup_cand(d, hash_entry, h) {
4470 __u32 cand_id = hash_entry->value;
4474 * Even though btf_dedup_is_equiv() checks for
4475 * btf_shallow_equal_struct() internally when checking two
4476 * structs (unions) for equivalence, we need to guard here
4477 * from picking matching FWD type as a dedup candidate.
4478 * This can happen due to hash collision. In such case just
4479 * relying on btf_dedup_is_equiv() would lead to potentially
4480 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
4481 * FWD and compatible STRUCT/UNION are considered equivalent.
4483 cand_type = btf_type_by_id(d->btf, cand_id);
4484 if (!btf_shallow_equal_struct(t, cand_type))
4487 btf_dedup_clear_hypot_map(d);
4488 eq = btf_dedup_is_equiv(d, type_id, cand_id);
4493 btf_dedup_merge_hypot_map(d);
4494 if (d->hypot_adjust_canon) /* not really equivalent */
4500 d->map[type_id] = new_id;
4501 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4507 static int btf_dedup_struct_types(struct btf_dedup *d)
4511 for (i = 0; i < d->btf->nr_types; i++) {
4512 err = btf_dedup_struct_type(d, d->btf->start_id + i);
4520 * Deduplicate reference type.
4522 * Once all primitive and struct/union types got deduplicated, we can easily
4523 * deduplicate all other (reference) BTF types. This is done in two steps:
4525 * 1. Resolve all referenced type IDs into their canonical type IDs. This
4526 * resolution can be done either immediately for primitive or struct/union types
4527 * (because they were deduped in previous two phases) or recursively for
4528 * reference types. Recursion will always terminate at either primitive or
4529 * struct/union type, at which point we can "unwind" chain of reference types
4530 * one by one. There is no danger of encountering cycles because in C type
4531 * system the only way to form type cycle is through struct/union, so any chain
4532 * of reference types, even those taking part in a type cycle, will inevitably
4533 * reach struct/union at some point.
4535 * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4536 * becomes "stable", in the sense that no further deduplication will cause
4537 * any changes to it. With that, it's now possible to calculate type's signature
4538 * hash (this time taking into account referenced type IDs) and loop over all
4539 * potential canonical representatives. If no match was found, current type
4540 * will become canonical representative of itself and will be added into
4541 * btf_dedup->dedup_table as another possible canonical representative.
4543 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
4545 struct hashmap_entry *hash_entry;
4546 __u32 new_id = type_id, cand_id;
4547 struct btf_type *t, *cand;
4548 /* if we don't find equivalent type, then we are representative type */
4552 if (d->map[type_id] == BTF_IN_PROGRESS_ID)
4554 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
4555 return resolve_type_id(d, type_id);
4557 t = btf_type_by_id(d->btf, type_id);
4558 d->map[type_id] = BTF_IN_PROGRESS_ID;
4560 switch (btf_kind(t)) {
4561 case BTF_KIND_CONST:
4562 case BTF_KIND_VOLATILE:
4563 case BTF_KIND_RESTRICT:
4565 case BTF_KIND_TYPEDEF:
4567 case BTF_KIND_TYPE_TAG:
4568 ref_type_id = btf_dedup_ref_type(d, t->type);
4569 if (ref_type_id < 0)
4571 t->type = ref_type_id;
4573 h = btf_hash_common(t);
4574 for_each_dedup_cand(d, hash_entry, h) {
4575 cand_id = hash_entry->value;
4576 cand = btf_type_by_id(d->btf, cand_id);
4577 if (btf_equal_common(t, cand)) {
4584 case BTF_KIND_DECL_TAG:
4585 ref_type_id = btf_dedup_ref_type(d, t->type);
4586 if (ref_type_id < 0)
4588 t->type = ref_type_id;
4590 h = btf_hash_int_decl_tag(t);
4591 for_each_dedup_cand(d, hash_entry, h) {
4592 cand_id = hash_entry->value;
4593 cand = btf_type_by_id(d->btf, cand_id);
4594 if (btf_equal_int_tag(t, cand)) {
4601 case BTF_KIND_ARRAY: {
4602 struct btf_array *info = btf_array(t);
4604 ref_type_id = btf_dedup_ref_type(d, info->type);
4605 if (ref_type_id < 0)
4607 info->type = ref_type_id;
4609 ref_type_id = btf_dedup_ref_type(d, info->index_type);
4610 if (ref_type_id < 0)
4612 info->index_type = ref_type_id;
4614 h = btf_hash_array(t);
4615 for_each_dedup_cand(d, hash_entry, h) {
4616 cand_id = hash_entry->value;
4617 cand = btf_type_by_id(d->btf, cand_id);
4618 if (btf_equal_array(t, cand)) {
4626 case BTF_KIND_FUNC_PROTO: {
4627 struct btf_param *param;
4631 ref_type_id = btf_dedup_ref_type(d, t->type);
4632 if (ref_type_id < 0)
4634 t->type = ref_type_id;
4637 param = btf_params(t);
4638 for (i = 0; i < vlen; i++) {
4639 ref_type_id = btf_dedup_ref_type(d, param->type);
4640 if (ref_type_id < 0)
4642 param->type = ref_type_id;
4646 h = btf_hash_fnproto(t);
4647 for_each_dedup_cand(d, hash_entry, h) {
4648 cand_id = hash_entry->value;
4649 cand = btf_type_by_id(d->btf, cand_id);
4650 if (btf_equal_fnproto(t, cand)) {
4662 d->map[type_id] = new_id;
4663 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
4669 static int btf_dedup_ref_types(struct btf_dedup *d)
4673 for (i = 0; i < d->btf->nr_types; i++) {
4674 err = btf_dedup_ref_type(d, d->btf->start_id + i);
4678 /* we won't need d->dedup_table anymore */
4679 hashmap__free(d->dedup_table);
4680 d->dedup_table = NULL;
4685 * Collect a map from type names to type ids for all canonical structs
4686 * and unions. If the same name is shared by several canonical types,
4687 * use a special value 0 to indicate this fact.
4689 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
4691 __u32 nr_types = btf__type_cnt(d->btf);
4698 * Iterate over base and split module ids in order to get all
4699 * available structs in the map.
4701 for (type_id = 1; type_id < nr_types; ++type_id) {
4702 t = btf_type_by_id(d->btf, type_id);
4705 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4708 /* Skip non-canonical types */
4709 if (type_id != d->map[type_id])
4712 err = hashmap__add(names_map, t->name_off, type_id);
4714 err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);

static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
{
        struct btf_type *t = btf_type_by_id(d->btf, type_id);
        enum btf_fwd_kind fwd_kind = btf_kflag(t);
        __u16 cand_kind, kind = btf_kind(t);
        struct btf_type *cand_t;
        uintptr_t cand_id;

        if (kind != BTF_KIND_FWD)
                return 0;

        /* Skip if this FWD already has a mapping */
        if (type_id != d->map[type_id])
                return 0;

        if (!hashmap__find(names_map, t->name_off, &cand_id))
                return 0;

        /* Zero is a special value indicating that name is not unique */
        if (!cand_id)
                return 0;

        cand_t = btf_type_by_id(d->btf, cand_id);
        cand_kind = btf_kind(cand_t);
        if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
            (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
                return 0;

        d->map[type_id] = cand_id;

        return 0;
}

/*
 * Resolve unambiguous forward declarations.
 *
 * The lion's share of all FWD declarations is resolved during the
 * `btf_dedup_struct_types` phase, when different type graphs are compared
 * against each other. However, if in some compilation unit a FWD declaration
 * is not part of any type graph compared against another type graph, that
 * declaration's canonical type would not be changed.
 *
 * Example:
 *
 * CU #1:
 *
 * struct foo *some_global;
 *
 * CU #2:
 *
 * struct foo { int u; };
 * struct foo *another_global;
 *
 * After `btf_dedup_struct_types` the BTF looks as follows:
 *
 * [1] STRUCT 'foo' size=4 vlen=1 ...
 * [2] INT 'int' size=4 ...
 * [3] PTR '(anon)' type_id=1
 * [4] FWD 'foo' fwd_kind=struct
 * [5] PTR '(anon)' type_id=4
 *
 * This pass assumes that such FWD declarations should be mapped to structs
 * or unions with an identical name when that name is unambiguous.
 */
static int btf_dedup_resolve_fwds(struct btf_dedup *d)
{
        int i, err;
        struct hashmap *names_map;

        names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
        if (IS_ERR(names_map))
                return PTR_ERR(names_map);

        err = btf_dedup_fill_unique_names_map(d, names_map);
        if (err < 0)
                goto exit;

        for (i = 0; i < d->btf->nr_types; i++) {
                err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
                if (err < 0)
                        break;
        }

exit:
        hashmap__free(names_map);
        return err;
}
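
/*
 * Continuing the example above: once this pass runs, FWD type [4] 'foo' is
 * mapped to canonical STRUCT [1] 'foo' (d->map[4] == 1), so both PTR types
 * [3] and [5] now reference the same canonical struct.
 */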

/*
 * Compact types.
 *
 * After we established for each type its corresponding canonical representative
 * type, we can now eliminate types that are not canonical and leave only
 * canonical ones laid out sequentially in memory by copying them over
 * duplicates. During compaction the btf_dedup->hypot_map array is reused to
 * store a map from original type ID to a new compacted type ID, which will be
 * used during the next phase to "fix up" type IDs referenced from struct/union
 * and reference types.
 */
static int btf_dedup_compact_types(struct btf_dedup *d)
{
        __u32 *new_offs;
        __u32 next_type_id = d->btf->start_id;
        const struct btf_type *t;
        void *p;
        int i, id, len;

        /* we are going to reuse hypot_map to store compaction remapping */
        d->hypot_map[0] = 0;
        /* base BTF types are not renumbered */
        for (id = 1; id < d->btf->start_id; id++)
                d->hypot_map[id] = id;
        for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
                d->hypot_map[id] = BTF_UNPROCESSED_ID;

        p = d->btf->types_data;

        for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
                if (d->map[id] != id)
                        continue;

                t = btf__type_by_id(d->btf, id);
                len = btf_type_size(t);
                if (len < 0)
                        return len;

                memmove(p, t, len);
                d->hypot_map[id] = next_type_id;
                d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
                p += len;
                next_type_id++;
        }

        /* shrink struct btf's internal types index and update btf_header */
        d->btf->nr_types = next_type_id - d->btf->start_id;
        d->btf->type_offs_cap = d->btf->nr_types;
        d->btf->hdr->type_len = p - d->btf->types_data;
        new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
                                       sizeof(*new_offs));
        if (d->btf->type_offs_cap && !new_offs)
                return -ENOMEM;
        d->btf->type_offs = new_offs;
        d->btf->hdr->str_off = d->btf->hdr->type_len;
        d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
        return 0;
}
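
/*
 * For example, with start_id == 1, if type [2] was found to be a duplicate of
 * [1] while [3] is canonical, compaction keeps only [1] and [3] and records
 * hypot_map[1] == 1 and hypot_map[3] == 2; non-canonical [2] stays
 * BTF_UNPROCESSED_ID and is reached through resolve_type_id() during the
 * remap phase below.
 */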

/*
 * Figure out final (deduplicated and compacted) type ID for provided original
 * `type_id` by first resolving it into corresponding canonical type ID and
 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
 * which is populated during compaction phase.
 */
static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
{
        struct btf_dedup *d = ctx;
        __u32 resolved_type_id, new_type_id;

        resolved_type_id = resolve_type_id(d, *type_id);
        new_type_id = d->hypot_map[resolved_type_id];
        if (new_type_id > BTF_MAX_NR_TYPES)
                return -EINVAL;

        *type_id = new_type_id;
        return 0;
}
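
/*
 * Note that btf_dedup_remap_type_id() matches the type_id_visit_fn callback
 * signature, so btf_dedup_remap_types() below can hand it directly to
 * btf_type_visit_type_ids() and btf_ext_visit_type_ids() to rewrite every
 * referenced type ID in place.
 */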

/*
 * Remap referenced type IDs into deduped type IDs.
 *
 * After BTF types are deduplicated and compacted, their final type IDs may
 * differ from the original ones. The map from an original to a corresponding
 * deduped type ID is stored in btf_dedup->hypot_map and is populated during
 * the compaction phase. During the remapping phase we rewrite all type IDs
 * referenced from any BTF type (e.g., struct fields, func proto args, etc.) to
 * their final deduped type IDs.
 */
static int btf_dedup_remap_types(struct btf_dedup *d)
{
        int i, r;

        for (i = 0; i < d->btf->nr_types; i++) {
                struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);

                r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
                if (r)
                        return r;
        }

        if (!d->btf_ext)
                return 0;

        r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
        if (r)
                return r;

        return 0;
}

/*
 * Probe a few well-known locations for vmlinux kernel image and try to load
 * BTF data out of it to use for target BTF.
 */
struct btf *btf__load_vmlinux_btf(void)
{
        const char *locations[] = {
                /* try canonical vmlinux BTF through sysfs first */
                "/sys/kernel/btf/vmlinux",
                /* fall back to trying to find vmlinux on disk otherwise */
                "/boot/vmlinux-%1$s",
                "/lib/modules/%1$s/vmlinux-%1$s",
                "/lib/modules/%1$s/build/vmlinux",
                "/usr/lib/modules/%1$s/kernel/vmlinux",
                "/usr/lib/debug/boot/vmlinux-%1$s",
                "/usr/lib/debug/boot/vmlinux-%1$s.debug",
                "/usr/lib/debug/lib/modules/%1$s/vmlinux",
        };
        char path[PATH_MAX + 1];
        struct utsname buf;
        struct btf *btf;
        int i, err;

        uname(&buf);

        for (i = 0; i < ARRAY_SIZE(locations); i++) {
                snprintf(path, PATH_MAX, locations[i], buf.release);

                if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
                        continue;

                btf = btf__parse(path, NULL);
                err = libbpf_get_error(btf);
                pr_debug("loading kernel BTF '%s': %d\n", path, err);
                if (err)
                        continue;

                return btf;
        }

        pr_warn("failed to find valid kernel BTF\n");
        return libbpf_err_ptr(-ESRCH);
}

struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
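
/*
 * Typical caller-side usage (illustrative sketch only; error handling is
 * abbreviated and "task_struct" is just an example lookup):
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *	__s32 id;
 *
 *	if (libbpf_get_error(vmlinux_btf))
 *		return -ESRCH;
 *	id = btf__find_by_name_kind(vmlinux_btf, "task_struct", BTF_KIND_STRUCT);
 *	...
 *	btf__free(vmlinux_btf);
 */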

struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
{
        char path[80];

        snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);
        return btf__parse_split(path, vmlinux_btf);
}
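
/*
 * Module BTF is split BTF built on top of vmlinux BTF, so a typical sequence
 * is (illustrative sketch; "nf_conntrack" is just an example module name):
 *
 *	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *	struct btf *mod_btf = btf__load_module_btf("nf_conntrack", vmlinux_btf);
 *	...
 *	btf__free(mod_btf);
 *	btf__free(vmlinux_btf);
 */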

int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
{
        int i, n, err;

        switch (btf_kind(t)) {
        case BTF_KIND_INT:
        case BTF_KIND_FLOAT:
        case BTF_KIND_ENUM:
        case BTF_KIND_ENUM64:
                return 0;

        case BTF_KIND_FWD:
        case BTF_KIND_CONST:
        case BTF_KIND_VOLATILE:
        case BTF_KIND_RESTRICT:
        case BTF_KIND_PTR:
        case BTF_KIND_TYPEDEF:
        case BTF_KIND_FUNC:
        case BTF_KIND_VAR:
        case BTF_KIND_DECL_TAG:
        case BTF_KIND_TYPE_TAG:
                return visit(&t->type, ctx);

        case BTF_KIND_ARRAY: {
                struct btf_array *a = btf_array(t);

                err = visit(&a->type, ctx);
                err = err ?: visit(&a->index_type, ctx);
                return err;
        }

        case BTF_KIND_STRUCT:
        case BTF_KIND_UNION: {
                struct btf_member *m = btf_members(t);

                for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
                        err = visit(&m->type, ctx);
                        if (err)
                                return err;
                }
                return 0;
        }

        case BTF_KIND_FUNC_PROTO: {
                struct btf_param *m = btf_params(t);

                err = visit(&t->type, ctx);
                if (err)
                        return err;
                for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
                        err = visit(&m->type, ctx);
                        if (err)
                                return err;
                }
                return 0;
        }

        case BTF_KIND_DATASEC: {
                struct btf_var_secinfo *m = btf_var_secinfos(t);

                for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
                        err = visit(&m->type, ctx);
                        if (err)
                                return err;
                }
                return 0;
        }

        default:
                return -EINVAL;
        }
}
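
/*
 * Example of a minimal type_id_visit_fn callback (illustrative only; the
 * print_type_id name is hypothetical and not part of libbpf):
 *
 *	static int print_type_id(__u32 *type_id, void *ctx)
 *	{
 *		printf("references type [%u]\n", *type_id);
 *		return 0;
 *	}
 *
 *	err = btf_type_visit_type_ids(t, print_type_id, NULL);
 *
 * Callbacks are also free to modify *type_id in place, which is how
 * btf_dedup_remap_type_id() above rewrites references to deduped IDs.
 */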

int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
{
        int i, n, err;

        err = visit(&t->name_off, ctx);
        if (err)
                return err;

        switch (btf_kind(t)) {
        case BTF_KIND_STRUCT:
        case BTF_KIND_UNION: {
                struct btf_member *m = btf_members(t);

                for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
                        err = visit(&m->name_off, ctx);
                        if (err)
                                return err;
                }
                break;
        }
        case BTF_KIND_ENUM: {
                struct btf_enum *m = btf_enum(t);

                for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
                        err = visit(&m->name_off, ctx);
                        if (err)
                                return err;
                }
                break;
        }
        case BTF_KIND_ENUM64: {
                struct btf_enum64 *m = btf_enum64(t);

                for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
                        err = visit(&m->name_off, ctx);
                        if (err)
                                return err;
                }
                break;
        }
        case BTF_KIND_FUNC_PROTO: {
                struct btf_param *m = btf_params(t);

                for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
                        err = visit(&m->name_off, ctx);
                        if (err)
                                return err;
                }
                break;
        }
        default:
                break;
        }

        return 0;
}
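
/*
 * btf_type_visit_str_offs() is the string-offset counterpart of
 * btf_type_visit_type_ids(): the callback receives a pointer to each
 * name_off referenced by the type (its own name plus member, enumerator and
 * parameter names) and may rewrite it in place, e.g. after string
 * deduplication.
 */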

int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
        const struct btf_ext_info *seg;
        struct btf_ext_info_sec *sec;
        int i, err;

        seg = &btf_ext->func_info;
        for_each_btf_ext_sec(seg, sec) {
                struct bpf_func_info_min *rec;

                for_each_btf_ext_rec(seg, sec, i, rec) {
                        err = visit(&rec->type_id, ctx);
                        if (err < 0)
                                return err;
                }
        }

        seg = &btf_ext->core_relo_info;
        for_each_btf_ext_sec(seg, sec) {
                struct bpf_core_relo *rec;

                for_each_btf_ext_rec(seg, sec, i, rec) {
                        err = visit(&rec->type_id, ctx);
                        if (err < 0)
                                return err;
                }
        }

        return 0;
}

int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
{
        const struct btf_ext_info *seg;
        struct btf_ext_info_sec *sec;
        int i, err;

        seg = &btf_ext->func_info;
        for_each_btf_ext_sec(seg, sec) {
                err = visit(&sec->sec_name_off, ctx);
                if (err)
                        return err;
        }

        seg = &btf_ext->line_info;
        for_each_btf_ext_sec(seg, sec) {
                struct bpf_line_info_min *rec;

                err = visit(&sec->sec_name_off, ctx);
                if (err)
                        return err;

                for_each_btf_ext_rec(seg, sec, i, rec) {
                        err = visit(&rec->file_name_off, ctx);
                        if (err)
                                return err;
                        err = visit(&rec->line_off, ctx);
                        if (err)
                                return err;
                }
        }

        seg = &btf_ext->core_relo_info;
        for_each_btf_ext_sec(seg, sec) {
                struct bpf_core_relo *rec;

                err = visit(&sec->sec_name_off, ctx);
                if (err)
                        return err;

                for_each_btf_ext_rec(seg, sec, i, rec) {
                        err = visit(&rec->access_str_off, ctx);