1 /* Copyright (c) 2017 Facebook
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
15 #include <linux/types.h>
16 typedef __u16 __sum16;
17 #include <arpa/inet.h>
18 #include <linux/if_ether.h>
19 #include <linux/if_packet.h>
21 #include <linux/ipv6.h>
22 #include <linux/tcp.h>
23 #include <linux/filter.h>
24 #include <linux/perf_event.h>
25 #include <linux/unistd.h>
27 #include <sys/ioctl.h>
29 #include <sys/types.h>
32 #include <linux/bpf.h>
33 #include <linux/err.h>
35 #include <bpf/libbpf.h>
37 #include "test_iptunnel_common.h"
39 #include "bpf_endian.h"
40 #include "bpf_rlimit.h"
41 #include "trace_helpers.h"
/* Running totals of failed/passed CHECK()s across all tests in this file. */
static int error_cnt, pass_cnt;
/* Whether the kernel BPF JIT is on; presumably initialized from
 * is_jit_enabled() at startup — confirm against main().
 */
static bool jit_enabled;

/* Payload size advertised in the IP headers of the canned test packets;
 * the l4lb stats checks below rely on this exact value.
 */
#define MAGIC_BYTES 123
/* ipv4 test vector */
/* Ethertype IPv4, constant-folded to network byte order at compile time. */
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
/* IP total length deliberately set to MAGIC_BYTES so the byte counters
 * accumulated by the test programs are predictable.
 */
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
/* ipv6 test vector */
/* Ethertype IPv6, constant-folded to network byte order at compile time. */
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
/* IPv6 payload length set to the same MAGIC_BYTES as the v4 packet. */
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
/* Test assertion helper: evaluates @condition exactly once; non-zero means
 * failure.  On failure prints "<func>:FAIL:<tag>" followed by the
 * printf-style @format arguments; on success prints a PASS line using the
 * caller-scope `duration` variable (nanoseconds).  Expands to the truth
 * value of @condition so callers can write `if (CHECK(...)) goto out;`.
 * NOTE(review): the error_cnt/pass_cnt bookkeeping belongs between these
 * branches — confirm against the full macro body, parts of which are not
 * visible here.
 */
#define CHECK(condition, tag, format...) ({ \
	int __ret = !!(condition); \
	printf("%s:FAIL:%s ", __func__, tag); \
	printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
/* Look up the map named @name inside loaded object @obj.
 * Returns the map's fd on success; on failure prints a FAIL line tagged
 * with the calling test's name (@test) and presumably returns a negative
 * value — the error branch is not fully visible here.
 */
static int bpf_find_map(const char *test, struct bpf_object *obj,
	map = bpf_object__find_map_by_name(obj, name);
	printf("%s:FAIL:map '%s' not found\n", test, name);
	return bpf_map__fd(map);
/* Load test_pkt_access.o as a SCHED_CLS program and run it 100000 times
 * over the canned v4 and v6 packets; both runs must succeed with
 * retval == 0 and errno untouched.
 */
static void test_pkt_access(void)
	const char *file = "./test_pkt_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);

	/* IPv4 packet: no output buffer needed, only retval is checked. */
	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv4",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	/* Same program, IPv6 packet. */
	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);
	bpf_object__close(obj);
/* Load test_xdp.o, seed the vip2tnl map with one IPv4 and one IPv6
 * tunnel entry (protocol 6 = TCP), and run the program over both canned
 * packets.  Each run must return XDP_TX with the packet grown to the
 * encapsulated size (74 bytes for v4-in-v4, 114 for v6-in-v6) and the
 * expected outer encapsulation protocol.
 */
static void test_xdp(void)
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	/* Both header views alias the start of the output buffer past the
	 * Ethernet header; which one is valid depends on the test case.
	 */
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	/* v4 packet must come back IPIP-encapsulated. */
	CHECK(err || errno || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	/* v6 packet must come back v6-in-v6 encapsulated. */
	CHECK(err || errno || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	bpf_object__close(obj);
/* Load test_adjust_tail.o (bpf_xdp_adjust_tail() exerciser).  The v4
 * packet is expected to be dropped outright; the v6 packet must be
 * transmitted with its tail trimmed to 54 bytes.
 */
static void test_xdp_adjust_tail(void)
	const char *file = "./test_adjust_tail.o";
	struct bpf_object *obj;
	__u32 duration, retval, size;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || errno || retval != XDP_DROP,
	      "ipv4", "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 54,
	      "ipv6", "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
	bpf_object__close(obj);
/* Backend destination value the l4lb/xdp programs write into the packet;
 * checked against the first word of the output buffer.
 */
#define MAGIC_VAL 0x1234
/* Repeat count passed to bpf_prog_test_run() so the stats maps accumulate
 * a predictable total.
 */
#define NUM_ITER 100000
/* Shared body for the l4lb variants: load @file as SCHED_CLS, seed the
 * vip_map/ch_rings/reals maps with one TCP virtual service whose backend
 * destination is MAGIC_VAL, then run the program NUM_ITER times over the
 * v4 and v6 packets.  Expects retval 7 (TC_ACT_REDIRECT), rewritten
 * packet sizes of 54/74 bytes with MAGIC_VAL at the head of the output
 * buffer, and per-CPU stats summing to exactly 2 * NUM_ITER packets of
 * MAGIC_BYTES each.
 */
static void test_l4lb(const char *file)
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct real_definition {
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	u32 *magic = (u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);

	/* Seed the three maps the balancer consults. */
	map_fd = bpf_find_map(__func__, obj, "vip_map");
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	/* Sum the per-CPU counters; totals must match 2 * NUM_ITER runs. */
	map_fd = bpf_find_map(__func__, obj, "stats");
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
	printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	bpf_object__close(obj);
/* Run the l4lb test against both builds of the balancer: the fully
 * inlined object and the noinline (bpf-to-bpf call) variant.
 */
static void test_l4lb_all(void)
	const char *file1 = "./test_l4lb.o";
	const char *file2 = "./test_l4lb_noinline.o";
/* Same scenario as test_l4lb() but for the XDP noinline build: seed the
 * vip_map/ch_rings/reals maps, run NUM_ITER times over both packets, and
 * expect retval 1 with the rewritten sizes 54/74 and MAGIC_VAL at the
 * head of the output buffer, then cross-check the per-CPU stats.
 */
static void test_xdp_noinline(void)
	const char *file = "./test_xdp_noinline.o";
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct real_definition {
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	u32 *magic = (u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);

	/* Seed the three maps the balancer consults. */
	map_fd = bpf_find_map(__func__, obj, "vip_map");
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	/* Sum the per-CPU counters; totals must match 2 * NUM_ITER runs. */
	map_fd = bpf_find_map(__func__, obj, "stats");
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
	printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
	bpf_object__close(obj);
/* Smoke test: test_tcp_estats.o must simply load as a TRACEPOINT
 * program; nothing is executed beyond verifying the load succeeds.
 */
static void test_tcp_estats(void)
	const char *file = "./test_tcp_estats.o";
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	CHECK(err, "", "err %d errno %d\n", err, errno);
	bpf_object__close(obj);
/* Widen a pointer to the __u64 representation used by bpf_attr fields.
 * The intermediate unsigned long hop avoids sign-extension surprises on
 * 32-bit builds.
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	return (__u64)addr;
}
/* Read /proc/sys/net/core/bpf_jit_enable and report whether the BPF JIT
 * is turned on.  Returns false when the sysctl is absent or unreadable.
 *
 * Fix: open(2) was invoked as open(path, 0, O_RDONLY), i.e. with 0 in
 * the flags slot and O_RDONLY in the (ignored) mode slot — it only
 * worked by accident because O_RDONLY happens to be 0.  Pass the flag
 * where it belongs.
 */
static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		/* Any first byte other than '0' (JIT off) counts as on. */
		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}
/* Exercise the BPF object-ID introspection syscalls: load test_obj_id.o
 * twice, then verify bpf_obj_get_info_by_fd() contents for both maps and
 * programs, and walk the global ID spaces with bpf_prog_get_next_id() /
 * bpf_map_get_next_id(), checking that both loaded instances are found
 * and that their info round-trips through id -> fd -> info.
 */
static void test_bpf_obj_id(void)
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

	struct bpf_object *objs[nr_iters];
	int prog_fds[nr_iters], map_fds[nr_iters];
	/* +1 to test for the info_len returned by kernel */
	struct bpf_prog_info prog_infos[nr_iters + 1];
	struct bpf_map_info map_infos[nr_iters + 1];
	/* Each prog only uses one map. +1 to test nr_map_ids
	 * returned by kernel.
	 */
	__u32 map_ids[nr_iters + 1];
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	uid_t my_uid = getuid();
	time_t now, load_time;

	/* ID 0 never exists: both getters must fail with ENOENT. */
	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

	err = bpf_map_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

	for (i = 0; i < nr_iters; i++)

	/* Check bpf_obj_get_info_by_fd() */
	bzero(zeros, sizeof(zeros));
	for (i = 0; i < nr_iters; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
				    &objs[i], &prog_fds[i]);
		/* test_obj_id.o is a dumb prog. It should never fail
		 * to load.
		 */
		/* Insert a magic value to the map */
		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
		assert(map_fds[i] >= 0);
		err = bpf_map_update_elem(map_fds[i], &array_key,
					  &array_magic_value, 0);

		/* Check getting map info: info_len is passed oversized (x2)
		 * so the kernel-trimmed length can also be verified.
		 */
		info_len = sizeof(struct bpf_map_info) * 2;
		bzero(&map_infos[i], info_len);
		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
			  map_infos[i].key_size != sizeof(__u32) ||
			  map_infos[i].value_size != sizeof(__u64) ||
			  map_infos[i].max_entries != 1 ||
			  map_infos[i].map_flags != 0 ||
			  info_len != sizeof(struct bpf_map_info) ||
			  strcmp((char *)map_infos[i].name, expected_map_name),
			  "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
			  info_len, sizeof(struct bpf_map_info),
			  map_infos[i].key_size,
			  map_infos[i].value_size,
			  map_infos[i].max_entries,
			  map_infos[i].map_flags,
			  map_infos[i].name, expected_map_name))

		/* Check getting prog info: again with an oversized info_len;
		 * nr_map_ids is set to 2 while only 1 is expected back.
		 */
		info_len = sizeof(struct bpf_prog_info) * 2;
		bzero(&prog_infos[i], info_len);
		bzero(jited_insns, sizeof(jited_insns));
		bzero(xlated_insns, sizeof(xlated_insns));
		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
		prog_infos[i].jited_prog_len = sizeof(jited_insns);
		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
		prog_infos[i].nr_map_ids = 2;
		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
		/* load_time is reported relative to boot; convert to wall
		 * clock so it can be compared against `now` +/- 60s.
		 */
		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
			+ (prog_infos[i].load_time / nsec_per_sec);
			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
			  info_len != sizeof(struct bpf_prog_info) ||
			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
			  !prog_infos[i].xlated_prog_len ||
			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
			  load_time < now - 60 || load_time > now + 60 ||
			  prog_infos[i].created_by_uid != my_uid ||
			  prog_infos[i].nr_map_ids != 1 ||
			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
			  strcmp((char *)prog_infos[i].name, expected_prog_name),
			  "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
			  info_len, sizeof(struct bpf_prog_info),
			  prog_infos[i].jited_prog_len,
			  prog_infos[i].xlated_prog_len,
			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
			  prog_infos[i].created_by_uid, my_uid,
			  prog_infos[i].nr_map_ids, 1,
			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
			  prog_infos[i].name, expected_prog_name))

	/* Check bpf_prog_get_next_id() */
	while (!bpf_prog_get_next_id(next_id, &next_id)) {
		struct bpf_prog_info prog_info = {};
		info_len = sizeof(prog_info);

		prog_fd = bpf_prog_get_fd_by_id(next_id);
		if (prog_fd < 0 && errno == ENOENT)
			/* The bpf_prog is in the dead row */
		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
			  "prog_fd %d next_id %d errno %d\n",
			  prog_fd, next_id, errno))
		/* Is this one of the two programs we loaded? */
		for (i = 0; i < nr_iters; i++)
			if (prog_infos[i].id == next_id)
		/* Negative test: the kernel must reject
		 * prog_info.nr_map_ids = 1
		 * prog_info.map_ids = NULL
		 * with EFAULT.
		 */
		prog_info.nr_map_ids = 1;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		if (CHECK(!err || errno != EFAULT,
			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
		bzero(&prog_info, sizeof(prog_info));
		info_len = sizeof(prog_info);

		/* Re-fetch through the id-derived fd and compare against the
		 * info captured at load time (insns pointers cleared first —
		 * they are process-local addresses).
		 */
		saved_map_id = *(int *)(prog_infos[i].map_ids);
		prog_info.map_ids = prog_infos[i].map_ids;
		prog_info.nr_map_ids = 2;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		prog_infos[i].jited_prog_insns = 0;
		prog_infos[i].xlated_prog_insns = 0;
		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
		      memcmp(&prog_info, &prog_infos[i], info_len) ||
		      *(int *)prog_info.map_ids != saved_map_id,
		      "get-prog-info(next_id->fd)",
		      "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
		      err, errno, info_len, sizeof(struct bpf_prog_info),
		      memcmp(&prog_info, &prog_infos[i], info_len),
		      *(int *)prog_info.map_ids, saved_map_id);
	CHECK(nr_id_found != nr_iters,
	      "check total prog id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

	/* Check bpf_map_get_next_id() */
	while (!bpf_map_get_next_id(next_id, &next_id)) {
		struct bpf_map_info map_info = {};
		info_len = sizeof(map_info);

		map_fd = bpf_map_get_fd_by_id(next_id);
		if (map_fd < 0 && errno == ENOENT)
			/* The bpf_map is in the dead row */
		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
			  "map_fd %d next_id %u errno %d\n",
			  map_fd, next_id, errno))
		/* Is this one of the two maps we created? */
		for (i = 0; i < nr_iters; i++)
			if (map_infos[i].id == next_id)
		/* The magic value written earlier must still be there. */
		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
		      memcmp(&map_info, &map_infos[i], info_len) ||
		      array_value != array_magic_value,
		      "check get-map-info(next_id->fd)",
		      "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
		      err, errno, info_len, sizeof(struct bpf_map_info),
		      memcmp(&map_info, &map_infos[i], info_len),
		      array_value, array_magic_value);
	CHECK(nr_id_found != nr_iters,
	      "check total map id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

	for (i = 0; i < nr_iters; i++)
		bpf_object__close(objs[i]);
/* Load test_pkt_md_access.o as SCHED_CLS and run it 10 times over the
 * v4 packet; the program validates skb metadata access and must return 0.
 */
static void test_pkt_md_access(void)
	const char *file = "./test_pkt_md_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);

	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	bpf_object__close(obj);
/* Validate kernel-side name checking: for each test-vector name, attempt
 * a raw BPF_PROG_LOAD and a raw BPF_MAP_CREATE with that name copied into
 * attr.prog_name / attr.map_name, and check success or the expected errno
 * (names longer than the field, or with invalid characters such as '\n',
 * must be rejected with EINVAL).
 */
static void test_obj_name(void)
	{ "_123456789ABCDE", 1, 0 },
	{ "_123456789ABCDEF", 0, EINVAL },
	{ "_123456789ABCD\n", 0, EINVAL },
	/* Minimal one-instruction program (r0 = 0) — only the name matters. */
	struct bpf_insn prog[] = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		size_t name_len = strlen(tests[i].name) + 1;

		/* test different attr.prog_name during BPF_PROG_LOAD */
		ncopy = name_len < sizeof(attr.prog_name) ?
			name_len : sizeof(attr.prog_name);
		bzero(&attr, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
		attr.insns = ptr_to_u64(prog);
		attr.license = ptr_to_u64("");
		memcpy(attr.prog_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-prog-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		/* test different attr.map_name during BPF_MAP_CREATE */
		ncopy = name_len < sizeof(attr.map_name) ?
			name_len : sizeof(attr.map_name);
		bzero(&attr, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.max_entries = 1;
		memcpy(attr.map_name, tests[i].name, ncopy);
		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-map-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);
/* Attach up to three copies of test_tracepoint.o to the same
 * sched:sched_switch perf event and exercise PERF_EVENT_IOC_QUERY_BPF:
 * an empty prog array, program counts as each copy attaches, and the
 * negative paths (bad user pointer -> EFAULT, undersized ids array ->
 * ENOSPC), finishing with a full id comparison against the saved ids.
 */
static void test_tp_attach_query(void)
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;

	/* Resolve the tracepoint id from tracefs. */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
	bytes = read(efd, buf, sizeof(buf));
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	/* Query buffer with room for num_progs returned ids. */
	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))

		/* Remember this program's kernel-assigned id. */
		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",

		/* check NULL prog array query */
		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != 0,
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",

		/* try to get # of programs only */
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != 2,
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))

		/* try a few negative tests */
		/* invalid query pointer */
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
			    (struct perf_event_query_bpf *)0x1);
		if (CHECK(!err || errno != EFAULT,
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d\n", err, errno))

		/* no enough space */
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))

		/* Full query: i+1 programs attached so far; the returned ids
		 * must match the saved ones in attach order.
		 */
		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))

	/* Teardown in reverse order of setup. */
	for (; i >= 0; i--) {
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
		bpf_object__close(obj[i]);
/* Verify that every key present in @map1_fd can be looked up in @map2_fd
 * (lookup values are discarded into a scratch buffer sized for the
 * largest stack-trace entry).  Presumably returns non-zero on the first
 * miss — the error branches are not visible here.
 */
static int compare_map_keys(int map1_fd, int map2_fd)
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];

	/* First key, then walk the remainder of map1's key space. */
	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
/* For every key in the stack map @smap_fd, byte-compare its value
 * against the entry with the same key in the array map @amap_fd; each
 * value is @stack_trace_len bytes.  Presumably returns non-zero on the
 * first mismatch — the error branches are not visible here.
 */
static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		/* Byte-wise comparison of the two stack traces. */
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
		/* Advance: from the second iteration on, resume from next_key. */
		next_key_p = &next_key;
/* Attach test_stacktrace_map.o to the sched:sched_switch tracepoint, let
 * it collect stack ids for a while, then check that stackid_hmap and
 * stackmap contain the same key set (both directions) and that stackmap
 * and stack_amap hold identical stack traces.
 */
static void test_stacktrace_map()
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_map.o";
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, val, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))

	/* Get the ID for the sched/sched_switch tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
	bytes = read(efd, buf, sizeof(buf));
	if (bytes <= 0 || bytes >= sizeof(buf))

	/* Open the perf event and attach bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);

	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (stack_amap_fd < 0)

	/* give some time for bpf program run */
	/* disable stack trace collection */
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;
	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;
	stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;
	goto disable_pmu_noerr;
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
	bpf_object__close(obj);
/* Same key-consistency checks as test_stacktrace_map(), but attaching
 * the program as a RAW_TRACEPOINT on sched_switch via
 * bpf_raw_tracepoint_open() instead of a perf event.
 */
static void test_stacktrace_map_raw_tp()
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int efd, err, prog_fd;
	__u32 key, val, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))

	efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))

	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)

	/* give some time for bpf program run */
	/* disable stack trace collection */
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
	goto close_prog_noerr;
	bpf_object__close(obj);
1103 static int extract_build_id(char *build_id, size_t size)
1109 fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
1113 if (getline(&line, &len, fp) == -1)
1119 memcpy(build_id, line, len);
1120 build_id[len] = '\0';
/* Attach test_stacktrace_build_id.o to the random:urandom_read
 * tracepoint, trigger it by running ./urandom_read, then verify the
 * stack maps are key-consistent, that at least one collected stack
 * frame carries the binary's build ID (cross-checked against readelf
 * output), and that stackmap and stack_amap agree byte-for-byte.
 */
static void test_stacktrace_build_id(void)
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))

	/* Get the ID for the random/urandom_read tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
	bytes = read(efd, buf, sizeof(buf));
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))

	/* Open the perf event and attach bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",

	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))

	/* Trigger the urandom_read tracepoint. */
	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	assert(system("./urandom_read") == 0);
	/* disable stack trace collection */
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))

	/* Reference build ID, straight from readelf. */
	err = extract_build_id(buf, 256);
	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
		/* Hex-encode each valid frame's 20-byte build ID and look
		 * for it inside the readelf output.
		 */
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	/* stack_map_get_build_id_offset() is racy and sometimes can return
	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
	 * try it one more time.
	 */
	if (build_id_matches < 1 && retry--) {
		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
		bpf_object__close(obj);
		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))

	stack_trace_len = PERF_MAX_STACK_DEPTH
			  * sizeof(struct bpf_stack_build_id);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
	      "err %d errno %d\n", err, errno);

	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
	bpf_object__close(obj);
/*
 * Like test_stacktrace_build_id(), but drives the stack-collecting BPF
 * program from NMI context: attach test_stacktrace_build_id.o to a
 * hardware-cycles perf event (freq 5000), generate activity with
 * ./urandom_read, then verify that stackid_hmap and stackmap contain the
 * same keys and that the build ID extracted from ./urandom_read via
 * readelf appears in at least one BPF_STACK_BUILD_ID_VALID stack entry.
 * compare_stack_ips() is intentionally skipped (see comment near the end).
 * NOTE(review): this extract is missing lines (goto targets, the
 * do/while header, cleanup labels) — control flow inferred, confirm
 * against the full file.
 */
1291 static void test_stacktrace_build_id_nmi(void)
1293 int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
1294 const char *file = "./test_stacktrace_build_id.o";
1295 int err, pmu_fd, prog_fd;
/* NMI source: sample CPU cycles at a fixed frequency. */
1296 struct perf_event_attr attr = {
1297 .sample_freq = 5000,
1299 .type = PERF_TYPE_HARDWARE,
1300 .config = PERF_COUNT_HW_CPU_CYCLES,
1302 __u32 key, previous_key, val, duration = 0;
1303 struct bpf_object *obj;
/* One build_id+offset record per possible stack frame. */
1306 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
1307 int build_id_matches = 0;
1311 err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
1312 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
/* pid -1 / cpu 0: system-wide event bound to CPU 0. */
1315 pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
1316 0 /* cpu 0 */, -1 /* group id */,
1318 if (CHECK(pmu_fd < 0, "perf_event_open",
1319 "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
1323 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
1324 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
1328 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
1329 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
/* Look up the maps declared by the loaded BPF object. */
1334 control_map_fd = bpf_find_map(__func__, obj, "control_map");
1335 if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
1336 "err %d errno %d\n", err, errno))
1339 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
1340 if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
1341 "err %d errno %d\n", err, errno))
1344 stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
1345 if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
1349 stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
1350 if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
1351 "err %d errno %d\n", err, errno))
/* Generate samples; taskset pins urandom_read to the monitored CPU 0. */
1354 assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
1356 assert(system("taskset 0x1 ./urandom_read 100000") == 0);
1357 /* disable stack trace collection */
1360 bpf_map_update_elem(control_map_fd, &key, &val, 0);
1362 /* for every element in stackid_hmap, we can find a corresponding one
1363 * in stackmap, and vise versa.
1365 err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
1366 if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
1367 "err %d errno %d\n", err, errno))
1370 err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
1371 if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
1372 "err %d errno %d\n", err, errno))
/* Reference build ID of ./urandom_read, read with readelf into buf. */
1375 err = extract_build_id(buf, 256);
1377 if (CHECK(err, "get build_id with readelf",
1378 "err %d errno %d\n", err, errno))
1381 err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
1382 if (CHECK(err, "get_next_key from stackmap",
1383 "err %d, errno %d\n", err, errno))
1389 err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs)&#59;
1390 if (CHECK(err, "lookup_elem from stackmap",
1391 "err %d, errno %d\n", err, errno))
/* Hex-encode each valid frame's 20-byte build ID and compare with buf. */
1393 for (i = 0; i &lt; PERF_MAX_STACK_DEPTH; ++i)
1394 if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &amp;&amp;
1395 id_offs[i].offset != 0) {
1396 for (j = 0; j &lt; 20; ++j)
1397 sprintf(build_id + 2 * j, "%02x",
1398 id_offs[i].build_id[j] &amp; 0xff);
1399 if (strstr(buf, build_id) != NULL)
1400 build_id_matches = 1;
1403 } while (bpf_map_get_next_key(stackmap_fd, &amp;previous_key, &amp;key) == 0);
1405 /* stack_map_get_build_id_offset() is racy and sometimes can return
1406 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
1407 * try it one more time.
/* Retry path: tear everything down and (presumably) jump back to the
 * reload point — the goto target is not visible in this extract. */
1409 if (build_id_matches &lt; 1 &amp;&amp; retry--) {
1410 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1412 bpf_object__close(obj);
1413 printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
1418 if (CHECK(build_id_matches &lt; 1, "build id match",
1419 "Didn't find expected build ID from the map\n"))
1423 * We intentionally skip compare_stack_ips(). This is because we
1424 * only support one in_nmi() ips-to-build_id translation per cpu
1425 * at any time, thus stack_amap here will always fallback to
1426 * BPF_STACK_BUILD_ID_IP;
/* Cleanup: disable the event, close the BPF object. */
1430 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1436 bpf_object__close(obj);
/* Number of syscalls triggered (and max events consumed) by the raw_tp
 * get-stack test below. */
1439 #define MAX_CNT_RAWTP 10ull
/* Max frames per stack buffer in the record emitted by the BPF side. */
1440 #define MAX_STACK_RAWTP 100
/*
 * Per-event record that test_get_stack_rawtp.o pushes through the perf
 * event array: actual byte sizes first, then fixed-capacity buffers for
 * the kernel stack, user stack, and user stack with build-id translation.
 * NOTE(review): the struct's closing brace is not visible in this extract.
 */
1441 struct get_stack_trace_t {
1443 int kern_stack_size;
1444 int user_stack_size;
1445 int user_stack_buildid_size;
1446 __u64 kern_stack[MAX_STACK_RAWTP];
1447 __u64 user_stack[MAX_STACK_RAWTP];
1448 struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
/*
 * Perf-buffer callback for test_get_stack_raw_tp(): validate one event's
 * stacks and return a LIBBPF_PERF_EVENT_* verdict (ERROR on a bad stack,
 * DONE once MAX_CNT_RAWTP events were seen, CONT otherwise).
 * A kernel stack is "good" if it contains ___bpf_prog_run (interpreter
 * frame); with JIT enabled that symbol is absent, so any non-empty stack
 * is accepted. A user stack is "good" if both the raw and the build-id
 * variants are non-empty.
 * NOTE(review): several lines (counters such as cnt/num_stack/found,
 * the else branch header, brace closers) are missing from this extract;
 * comments describe the visible logic only.
 */
1451 static int get_stack_print_output(void *data, int size)
1453 bool good_kern_stack = false, good_user_stack = false;
1454 const char *nonjit_func = "___bpf_prog_run";
1455 struct get_stack_trace_t *e = data;
/* Short record: raw array of instruction pointers, not a full struct. */
1462 if (size &lt; sizeof(struct get_stack_trace_t)) {
1463 __u64 *raw_data = data;
1466 num_stack = size / sizeof(__u64);
1467 /* If jit is enabled, we do not have a good way to
1468 * verify the sanity of the kernel stack. So we
1469 * just assume it is good if the stack is not empty.
1470 * This could be improved in the future.
1473 found = num_stack &gt; 0;
1475 for (i = 0; i &lt; num_stack; i++) {
1476 ks = ksym_search(raw_data[i]);
1477 if (strcmp(ks-&gt;name, nonjit_func) == 0) {
1484 good_kern_stack = true;
1485 good_user_stack = true;
/* Full struct record: validate the embedded kernel stack. */
1488 num_stack = e-&gt;kern_stack_size / sizeof(__u64);
1490 good_kern_stack = num_stack &gt; 0;
1492 for (i = 0; i &lt; num_stack; i++) {
1493 ks = ksym_search(e-&gt;kern_stack[i]);
1494 if (strcmp(ks-&gt;name, nonjit_func) == 0) {
1495 good_kern_stack = true;
1500 if (e-&gt;user_stack_size &gt; 0 &amp;&amp; e-&gt;user_stack_buildid_size &gt; 0)
1501 good_user_stack = true;
1503 if (!good_kern_stack || !good_user_stack)
1504 return LIBBPF_PERF_EVENT_ERROR;
/* Stop polling after the expected number of events. */
1506 if (cnt == MAX_CNT_RAWTP)
1507 return LIBBPF_PERF_EVENT_DONE;
1509 return LIBBPF_PERF_EVENT_CONT;
/*
 * End-to-end test for bpf_get_stack() from a raw tracepoint: load
 * test_get_stack_rawtp.o, attach it to the sys_enter raw tracepoint,
 * wire its "perfmap" perf event array to a PERF_COUNT_SW_BPF_OUTPUT
 * event, trigger MAX_CNT_RAWTP syscalls via nanosleep, and validate
 * each emitted record in get_stack_print_output().
 * NOTE(review): goto targets after the CHECKs and the cleanup labels
 * are not visible in this extract.
 */
1512 static void test_get_stack_raw_tp(void)
1514 const char *file = "./test_get_stack_rawtp.o";
1515 int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
1516 struct perf_event_attr attr = {};
/* 10ns sleep — just enough to enter the kernel and fire sys_enter. */
1517 struct timespec tv = {0, 10};
1518 __u32 key = 0, duration = 0;
1519 struct bpf_object *obj;
1521 err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &amp;obj, &amp;prog_fd);
1522 if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
1525 efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
1526 if (CHECK(efd &lt; 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
1529 perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
1530 if (CHECK(perfmap_fd &lt; 0, "bpf_find_map", "err %d errno %d\n",
/* Symbol table needed by get_stack_print_output()'s ksym_search(). */
1534 err = load_kallsyms();
1535 if (CHECK(err &lt; 0, "load_kallsyms", "err %d errno %d\n", err, errno))
/* Software event that carries bpf_perf_event_output() records. */
1538 attr.sample_type = PERF_SAMPLE_RAW;
1539 attr.type = PERF_TYPE_SOFTWARE;
1540 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
1541 pmu_fd = syscall(__NR_perf_event_open, &amp;attr, getpid()/*pid*/, -1/*cpu*/,
1543 if (CHECK(pmu_fd &lt; 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
/* Point perfmap slot 0 at the event so the BPF program can output. */
1547 err = bpf_map_update_elem(perfmap_fd, &amp;key, &amp;pmu_fd, BPF_ANY);
1548 if (CHECK(err &lt; 0, "bpf_map_update_elem", "err %d errno %d\n", err,
1552 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
1553 if (CHECK(err &lt; 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
1557 err = perf_event_mmap(pmu_fd);
1558 if (CHECK(err &lt; 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
1561 /* trigger some syscall action */
1562 for (i = 0; i &lt; MAX_CNT_RAWTP; i++)
1563 nanosleep(&amp;tv, NULL);
/* Drain and validate events; poller returns when callback says DONE. */
1565 err = perf_event_poller(pmu_fd, get_stack_print_output);
1566 if (CHECK(err &lt; 0, "perf_event_poller", "err %d errno %d\n", err, errno))
1569 goto close_prog_noerr;
1573 bpf_object__close(obj);
/*
 * Exercise BPF_TASK_FD_QUERY against a raw tracepoint fd: attach
 * test_get_stack_rawtp.o to sys_enter, then query (getpid(), efd) four
 * ways — normal buffer, len = 0, NULL buffer, and a 3-byte buffer that
 * must fail with ENOSPC while still reporting the full name length and
 * a truncated, NUL-terminated prefix ("sy").
 * NOTE(review): goto targets and cleanup labels are not visible in this
 * extract.
 */
1576 static void test_task_fd_query_rawtp(void)
1578 const char *file = "./test_get_stack_rawtp.o";
1579 __u64 probe_offset, probe_addr;
1580 __u32 len, prog_id, fd_type;
1581 struct bpf_object *obj;
1582 int efd, err, prog_fd;
1586 err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &amp;obj, &amp;prog_fd);
1587 if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
1590 efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
1591 if (CHECK(efd &lt; 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
1594 /* query (getpid(), efd) */
1596 err = bpf_task_fd_query(getpid(), efd, 0, buf, &amp;len, &amp;prog_id,
1597 &amp;fd_type, &amp;probe_offset, &amp;probe_addr);
1598 if (CHECK(err &lt; 0, "bpf_task_fd_query", "err %d errno %d\n", err,
/* Expect a raw-tracepoint fd whose name is exactly "sys_enter". */
1602 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &amp;&amp;
1603 strcmp(buf, "sys_enter") == 0;
1604 if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
/* len = 0 query: kernel should still report the required length. */
1610 err = bpf_task_fd_query(getpid(), efd, 0, buf, &amp;len, &amp;prog_id,
1611 &amp;fd_type, &amp;probe_offset, &amp;probe_addr);
1612 if (CHECK(err &lt; 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
1615 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &amp;&amp;
1616 len == strlen("sys_enter");
1617 if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
1620 /* test empty buffer */
1622 err = bpf_task_fd_query(getpid(), efd, 0, 0, &amp;len, &amp;prog_id,
1623 &amp;fd_type, &amp;probe_offset, &amp;probe_addr);
1624 if (CHECK(err &lt; 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
1627 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &amp;&amp;
1628 len == strlen("sys_enter");
1629 if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
1632 /* test smaller buffer */
/* A too-small buffer must fail with ENOSPC (err &lt; 0), so the success
 * path here is an error for the test. */
1634 err = bpf_task_fd_query(getpid(), efd, 0, buf, &amp;len, &amp;prog_id,
1635 &amp;fd_type, &amp;probe_offset, &amp;probe_addr);
1636 if (CHECK(err &gt;= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
1637 "err %d errno %d\n", err, errno))
1639 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &amp;&amp;
1640 len == strlen("sys_enter") &amp;&amp;
1641 strcmp(buf, "sy") == 0;
1642 if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
1645 goto close_prog_noerr;
1649 bpf_object__close(obj);
/*
 * Core of the tracepoint task_fd_query test: load test_tracepoint.o,
 * open the classic tracepoint @probe_name via its debugfs event id,
 * attach the program to the resulting perf event fd, then verify that
 * bpf_task_fd_query(getpid(), pmu_fd) reports BPF_FD_TYPE_TRACEPOINT
 * with name @tp_name.
 * @probe_name: debugfs path fragment, e.g. "sched/sched_switch".
 * @tp_name:    expected name returned by the query.
 * NOTE(review): goto targets/cleanup labels and some declarations (buf,
 * duration) are not visible in this extract.
 */
1652 static void test_task_fd_query_tp_core(const char *probe_name,
1653 const char *tp_name)
1655 const char *file = "./test_tracepoint.o";
1656 int err, bytes, efd, prog_fd, pmu_fd;
1657 struct perf_event_attr attr = {};
1658 __u64 probe_offset, probe_addr;
1659 __u32 len, prog_id, fd_type;
1660 struct bpf_object *obj;
1664 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &amp;obj, &amp;prog_fd);
1665 if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
/* Read the tracepoint's numeric id from debugfs for perf_event_attr. */
1668 snprintf(buf, sizeof(buf),
1669 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
1670 efd = open(buf, O_RDONLY, 0);
1671 if (CHECK(efd &lt; 0, "open", "err %d errno %d\n", efd, errno))
1673 bytes = read(efd, buf, sizeof(buf));
1675 if (CHECK(bytes &lt;= 0 || bytes &gt;= sizeof(buf), "read",
1676 "bytes %d errno %d\n", bytes, errno))
1679 attr.config = strtol(buf, NULL, 0);
1680 attr.type = PERF_TYPE_TRACEPOINT;
1681 attr.sample_type = PERF_SAMPLE_RAW;
1682 attr.sample_period = 1;
1683 attr.wakeup_events = 1;
1684 pmu_fd = syscall(__NR_perf_event_open, &amp;attr, -1 /* pid */,
1685 0 /* cpu 0 */, -1 /* group id */,
/* FIXME(review): this CHECK tests 'err', which is stale from the
 * earlier bpf_prog_load/read calls — a failed perf_event_open is not
 * detected. It should check 'pmu_fd &lt; 0' (compare the identical
 * pattern in test_stacktrace_build_id_nmi above). */
1687 if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
1690 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
1691 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
1695 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
1696 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
1700 /* query (getpid(), pmu_fd) */
1702 err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &amp;len, &amp;prog_id,
1703 &amp;fd_type, &amp;probe_offset, &amp;probe_addr);
1704 if (CHECK(err &lt; 0, "bpf_task_fd_query", "err %d errno %d\n", err,
1708 err = (fd_type == BPF_FD_TYPE_TRACEPOINT) &amp;&amp; !strcmp(buf, tp_name);
1709 if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
1714 goto close_prog_noerr;
1721 bpf_object__close(obj);
/*
 * Run the tracepoint task_fd_query test against two well-known
 * tracepoints (the second argument of each call — the expected tp name —
 * is on lines not visible in this extract).
 */
1724 static void test_task_fd_query_tp(void)
1726 test_task_fd_query_tp_core("sched/sched_switch",
1728 test_task_fd_query_tp_core("syscalls/sys_enter_read",
/* Tail of main(): record whether the kernel BPF JIT is on (used by
 * get_stack_print_output() to decide how strictly to validate kernel
 * stacks), run every test, then print a summary and exit non-zero if
 * any CHECK failed. The main() header and several test calls fall in
 * gaps of this extract. */
1734 jit_enabled = is_jit_enabled();
1738 test_xdp_adjust_tail();
1740 test_xdp_noinline();
1743 test_pkt_md_access();
1745 test_tp_attach_query();
1746 test_stacktrace_map();
1747 test_stacktrace_build_id();
1748 test_stacktrace_build_id_nmi();
1749 test_stacktrace_map_raw_tp();
1750 test_get_stack_raw_tp();
1751 test_task_fd_query_rawtp();
1752 test_task_fd_query_tp();
/* pass_cnt/error_cnt are accumulated by the CHECK() macro. */
1754 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
1755 return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;