/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <linux/bpf.h>

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly, so provide the per-arch
 * syscall numbers as a fallback.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif
/* Convert a userspace pointer to the 64-bit field format bpf_attr expects. */
static inline __u64 ptr_to_u64(const void *ptr)
{
	/* Cast via unsigned long so 32-bit pointers zero-extend cleanly. */
	return (__u64) (unsigned long) ptr;
}
56 static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
59 return syscall(__NR_bpf, cmd, attr, size);
/*
 * Create a BPF map, optionally pinned to a NUMA node.
 * @node: NUMA node to allocate the map on, or a negative value for
 *        no NUMA constraint.
 * Returns the new map fd, or -1 with errno set on failure.
 */
int bpf_create_map_node(enum bpf_map_type map_type, int key_size,
			int value_size, int max_entries, __u32 map_flags,
			int node)
{
	union bpf_attr attr;

	/* Unused attr fields must be zeroed for the kernel's size check. */
	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	if (node >= 0) {
		/* numa_node is only honored when BPF_F_NUMA_NODE is set. */
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
/* Create a BPF map with no NUMA-node preference (node = -1). */
int bpf_create_map(enum bpf_map_type map_type, int key_size,
		   int value_size, int max_entries, __u32 map_flags)
{
	return bpf_create_map_node(map_type, key_size, value_size,
				   max_entries, map_flags, -1);
}
/*
 * Create a map-in-map (array/hash of maps), optionally NUMA-pinned.
 * @inner_map_fd: fd of a template map defining the inner maps' type.
 * @node: NUMA node, or negative for no constraint.
 * Returns the new outer map fd, or -1 with errno set on failure.
 */
int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size,
			       int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	/* Outer map values are the inner maps' fds/ids: always 4 bytes. */
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
/* Create a map-in-map with no NUMA-node preference (node = -1). */
int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size,
			  int inner_map_fd, int max_entries, __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, key_size, inner_map_fd,
					  max_entries, map_flags, -1);
}
/*
 * Load a BPF program into the kernel.
 *
 * The first load attempt runs without a verifier log (cheaper); only if
 * it fails and the caller supplied a log buffer do we retry with logging
 * enabled so the caller can inspect the verifier's rejection reason.
 *
 * Returns the program fd on success, or -1 with errno set on failure.
 */
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		     size_t insns_cnt, const char *license,
		     __u32 kern_version, char *log_buf, size_t log_buf_sz)
{
	int fd;
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(NULL);
	attr.log_size = 0;
	attr.log_level = 0;
	attr.kern_version = kern_version;

	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd >= 0 || !log_buf || !log_buf_sz)
		return fd;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = 1;
	log_buf[0] = 0;		/* ensure a valid string even if kernel writes nothing */
	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
/*
 * Load a BPF program with explicit verifier-log and alignment control,
 * primarily for testing the verifier itself.
 * @strict_alignment: non-zero sets BPF_F_STRICT_ALIGNMENT.
 * @log_level: verifier verbosity; log is always captured into @log_buf.
 * Returns the program fd, or -1 with errno set on failure.
 */
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, int strict_alignment,
		       const char *license, __u32 kern_version,
		       char *log_buf, size_t log_buf_sz, int log_level)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;		/* caller always gets a valid string back */
	attr.kern_version = kern_version;
	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
/*
 * Create or update an element in map @fd.
 * @flags: BPF_ANY / BPF_NOEXIST / BPF_EXIST update semantics.
 * Returns 0 on success, or -1 with errno set on failure.
 */
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
/*
 * Look up the element for @key in map @fd; the value is copied into
 * @value (which must be at least value_size bytes).
 * Returns 0 on success, or -1 with errno set (ENOENT if absent).
 */
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
/*
 * Delete the element for @key from map @fd.
 * Returns 0 on success, or -1 with errno set (ENOENT if absent).
 */
int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
/*
 * Fetch into @next_key the key following @key in map @fd; used to
 * iterate a map. Returns 0 on success, or -1 with errno set
 * (ENOENT when @key was the last element).
 */
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
/*
 * Pin the BPF object @fd (map or program) at @pathname in a bpffs mount.
 * Returns 0 on success, or -1 with errno set on failure.
 */
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}
/*
 * Open the BPF object pinned at @pathname.
 * Returns a new fd for the object, or -1 with errno set on failure.
 */
int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
/*
 * Attach program @prog_fd to @target_fd (e.g. a cgroup fd) at attach
 * point @type with BPF_F_* @flags.
 * Returns 0 on success, or -1 with errno set on failure.
 */
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd	   = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = type;
	attr.attach_flags  = flags;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}
/*
 * Detach the program attached to @target_fd at attach point @type.
 * Returns 0 on success, or -1 with errno set on failure.
 */
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
/*
 * Run program @prog_fd @repeat times on input @data of @size bytes
 * (BPF_PROG_TEST_RUN). Output buffer and the out-parameters
 * @size_out/@retval/@duration are each optional (may be NULL).
 * Returns 0 on success, or -1 with errno set on failure.
 */
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	bzero(&attr, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	/* Only report results the caller asked for. */
	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
}
/*
 * Fetch into @next_id the id of the loaded program following @start_id;
 * used to enumerate all programs. Returns 0 on success, or -1 with
 * errno set (ENOENT when @start_id was the last id).
 */
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}
/*
 * Fetch into @next_id the id of the map following @start_id; used to
 * enumerate all maps. Returns 0 on success, or -1 with errno set
 * (ENOENT when @start_id was the last id).
 */
int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}
/*
 * Open a new fd for the loaded program with id @id.
 * Returns the fd, or -1 with errno set on failure.
 */
int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}
/*
 * Open a new fd for the map with id @id.
 * Returns the fd, or -1 with errno set on failure.
 */
int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}
/*
 * Fetch kernel info for BPF object @prog_fd (program or map) into
 * @info. @info_len is in/out: the caller passes the buffer size and,
 * on success, receives the number of bytes the kernel filled in.
 * Returns 0 on success, or -1 with errno set on failure.
 */
int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return err;
}