// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>
#include <linux/btf_ids.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function,
 * its verifier_ops->get_func_proto() callback should return
 * bpf_map_lookup_elem_proto so that the verifier can properly check the
 * arguments.
 *
 * Different map implementations will rely on rcu in map methods
 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 * if a program is allowed to access maps, so check rcu_read_lock_held()
 * in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
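
/* As the comment above describes, a subsystem exposes these map helpers by
 * returning their protos from its get_func_proto() callback. A minimal
 * illustrative sketch; example_func_proto is hypothetical and not part of
 * this file:
 */
#if 0
static const struct bpf_func_proto *
example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	default:
		/* fall back to the common helpers at the end of this file */
		return bpf_base_func_proto(func_id);
	}
}
#endif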

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};

BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
}

const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
	.func		= bpf_map_lookup_percpu_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
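
/* The returned u64 packs two ids: the tgid in the upper 32 bits and the
 * pid (the thread id, in user space terms) in the lower 32 bits. An
 * illustrative BPF-program-side unpacking, as a sketch only:
 */
#if 0
	u64 pid_tgid = bpf_get_current_pid_tgid();
	u32 tgid = pid_tgid >> 32;	/* user space "process id" */
	u32 pid = (u32)pid_tgid;	/* user space "thread id" */
#endif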

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
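
/* Illustrative BPF-program-side use of the two helpers above (a sketch;
 * struct val is hypothetical and not part of this file). The verifier
 * enforces that the lock is taken and released within the same program
 * and that the map value embeds a struct bpf_spin_lock:
 */
#if 0
struct val {
	struct bpf_spin_lock lock;
	int counter;
};

/* with v pointing at a map value of type struct val: */
	bpf_spin_lock(&v->lock);
	v->counter++;
	bpf_spin_unlock(&v->lock);
#endif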

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now,
	 * but provides an ability to extend the API.
	 * The verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	struct bpf_cg_run_ctx *ctx;
	void *ptr;

	/* get current cgroup storage from BPF run context */
	ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
	storage = ctx->prog_item->cgroup_storage[stype];

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif
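
/* Illustrative BPF-program-side call of bpf_strtol() (a sketch only). The
 * positive return value is the number of characters consumed, including
 * leading whitespace and sign, which is distinct from the parsed result:
 */
#if 0
	const char buf[] = "  -42 rest";
	long res;
	int consumed = bpf_strtol(buf, sizeof(buf) - 1, 0 /* detect base */, &res);

	/* on success here: consumed == 5, res == -42 */
	if (consumed < 0)
		return consumed;	/* -EINVAL or -ERANGE */
#endif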

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

const struct bpf_func_proto bpf_strncmp_proto = {
	.func		= bpf_strncmp,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
};

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func		= bpf_copy_from_user_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_BTF_ID,
	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type	= ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};
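
/* Illustrative BPF-program-side use of the two percpu helpers above (a
 * sketch; bpf_prog_active is just one example of a percpu kernel symbol
 * declared with __ksym in a BPF program):
 */
#if 0
extern const int bpf_prog_active __ksym;	/* percpu variable */

	const int *cnt;

	cnt = bpf_per_cpu_ptr(&bpf_prog_active, 0);	/* may be NULL */
	if (!cnt)
		return 0;
	/* *cnt is the value on CPU 0 */
	cnt = bpf_this_cpu_ptr(&bpf_prog_active);	/* never NULL */
#endif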

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BUF_LEN	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_fmt_tmp_buf(char **tmp_buf)
{
	struct bpf_bprintf_buffers *bufs;
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
	*tmp_buf = bufs->tmp_bufs[nest_level - 1];

	return 0;
}

void bpf_bprintf_cleanup(void)
{
	if (this_cpu_read(bpf_bprintf_nest_level)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
	}
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when bin_args is NULL
 * - Arguments preparation: in addition to the above verification, it writes in
 *   bin_args a binary representation of arguments usable by bstr_printf where
 *   pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_args, u32 num_args)
{
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (bin_args) {
		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
			return -EBUSY;

		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
		*bin_args = (u32 *)tmp_buf;
	}

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings; ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup();
	return err;
}

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, bin_args);

	bpf_bprintf_cleanup();

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
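
/* bpf_snprintf() above is the in-tree model for any bprintf-like helper:
 * prepare the binary args, consume them, then clean up. A condensed sketch
 * of that calling convention (error handling mostly elided):
 */
#if 0
	u32 *bin_args;
	int err;

	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, &bin_args, num_args);
	if (err < 0)
		return err;
	/* ... hand bin_args to bstr_printf() ... */
	bpf_bprintf_cleanup();
#endif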

/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_timer_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns the bpf
 * callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * the ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing the progs' refcnts.
 * bpf_timer_cancel() cancels the timer and decrements the prog's refcnt.
 * Inner maps can contain bpf timers as well; ops->map_release_uref
 * frees the timers when an inner map is replaced or deleted by user space.
 */
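
/* Illustrative BPF-program-side lifecycle matching the description above
 * (a sketch only; timer_map and struct map_value are hypothetical, and the
 * callback signature is the one the verifier expects for timer callbacks):
 */
#if 0
static int timer_cb(void *map, int *key, struct map_value *val)
{
	return 0;	/* the verifier requires a zero return */
}

	bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&val->t, timer_cb);
	bpf_timer_start(&val->t, 1000000 /* 1 ms in ns */, 0);
	/* ... optionally, later: */
	bpf_timer_cancel(&val->t);
#endif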

struct bpf_hrtimer {
	struct hrtimer timer;
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
};

/* the actual struct hidden inside uapi struct bpf_timer */
struct bpf_timer_kern {
	struct bpf_hrtimer *timer;
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->map;
	void *value = t->value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clockid_t clockid = flags & (MAX_CLOCKS - 1);
	struct bpf_hrtimer *t;
	int ret = 0;

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));

	if (in_nmi())
		return -EOPNOTSUPP;

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		ret = -EPERM;
		goto out;
	}
	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->value = (void *)timer - map->timer_off;
	t->map = map;
	t->prog = NULL;
	rcu_assign_pointer(t->callback_fn, NULL);
	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
	t->timer.function = bpf_timer_cb;
	timer->timer = t;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func		= bpf_timer_init,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&t->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = t->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		t->prog = prog;
	}
	rcu_assign_pointer(t->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func		= bpf_timer_set_callback,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags)
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->prog) {
		ret = -EINVAL;
		goto out;
	}
	hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func		= bpf_timer_start,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static void drop_prog_refcnt(struct bpf_hrtimer *t)
{
	struct bpf_prog *prog = t->prog;

	if (prog) {
		bpf_prog_put(prog);
		t->prog = NULL;
		rcu_assign_pointer(t->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (this_cpu_read(hrtimer_running) == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish.
		 */
		ret = -EDEADLK;
		goto out;
	}
	drop_prog_refcnt(t);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func		= bpf_timer_cancel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
};

/* This function is called by map_delete/update_elem for an individual element
 * and by ops->map_release_uref when the user space reference to a map reaches
 * zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_timer_kern *timer = val;
	struct bpf_hrtimer *t;

	/* Performance optimization: read timer->timer without lock first. */
	if (!READ_ONCE(timer->timer))
		return;

	__bpf_spin_lock_irqsave(&timer->lock);
	/* re-read it under lock */
	t = timer->timer;
	if (!t)
		goto out;
	drop_prog_refcnt(t);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	timer->timer = NULL;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	if (!t)
		return;
	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
	 * right after for both preallocated and non-preallocated maps.
	 * The timer->timer = NULL was already done and no code path can
	 * see address 't' anymore.
	 *
	 * Check that bpf_map_delete/update_elem() wasn't called from timer
	 * callback_fn. In such a case don't call hrtimer_cancel() (since it
	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
	 * just return -1). Though callback_fn is still running on this cpu,
	 * it's safe to do kfree(t) because bpf_timer_cb() read everything it
	 * needed from 't'. The bpf subprog callback_fn won't be able to access
	 * 't', since timer->timer = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 */
	if (this_cpu_read(hrtimer_running) != t)
		hrtimer_cancel(&t->timer);
	kfree(t);
}

BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{
	unsigned long *kptr = map_value;

	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier.
 */
#define BPF_PTR_POISON ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))

const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func		= bpf_kptr_xchg,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id	= BPF_PTR_POISON,
	.arg1_type	= ARG_PTR_TO_KPTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id	= BPF_PTR_POISON,
};

/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
#define DYNPTR_TYPE_SHIFT	28
#define DYNPTR_SIZE_MASK	0xFFFFFF
#define DYNPTR_RDONLY_BIT	BIT(31)
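
/* Resulting layout of the dynptr->size word implied by the masks above
 * (a descriptive sketch, not from the original source):
 *
 *   bit  31    : DYNPTR_RDONLY_BIT
 *   bits 28-30 : dynptr type, stored via DYNPTR_TYPE_SHIFT
 *   bits 24-27 : unused (part of the reserved upper byte)
 *   bits  0-23 : size, at most DYNPTR_MAX_SIZE
 */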

static bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_RDONLY_BIT;
}

static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{
	ptr->size |= type << DYNPTR_TYPE_SHIFT;
}

static u32 bpf_dynptr_get_size(struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_SIZE_MASK;
}

int bpf_dynptr_check_size(u32 size)
{
	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
}

void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
		     enum bpf_dynptr_type type, u32 offset, u32 size)
{
	ptr->data = data;
	ptr->offset = offset;
	ptr->size = size;
	bpf_dynptr_set_type(ptr, type);
}

void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{
	memset(ptr, 0, sizeof(*ptr));
}

static int bpf_dynptr_check_off_len(struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
{
	u32 size = bpf_dynptr_get_size(ptr);

	if (len > size || offset > size - len)
		return -E2BIG;

	return 0;
}

BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
{
	int err;

	err = bpf_dynptr_check_size(size);
	if (err)
		goto error;

	/* flags is currently unsupported */
	if (flags) {
		err = -EINVAL;
		goto error;
	}

	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);

	return 0;

error:
	bpf_dynptr_set_null(ptr);
	return err;
}

const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
	.func		= bpf_dynptr_from_mem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
};

BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, struct bpf_dynptr_kern *, src,
	   u32, offset, u64, flags)
{
	int err;

	if (!src->data || flags)
		return -EINVAL;

	err = bpf_dynptr_check_off_len(src, offset, len);
	if (err)
		return err;

	memcpy(dst, src->data + src->offset + offset, len);

	return 0;
}

const struct bpf_func_proto bpf_dynptr_read_proto = {
	.func		= bpf_dynptr_read,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_DYNPTR,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_dynptr_write, struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
	   u32, len, u64, flags)
{
	int err;

	if (!dst->data || flags || bpf_dynptr_is_rdonly(dst))
		return -EINVAL;

	err = bpf_dynptr_check_off_len(dst, offset, len);
	if (err)
		return err;

	memcpy(dst->data + dst->offset + offset, src, len);

	return 0;
}

const struct bpf_func_proto bpf_dynptr_write_proto = {
	.func		= bpf_dynptr_write,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_DYNPTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_dynptr_data, struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
{
	int err;

	if (!ptr->data)
		return 0;

	err = bpf_dynptr_check_off_len(ptr, offset, len);
	if (err)
		return 0;

	if (bpf_dynptr_is_rdonly(ptr))
		return 0;

	return (unsigned long)(ptr->data + ptr->offset + offset);
}

const struct bpf_func_proto bpf_dynptr_data_proto = {
	.func		= bpf_dynptr_data,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_DYNPTR_MEM_OR_NULL,
	.arg1_type	= ARG_PTR_TO_DYNPTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
};
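
/* Illustrative BPF-program-side flow over the four dynptr helpers above
 * (a sketch only; buf and copy are hypothetical program-stack buffers):
 */
#if 0
	struct bpf_dynptr dptr;
	char buf[64], copy[16];
	void *data;

	if (bpf_dynptr_from_mem(buf, sizeof(buf), 0, &dptr))
		return 0;
	bpf_dynptr_write(&dptr, 0, "hello", 5, 0);
	bpf_dynptr_read(copy, 5, &dptr, 0, 0);
	data = bpf_dynptr_data(&dptr, 0, 8);	/* direct access, or NULL */
#endif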

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_ringbuf_reserve_dynptr:
		return &bpf_ringbuf_reserve_dynptr_proto;
	case BPF_FUNC_ringbuf_submit_dynptr:
		return &bpf_ringbuf_submit_dynptr_proto;
	case BPF_FUNC_ringbuf_discard_dynptr:
		return &bpf_ringbuf_discard_dynptr_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_loop:
		return &bpf_loop_proto;
	case BPF_FUNC_strncmp:
		return &bpf_strncmp_proto;
	case BPF_FUNC_dynptr_from_mem:
		return &bpf_dynptr_from_mem_proto;
	case BPF_FUNC_dynptr_read:
		return &bpf_dynptr_read_proto;
	case BPF_FUNC_dynptr_write:
		return &bpf_dynptr_write_proto;
	case BPF_FUNC_dynptr_data:
		return &bpf_dynptr_data_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_timer_init:
		return &bpf_timer_init_proto;
	case BPF_FUNC_timer_set_callback:
		return &bpf_timer_set_callback_proto;
	case BPF_FUNC_timer_start:
		return &bpf_timer_start_proto;
	case BPF_FUNC_timer_cancel:
		return &bpf_timer_cancel_proto;
	case BPF_FUNC_kptr_xchg:
		return &bpf_kptr_xchg_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return NULL;
	}
}