// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback so that the verifier can properly
 * check the arguments.
 *
 * Different map implementations rely on RCU in the map methods
 * lookup/update/delete, therefore eBPF programs must run under an RCU read
 * lock if they are allowed to access maps; hence the rcu_read_lock_held()
 * check in all three functions.
 */
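/* Illustrative sketch (not part of this file) of how a subsystem's
 * verifier_ops->get_func_proto() callback might expose the map helpers;
 * "example_func_proto" is a hypothetical name:
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */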
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

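/* Illustrative BPF-side sketch (not part of this file) of the push/pop/peek
 * helpers with a queue map; the map name "q" is hypothetical:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_QUEUE);
 *		__uint(max_entries, 256);
 *		__type(value, __u64);
 *	} q SEC(".maps");
 *
 *	__u64 v = 42;
 *
 *	bpf_map_push_elem(&q, &v, BPF_ANY);	// enqueue
 *	if (!bpf_map_peek_elem(&q, &v))		// read head, leave it queued
 *		bpf_map_pop_elem(&q, &v);	// dequeue into v
 */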
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}
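/* The return value packs both ids into one u64; a BPF program would
 * typically split them like this (illustrative, not part of this file):
 *
 *	__u64 pid_tgid = bpf_get_current_pid_tgid();
 *	__u32 tgid = pid_tgid >> 32;	// userspace "PID" (the process)
 *	__u32 pid  = (__u32)pid_tgid;	// userspace "TID" (the thread)
 */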
const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

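/* Illustrative BPF-side sketch (not part of this file): a map value embeds
 * a struct bpf_spin_lock guarding its other fields; "my_map" and the value
 * layout are hypothetical:
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		__u64 counter;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&my_map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */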
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is unused for now, but provides room to
	 * extend the API; the verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	struct bpf_cg_run_ctx *ctx;
	void *ptr;

	/* get current cgroup storage from BPF run context */
	ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
	storage = ctx->prog_item->cgroup_storage[stype];

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif /* CONFIG_CGROUP_BPF */
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

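/* Illustrative BPF-side sketch (not part of this file), e.g. from a cgroup
 * sysctl program; "buf" and "buf_len" are hypothetical inputs. flags == 0
 * lets the base be auto-detected from a "0x"/"0" prefix, as done by
 * _parse_integer_fixup_radix() above:
 *
 *	long val;
 *	int n = bpf_strtol(buf, buf_len, 0, &val);
 *
 *	if (n < 0)
 *		return n;	// -EINVAL or -ERANGE
 *	// n is the number of bytes consumed; val holds the parsed value
 */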
BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

const struct bpf_func_proto bpf_strncmp_proto = {
	.func		= bpf_strncmp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
};

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

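/* Illustrative sketch (not part of this file): user space obtains the
 * dev/inode pair identifying its pid namespace by stat()ing
 * /proc/self/ns/pid and hands both values to the program; "dev" and "ino"
 * here are hypothetical inputs:
 *
 *	struct bpf_pidns_info ns;
 *
 *	if (!bpf_get_ns_current_pid_tgid(dev, ino, &ns, sizeof(ns)))
 *		bpf_printk("ns pid=%u tgid=%u", ns.pid, ns.tgid);
 */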
static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};

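/* Illustrative BPF-side sketch (not part of this file): percpu kernel
 * variables exposed as BTF ksyms can be inspected per cpu; the verifier
 * types the argument via BTF (ARG_PTR_TO_PERCPU_BTF_ID). "runqueues" is the
 * kernel's percpu runqueue variable, as used in the bpf selftests:
 *
 *	extern const struct rq runqueues __ksym;
 *
 *	const struct rq *rq = bpf_per_cpu_ptr(&runqueues, cpu);
 *	if (rq)
 *		// read-only access to that cpu's runqueue fields
 */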
static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf
 * binary representation of their arguments.
 */
#define MAX_BPRINTF_BUF_LEN	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_fmt_tmp_buf(char **tmp_buf)
{
	struct bpf_bprintf_buffers *bufs;
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
	*tmp_buf = bufs->tmp_bufs[nest_level - 1];

	return 0;
}

void bpf_bprintf_cleanup(void)
{
	if (this_cpu_read(bpf_bprintf_nest_level)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
	}
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when bin_args is NULL
 * - Arguments preparation: in addition to the above verification, it writes in
 *   bin_args a binary representation of the arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_args, u32 num_args)
{
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (bin_args) {
		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
			return -EBUSY;

		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
		*bin_args = (u32 *)tmp_buf;
	}

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup();
	return err;
}

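/* The canonical caller pattern (bpf_snprintf() below is the in-tree
 * example): prepare, print, then clean up to release the per-cpu buffers:
 *
 *	u32 *bin_args;
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, &bin_args, num_args);
 *	if (err < 0)
 *		return err;
 *	err = bstr_printf(out, out_size, fmt, bin_args);
 *	bpf_bprintf_cleanup();
 */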
BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, bin_args);

	bpf_bprintf_cleanup();

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

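/* Illustrative BPF-side sketch (not part of this file): the data array
 * carries one 8-byte slot per format specifier; "pid" and "cpu" are
 * hypothetical values:
 *
 *	char out[64];
 *	__u64 args[] = { (__u64)pid, (__u64)cpu };
 *
 *	bpf_snprintf(out, sizeof(out), "pid=%d cpu=%u", args, sizeof(args));
 */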
/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_timer_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing prog's refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref
 * frees the timers when an inner map is replaced or deleted by user space.
 */
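/* Illustrative BPF-side lifecycle sketch (not part of this file): the timer
 * lives in a map value; init binds it to its map, set_callback pins the
 * prog, start arms it. "tmap" and "timer_cb" are hypothetical names:
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;	// verifier requires a zero return
 *	}
 *
 *	struct elem *val = bpf_map_lookup_elem(&tmap, &key);
 *	if (val) {
 *		bpf_timer_init(&val->t, &tmap, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 1000000, 0);	// fire in 1 ms
 *	}
 */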
struct bpf_hrtimer {
	struct hrtimer timer;
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
};

/* the actual struct hidden inside uapi struct bpf_timer */
struct bpf_timer_kern {
	struct bpf_hrtimer *timer;
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->map;
	void *value = t->value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clockid_t clockid = flags & (MAX_CLOCKS - 1);
	struct bpf_hrtimer *t;
	int ret = 0;

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));

	if (in_nmi())
		return -EOPNOTSUPP;

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		ret = -EPERM;
		goto out;
	}
	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->value = (void *)timer - map->timer_off;
	t->map = map;
	t->prog = NULL;
	rcu_assign_pointer(t->callback_fn, NULL);
	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
	t->timer.function = bpf_timer_cb;
	timer->timer = t;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func		= bpf_timer_init,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&t->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = t->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		t->prog = prog;
	}
	rcu_assign_pointer(t->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func		= bpf_timer_set_callback,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags)
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->prog) {
		ret = -EINVAL;
		goto out;
	}
	hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func		= bpf_timer_start,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static void drop_prog_refcnt(struct bpf_hrtimer *t)
{
	struct bpf_prog *prog = t->prog;

	if (prog) {
		bpf_prog_put(prog);
		t->prog = NULL;
		rcu_assign_pointer(t->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (this_cpu_read(hrtimer_running) == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish
		 */
		ret = -EDEADLK;
		goto out;
	}
	drop_prog_refcnt(t);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func		= bpf_timer_cancel,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
};

/* This function is called by map_delete/update_elem for an individual element
 * and by ops->map_release_uref when the user space reference to a map reaches
 * zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_timer_kern *timer = val;
	struct bpf_hrtimer *t;

	/* Performance optimization: read timer->timer without lock first. */
	if (!READ_ONCE(timer->timer))
		return;

	__bpf_spin_lock_irqsave(&timer->lock);
	/* re-read it under lock */
	t = timer->timer;
	if (!t)
		goto out;
	drop_prog_refcnt(t);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	timer->timer = NULL;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	if (!t)
		return;
	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
	 * right after for both preallocated and non-preallocated maps.
	 * The timer->timer = NULL was already done and no code path can
	 * see address 't' anymore.
	 *
	 * Check that bpf_map_delete/update_elem() wasn't called from timer
	 * callback_fn. In such case don't call hrtimer_cancel() (since it will
	 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
	 * return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since timer->timer = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 */
	if (this_cpu_read(hrtimer_running) != t)
		hrtimer_cancel(&t->timer);
	kfree(t);
}

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_loop:
		return &bpf_loop_proto;
	case BPF_FUNC_strncmp:
		return &bpf_strncmp_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_timer_init:
		return &bpf_timer_init_proto;
	case BPF_FUNC_timer_set_callback:
		return &bpf_timer_set_callback_proto;
	case BPF_FUNC_timer_start:
		return &bpf_timer_start_proto;
	case BPF_FUNC_timer_cancel:
		return &bpf_timer_cancel_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return NULL;
	}
}