// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * methods; therefore eBPF programs must run under the RCU read lock
 * whenever they are allowed to access maps, so check rcu_read_lock_held()
 * in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func = bpf_map_lookup_elem,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

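/* A minimal sketch of the pattern the comment above describes: a
 * hypothetical subsystem handing this proto to the verifier from its own
 * verifier_ops->get_func_proto() callback. Compiled out, illustration
 * only; bpf_base_func_proto() at the bottom of this file is the canonical
 * in-tree example.
 */
#if 0
static const struct bpf_func_proto *
example_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	default:
		return NULL;
	}
}
#endif
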
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func = bpf_map_update_elem,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_PTR_TO_MAP_VALUE,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func = bpf_map_delete_elem,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func = bpf_map_push_elem,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func = bpf_map_pop_elem,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func = bpf_map_peek_elem,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func = bpf_user_rnd_u32,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func = bpf_get_smp_processor_id,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func = bpf_get_numa_node_id,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func = bpf_ktime_get_ns,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func = bpf_ktime_get_boot_ns,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func = bpf_ktime_get_coarse_ns,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func = bpf_get_current_pid_tgid,
	.ret_type = RET_INTEGER,
};

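/* The helper packs both IDs into a single u64: tgid in the upper 32 bits,
 * pid in the lower. A sketch of how a BPF program would unpack it
 * (compiled out, illustration only; the function name is made up):
 */
#if 0
static bool example_is_group_leader(void)
{
	u64 id = bpf_get_current_pid_tgid();
	u32 tgid = id >> 32;	/* what userspace calls the PID */
	u32 pid = (u32)id;	/* what userspace calls the TID */

	return pid == tgid;
}
#endif
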
BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func = bpf_get_current_uid_gid,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func = bpf_get_current_comm,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

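/* BPF-program-side sketch (compiled out, illustration only): task->comm is
 * TASK_COMM_LEN (16) bytes, so a 16-byte buffer always suffices; per the
 * comment above, the result is NUL-terminated (or zeroed on failure).
 */
#if 0
static void example_read_comm(void)
{
	char comm[16];

	bpf_get_current_comm(comm, sizeof(comm));
}
#endif
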
#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func = bpf_spin_lock,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func = bpf_spin_unlock,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
};

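/* BPF-program-side sketch of the locking pattern these helpers implement
 * (compiled out, illustration only): the bpf_spin_lock is embedded in a
 * map value and protects the value's other fields; "hmap" and "key" are
 * assumed to be defined elsewhere in the program.
 */
#if 0
struct hmap_elem {
	int cnt;
	struct bpf_spin_lock lock;
};

static void example_locked_increment(void *key)
{
	struct hmap_elem *val = bpf_map_lookup_elem(&hmap, key);

	if (val) {
		bpf_spin_lock(&val->lock);
		val->cnt++;
		bpf_spin_unlock(&val->lock);
	}
}
#endif
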
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func = bpf_jiffies64,
	.ret_type = RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func = bpf_get_current_cgroup_id,
	.ret_type = RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);
	struct cgroup *ancestor;

	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;
	return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func = bpf_get_current_ancestor_cgroup_id,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
#endif /* CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now but provides the ability to
	 * extend the API. The verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage = NULL;
	void *ptr;
	int i;

	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
		break;
	}

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func = bpf_get_local_storage,
	.ret_type = RET_PTR_TO_MAP_VALUE,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};
#endif /* CONFIG_CGROUP_BPF */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return 0;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;
	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func = bpf_strtol,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func = bpf_strtoul,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_LONG,
};

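/* BPF-program-side sketch (compiled out, illustration only): the low bits
 * of flags (BPF_STRTOX_BASE_MASK) select the base, with 0 auto-detecting
 * from the prefix (e.g. "0x" means base 16); on success the helpers
 * return the number of characters consumed.
 */
#if 0
static long example_parse(const char *buf, size_t buf_len)
{
	long val;
	int consumed = bpf_strtol(buf, buf_len, 0, &val);

	if (consumed < 0)	/* -EINVAL or -ERANGE */
		return consumed;
	return val;
}
#endif
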
BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func = bpf_get_ns_current_pid_tgid,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

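/* Userspace side, sketched (compiled out, illustration only; function name
 * is made up): the dev/ino pair that identifies a pid namespace comes from
 * stat()ing its nsfs entry, and is then handed to the BPF program for the
 * two arguments above.
 */
#if 0
static int example_get_pidns_cookie(dev_t *dev, ino_t *ino)
{
	struct stat st;

	if (stat("/proc/self/ns/pid", &st))
		return -1;
	*dev = st.st_dev;
	*ino = st.st_ino;
	return 0;
}
#endif
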
static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func = bpf_get_raw_cpu_id,
	.ret_type = RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func = bpf_event_output_data,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func = bpf_copy_from_user,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func = bpf_per_cpu_ptr,
	.ret_type = RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
	.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func = bpf_this_cpu_ptr,
	.ret_type = RET_PTR_TO_MEM_OR_BTF_ID,
	.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
};

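/* BPF-program-side sketch (compiled out, illustration only): the first
 * argument must be a percpu kernel symbol, typically declared with __ksym;
 * "runqueues" here is one such variable and "cpu" is assumed valid.
 * bpf_this_cpu_ptr() is the same minus the cpu argument and the NULL
 * return.
 */
#if 0
extern const struct rq runqueues __ksym;	/* percpu kernel variable */

static void example_peek_rq(u32 cpu)
{
	struct rq *rq = bpf_per_cpu_ptr(&runqueues, cpu);

	if (rq)
		/* inspect this cpu's run queue */;
}
#endif
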
static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BUF_LEN 512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL 3
struct bpf_bprintf_buffers {
	char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
};
static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_fmt_tmp_buf(char **tmp_buf)
{
	struct bpf_bprintf_buffers *bufs;
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
	*tmp_buf = bufs->tmp_bufs[nest_level - 1];

	return 0;
}

void bpf_bprintf_cleanup(void)
{
	if (this_cpu_read(bpf_bprintf_nest_level)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
	}
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when bin_args is NULL
 * - Arguments preparation: in addition to the above verification, it writes in
 *   bin_args a binary representation of arguments usable by bstr_printf where
 *   pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_args, u32 num_args)
{
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (bin_args) {
		if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
			return -EBUSY;

		tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
		*bin_args = (u32 *)tmp_buf;
	}

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup();
	return err;
}

#define MAX_SNPRINTF_VARARGS 12

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, bin_args);

	bpf_bprintf_cleanup();

	return err;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func = bpf_snprintf,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_PTR_TO_CONST_STR,
	.arg4_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

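/* BPF-program-side sketch (compiled out, illustration only): the variadic
 * arguments travel as a u64 array, which is why data_len must be a
 * multiple of 8 above; "name" is an assumed string pointer.
 */
#if 0
static void example_format(const char *name)
{
	u64 args[] = { 42, (u64)(long)name };
	char out[64];

	bpf_snprintf(out, sizeof(out), "id=%d name=%s", args, sizeof(args));
}
#endif
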
const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;