// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function,
 * it should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments (see the usage sketch below the proto).
 *
 * Different map implementations rely on RCU in the map methods
 * lookup/update/delete; therefore eBPF programs must run under the RCU
 * read lock whenever they are allowed to access maps, so check
 * rcu_read_lock_held() in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
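
/*
 * Usage sketch (hypothetical subsystem code, not from this file): a
 * subsystem exposes the helper by returning the proto above from its
 * verifier_ops->get_func_proto() callback, roughly like so:
 *
 *	static const struct bpf_func_proto *
 *	my_subsys_func_proto(enum bpf_func_id func_id,
 *			     const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */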

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};
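
/*
 * Usage sketch (hypothetical BPF program, not from this file): push/pop/
 * peek serve the keyless BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK maps;
 * "queue" below is an assumed map definition.
 *
 *	__u64 in = 42, out;
 *
 *	bpf_map_push_elem(&queue, &in, BPF_ANY);
 *	if (!bpf_map_pop_elem(&queue, &out))
 *		bpf_printk("head element: %llu", out);
 */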

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
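
/*
 * Usage sketch (hypothetical BPF program, not from this file): the return
 * value packs the thread group id in the upper 32 bits and the thread id
 * in the lower 32 bits.
 *
 *	__u64 pid_tgid = bpf_get_current_pid_tgid();
 *	__u32 tgid = pid_tgid >> 32;	// userspace "PID"
 *	__u32 pid = (__u32)pid_tgid;	// userspace "TID"
 */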

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
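
/*
 * Usage sketch (hypothetical BPF program, not from this file): the gid
 * lands in the upper 32 bits and the uid in the lower 32 bits, mirroring
 * the packing above.
 *
 *	__u64 uid_gid = bpf_get_current_uid_gid();
 *	__u32 gid = uid_gid >> 32;
 *	__u32 uid = (__u32)uid_gid;
 */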

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
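
/*
 * Usage sketch (hypothetical BPF program, not from this file): callers
 * pass a fixed-size buffer; TASK_COMM_LEN (16) bytes suffices. On success
 * the helper guarantees NUL termination; on failure it zeroes the buffer.
 *
 *	char comm[16];
 *
 *	if (!bpf_get_current_comm(comm, sizeof(comm)))
 *		bpf_printk("running in %s", comm);
 */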

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else
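
/* The fallback below is a test-and-test-and-set spinlock: spin with
 * plain reads until the lock word observes zero (unlocked), then try to
 * take it with an atomic exchange; unlock is a release store of zero.
 */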

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
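
/*
 * Usage sketch (hypothetical BPF program, not from this file): the lock
 * lives inside a map value described by BTF; "lock" and "cnt" are assumed
 * fields of such a value.
 *
 *	struct elem *v = bpf_map_lookup_elem(&map, &key);
 *
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */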

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);
	struct cgroup *ancestor;

	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;
	return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now, but provides the ability
	 * to extend the API; the verifier checks that its value is
	 * correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage = NULL;
	void *ptr;
	int i;

	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
		break;
	}

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif /* CONFIG_CGROUP_BPF */
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
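
/*
 * Usage sketch (hypothetical BPF program, not from this file): the low
 * five bits of flags (BPF_STRTOX_BASE_MASK) select the numeric base, with
 * 0 auto-detecting the radix from a leading 0/0x prefix; the return value
 * is the number of characters consumed, or a negative error.
 *
 *	long v;
 *
 *	if (bpf_strtol("0x2a", 4, 0, &v) > 0)
 *		bpf_printk("parsed %ld", v);	// v == 42
 */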

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
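
/* Note: userspace typically obtains the dev/ino pair expected by this
 * helper by stat()ing a pid namespace file, e.g. st_dev and st_ino of
 * /proc/self/ns/pid, and passes them to the program at load time.
 */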

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret))
		memset(dst, 0, size);

	return ret ? -EFAULT : 0;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};
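
/*
 * Usage sketch (hypothetical BPF program, not from this file): the ptr
 * argument must be a BTF-described per-CPU kernel symbol, and the result
 * must be NULL-checked because cpu may be out of range.
 *
 *	extern const struct rq runqueues __ksym;
 *
 *	struct rq *rq = bpf_per_cpu_ptr(&runqueues, cpu);
 *
 *	if (rq)
 *		;	// rq fields readable here
 */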

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;