GNU Linux-libre 5.10.217-gnu1
kernel/bpf/helpers.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * map methods, therefore eBPF programs must run under the RCU read lock
 * whenever they are allowed to access maps; hence rcu_read_lock_held() is
 * checked in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
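
/* Illustrative sketch, not part of this file: a BPF program built with
 * libbpf's bpf_helpers.h would typically call this helper as below. The
 * map "counters" and its layout are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} counters SEC(".maps");
 *
 *	__u32 key = 0;
 *	__u64 *val = bpf_map_lookup_elem(&counters, &key);
 *	if (val)	// lookup may return NULL; the verifier enforces this check
 *		__sync_fetch_and_add(val, 1);
 */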

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};
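
/* Illustrative sketch, not part of this file: push/pop/peek back the
 * BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK map types, which take no key.
 * The map "events" below is hypothetical; the push flags must be 0 or
 * BPF_EXIST (overwrite the oldest element when full).
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_QUEUE);
 *		__uint(max_entries, 64);
 *		__type(value, __u64);
 *	} events SEC(".maps");
 *
 *	__u64 v = 42;
 *	bpf_map_push_elem(&events, &v, 0);
 *	if (!bpf_map_pop_elem(&events, &v))	// 0 on success, -ENOENT if empty
 *		bpf_printk("popped %llu", v);
 */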

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
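
/* Illustrative sketch, not part of this file: the returned u64 packs the
 * thread group id in the upper 32 bits and the thread id in the lower 32,
 * so BPF programs conventionally split it as follows:
 *
 *	__u64 pid_tgid = bpf_get_current_pid_tgid();
 *	__u32 tgid = pid_tgid >> 32;	// what userspace calls the PID
 *	__u32 pid = (__u32)pid_tgid;	// what userspace calls the TID
 */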

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
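
/* Illustrative sketch, not part of this file: bpf_spin_lock protects a
 * struct bpf_spin_lock embedded in a map value; the value layout and the
 * map "m" below are hypothetical.
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		__u64 cnt;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&m, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);	// verifier requires the matching unlock
 *	}
 */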

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);
	struct cgroup *ancestor;

	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;
	return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used for now, but provides the
	 * ability to extend the API. The verifier checks that its
	 * value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage = NULL;
	void *ptr;
	int i;

	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
		break;
	}

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
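
/* Illustrative sketch, not part of this file: these helpers parse textual
 * numbers (e.g. sysctl values in BPF_PROG_TYPE_CGROUP_SYSCTL programs); a
 * base of 0 auto-detects the radix. The buffer below is hypothetical.
 *
 *	long val;
 *	int len = bpf_strtol(buf, buf_len, 0, &val);
 *	if (len < 0)
 *		return 0;	// -EINVAL or -ERANGE
 *	// on success, len bytes were consumed and val holds the result
 */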
#endif

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
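
/* Illustrative sketch, not part of this file: dev and ino identify the pid
 * namespace to translate into; userspace typically obtains them by calling
 * stat(2) on an nsfs entry such as /proc/self/ns/pid and handing st_dev
 * and st_ino down to the program. The names below are hypothetical.
 *
 *	struct bpf_pidns_info ns;
 *	if (!bpf_get_ns_current_pid_tgid(dev, ino, &ns, sizeof(ns)))
 *		bpf_printk("ns-local pid %u tgid %u", ns.pid, ns.tgid);
 */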

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto =  {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
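
/* Illustrative sketch, not part of this file: because copy_from_user() may
 * fault and sleep, this helper is only offered to sleepable programs. A
 * sleepable tracing or LSM program could use it like this (the buffer size
 * and user_ptr source are hypothetical):
 *
 *	char path[256];
 *	if (bpf_copy_from_user(path, sizeof(path), user_ptr))
 *		return 0;	// -EFAULT; dst was zeroed by the helper
 */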

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;

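/* Helpers below are handed out in three tiers: the first switch is the
 * base set available to any caller of this function, the second
 * additionally requires bpf_capable() (CAP_BPF or CAP_SYS_ADMIN), and the
 * last group of tracing and kernel-read helpers requires perfmon_capable()
 * on top of that.
 */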
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	default:
		return NULL;
	}
}