GNU Linux-libre 5.17.9-gnu
kernel/bpf/helpers.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/bpf-cgroup.h>
7 #include <linux/rcupdate.h>
8 #include <linux/random.h>
9 #include <linux/smp.h>
10 #include <linux/topology.h>
11 #include <linux/ktime.h>
12 #include <linux/sched.h>
13 #include <linux/uidgid.h>
14 #include <linux/filter.h>
15 #include <linux/ctype.h>
16 #include <linux/jiffies.h>
17 #include <linux/pid_namespace.h>
18 #include <linux/proc_ns.h>
19 #include <linux/security.h>
20
21 #include "../../lib/kstrtox.h"
22
23 /* If a kernel subsystem allows eBPF programs to call this function, its
24  * verifier_ops->get_func_proto() callback should return
25  * bpf_map_lookup_elem_proto, so that the verifier can check the arguments.
26  *
27  * Different map implementations rely on RCU in their lookup/update/delete
28  * methods, therefore eBPF programs must run under rcu_read_lock() if they
29  * are allowed to access maps; hence the rcu_read_lock_held() check in
30  * all three functions.
31  */
32 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
33 {
34         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
35         return (unsigned long) map->ops->map_lookup_elem(map, key);
36 }
37
38 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
39         .func           = bpf_map_lookup_elem,
40         .gpl_only       = false,
41         .pkt_access     = true,
42         .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
43         .arg1_type      = ARG_CONST_MAP_PTR,
44         .arg2_type      = ARG_PTR_TO_MAP_KEY,
45 };
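/* Illustrative sketch, not part of this file: a subsystem that wants its
 * programs to use the map helpers would return these protos from its own
 * get_func_proto callback, roughly as below. The function name and the
 * default-case fallback are assumptions, not copied from any subsystem.
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */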
46
47 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
48            void *, value, u64, flags)
49 {
50         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
51         return map->ops->map_update_elem(map, key, value, flags);
52 }
53
54 const struct bpf_func_proto bpf_map_update_elem_proto = {
55         .func           = bpf_map_update_elem,
56         .gpl_only       = false,
57         .pkt_access     = true,
58         .ret_type       = RET_INTEGER,
59         .arg1_type      = ARG_CONST_MAP_PTR,
60         .arg2_type      = ARG_PTR_TO_MAP_KEY,
61         .arg3_type      = ARG_PTR_TO_MAP_VALUE,
62         .arg4_type      = ARG_ANYTHING,
63 };
64
65 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
66 {
67         WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
68         return map->ops->map_delete_elem(map, key);
69 }
70
71 const struct bpf_func_proto bpf_map_delete_elem_proto = {
72         .func           = bpf_map_delete_elem,
73         .gpl_only       = false,
74         .pkt_access     = true,
75         .ret_type       = RET_INTEGER,
76         .arg1_type      = ARG_CONST_MAP_PTR,
77         .arg2_type      = ARG_PTR_TO_MAP_KEY,
78 };
79
80 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
81 {
82         return map->ops->map_push_elem(map, value, flags);
83 }
84
85 const struct bpf_func_proto bpf_map_push_elem_proto = {
86         .func           = bpf_map_push_elem,
87         .gpl_only       = false,
88         .pkt_access     = true,
89         .ret_type       = RET_INTEGER,
90         .arg1_type      = ARG_CONST_MAP_PTR,
91         .arg2_type      = ARG_PTR_TO_MAP_VALUE,
92         .arg3_type      = ARG_ANYTHING,
93 };
94
95 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
96 {
97         return map->ops->map_pop_elem(map, value);
98 }
99
100 const struct bpf_func_proto bpf_map_pop_elem_proto = {
101         .func           = bpf_map_pop_elem,
102         .gpl_only       = false,
103         .ret_type       = RET_INTEGER,
104         .arg1_type      = ARG_CONST_MAP_PTR,
105         .arg2_type      = ARG_PTR_TO_UNINIT_MAP_VALUE,
106 };
107
108 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
109 {
110         return map->ops->map_peek_elem(map, value);
111 }
112
113 const struct bpf_func_proto bpf_map_peek_elem_proto = {
114         .func           = bpf_map_peek_elem,
115         .gpl_only       = false,
116         .ret_type       = RET_INTEGER,
117         .arg1_type      = ARG_CONST_MAP_PTR,
118         .arg2_type      = ARG_PTR_TO_UNINIT_MAP_VALUE,
119 };
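/* The push/pop/peek helpers above back BPF_MAP_TYPE_QUEUE and
 * BPF_MAP_TYPE_STACK. A hypothetical program-side use (the map name is
 * made up):
 *
 *	u64 val = 42;
 *
 *	bpf_map_push_elem(&queue, &val, BPF_ANY);
 *	if (!bpf_map_pop_elem(&queue, &val))
 *		;	// val now holds the oldest (queue) or newest (stack) entry
 */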
120
121 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
122         .func           = bpf_user_rnd_u32,
123         .gpl_only       = false,
124         .ret_type       = RET_INTEGER,
125 };
126
127 BPF_CALL_0(bpf_get_smp_processor_id)
128 {
129         return smp_processor_id();
130 }
131
132 const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
133         .func           = bpf_get_smp_processor_id,
134         .gpl_only       = false,
135         .ret_type       = RET_INTEGER,
136 };
137
138 BPF_CALL_0(bpf_get_numa_node_id)
139 {
140         return numa_node_id();
141 }
142
143 const struct bpf_func_proto bpf_get_numa_node_id_proto = {
144         .func           = bpf_get_numa_node_id,
145         .gpl_only       = false,
146         .ret_type       = RET_INTEGER,
147 };
148
149 BPF_CALL_0(bpf_ktime_get_ns)
150 {
151         /* NMI safe access to clock monotonic */
152         return ktime_get_mono_fast_ns();
153 }
154
155 const struct bpf_func_proto bpf_ktime_get_ns_proto = {
156         .func           = bpf_ktime_get_ns,
157         .gpl_only       = false,
158         .ret_type       = RET_INTEGER,
159 };
160
161 BPF_CALL_0(bpf_ktime_get_boot_ns)
162 {
163         /* NMI safe access to clock boottime */
164         return ktime_get_boot_fast_ns();
165 }
166
167 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
168         .func           = bpf_ktime_get_boot_ns,
169         .gpl_only       = false,
170         .ret_type       = RET_INTEGER,
171 };
172
173 BPF_CALL_0(bpf_ktime_get_coarse_ns)
174 {
175         return ktime_get_coarse_ns();
176 }
177
178 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
179         .func           = bpf_ktime_get_coarse_ns,
180         .gpl_only       = false,
181         .ret_type       = RET_INTEGER,
182 };
183
184 BPF_CALL_0(bpf_get_current_pid_tgid)
185 {
186         struct task_struct *task = current;
187
188         if (unlikely(!task))
189                 return -EINVAL;
190
191         return (u64) task->tgid << 32 | task->pid;
192 }
193
194 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
195         .func           = bpf_get_current_pid_tgid,
196         .gpl_only       = false,
197         .ret_type       = RET_INTEGER,
198 };
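/* Usage note (illustrative, not from this file): the return value packs the
 * tgid in the upper 32 bits and the pid in the lower 32 bits, so a program
 * would typically split it as:
 *
 *	u64 pid_tgid = bpf_get_current_pid_tgid();
 *	u32 tgid = pid_tgid >> 32;	// "process" id as seen by getpid()
 *	u32 pid = (u32)pid_tgid;	// "thread" id as seen by gettid()
 */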
199
200 BPF_CALL_0(bpf_get_current_uid_gid)
201 {
202         struct task_struct *task = current;
203         kuid_t uid;
204         kgid_t gid;
205
206         if (unlikely(!task))
207                 return -EINVAL;
208
209         current_uid_gid(&uid, &gid);
210         return (u64) from_kgid(&init_user_ns, gid) << 32 |
211                      from_kuid(&init_user_ns, uid);
212 }
213
214 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
215         .func           = bpf_get_current_uid_gid,
216         .gpl_only       = false,
217         .ret_type       = RET_INTEGER,
218 };
219
220 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
221 {
222         struct task_struct *task = current;
223
224         if (unlikely(!task))
225                 goto err_clear;
226
227         strncpy(buf, task->comm, size);
228
229         /* The verifier guarantees that size > 0. If task->comm exceeds
230          * size, make sure that buf is %NUL-terminated. This is done
231          * unconditionally here to save a size test.
232          */
233         buf[size - 1] = 0;
234         return 0;
235 err_clear:
236         memset(buf, 0, size);
237         return -EINVAL;
238 }
239
240 const struct bpf_func_proto bpf_get_current_comm_proto = {
241         .func           = bpf_get_current_comm,
242         .gpl_only       = false,
243         .ret_type       = RET_INTEGER,
244         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
245         .arg2_type      = ARG_CONST_SIZE,
246 };
247
248 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
249
250 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
251 {
252         arch_spinlock_t *l = (void *)lock;
253         union {
254                 __u32 val;
255                 arch_spinlock_t lock;
256         } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
257
258         compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
259         BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
260         BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
261         arch_spin_lock(l);
262 }
263
264 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
265 {
266         arch_spinlock_t *l = (void *)lock;
267
268         arch_spin_unlock(l);
269 }
270
271 #else
272
273 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
274 {
275         atomic_t *l = (void *)lock;
276
277         BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
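	/* Fallback test-and-set lock: spin with relaxed reads until the word
	 * reads zero, then try to claim it with an atomic exchange; a non-zero
	 * old value means another CPU took the lock first, so retry.
	 */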
278         do {
279                 atomic_cond_read_relaxed(l, !VAL);
280         } while (atomic_xchg(l, 1));
281 }
282
283 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
284 {
285         atomic_t *l = (void *)lock;
286
287         atomic_set_release(l, 0);
288 }
289
290 #endif
291
292 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
293
294 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
295 {
296         unsigned long flags;
297
298         local_irq_save(flags);
299         __bpf_spin_lock(lock);
300         __this_cpu_write(irqsave_flags, flags);
301 }
302
303 notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
304 {
305         __bpf_spin_lock_irqsave(lock);
306         return 0;
307 }
308
309 const struct bpf_func_proto bpf_spin_lock_proto = {
310         .func           = bpf_spin_lock,
311         .gpl_only       = false,
312         .ret_type       = RET_VOID,
313         .arg1_type      = ARG_PTR_TO_SPIN_LOCK,
314 };
315
316 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
317 {
318         unsigned long flags;
319
320         flags = __this_cpu_read(irqsave_flags);
321         __bpf_spin_unlock(lock);
322         local_irq_restore(flags);
323 }
324
325 notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
326 {
327         __bpf_spin_unlock_irqrestore(lock);
328         return 0;
329 }
330
331 const struct bpf_func_proto bpf_spin_unlock_proto = {
332         .func           = bpf_spin_unlock,
333         .gpl_only       = false,
334         .ret_type       = RET_VOID,
335         .arg1_type      = ARG_PTR_TO_SPIN_LOCK,
336 };
337
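/* Copy a map value while holding the bpf_spin_lock embedded in either the
 * source or the destination element; used by the BPF_F_LOCK handling in the
 * map lookup/update paths.
 */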
338 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
339                            bool lock_src)
340 {
341         struct bpf_spin_lock *lock;
342
343         if (lock_src)
344                 lock = src + map->spin_lock_off;
345         else
346                 lock = dst + map->spin_lock_off;
347         preempt_disable();
348         __bpf_spin_lock_irqsave(lock);
349         copy_map_value(map, dst, src);
350         __bpf_spin_unlock_irqrestore(lock);
351         preempt_enable();
352 }
353
354 BPF_CALL_0(bpf_jiffies64)
355 {
356         return get_jiffies_64();
357 }
358
359 const struct bpf_func_proto bpf_jiffies64_proto = {
360         .func           = bpf_jiffies64,
361         .gpl_only       = false,
362         .ret_type       = RET_INTEGER,
363 };
364
365 #ifdef CONFIG_CGROUPS
366 BPF_CALL_0(bpf_get_current_cgroup_id)
367 {
368         struct cgroup *cgrp;
369         u64 cgrp_id;
370
371         rcu_read_lock();
372         cgrp = task_dfl_cgroup(current);
373         cgrp_id = cgroup_id(cgrp);
374         rcu_read_unlock();
375
376         return cgrp_id;
377 }
378
379 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
380         .func           = bpf_get_current_cgroup_id,
381         .gpl_only       = false,
382         .ret_type       = RET_INTEGER,
383 };
384
385 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
386 {
387         struct cgroup *cgrp;
388         struct cgroup *ancestor;
389         u64 cgrp_id;
390
391         rcu_read_lock();
392         cgrp = task_dfl_cgroup(current);
393         ancestor = cgroup_ancestor(cgrp, ancestor_level);
394         cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
395         rcu_read_unlock();
396
397         return cgrp_id;
398 }
399
400 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
401         .func           = bpf_get_current_ancestor_cgroup_id,
402         .gpl_only       = false,
403         .ret_type       = RET_INTEGER,
404         .arg1_type      = ARG_ANYTHING,
405 };
406
407 #ifdef CONFIG_CGROUP_BPF
408
409 BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
410 {
411         /* The flags argument is not used now, but provides the ability
412          * to extend the API in the future.
413          * The verifier checks that its value is correct.
414          */
415         enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
416         struct bpf_cgroup_storage *storage;
417         struct bpf_cg_run_ctx *ctx;
418         void *ptr;
419
420         /* get current cgroup storage from BPF run context */
421         ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
422         storage = ctx->prog_item->cgroup_storage[stype];
423
424         if (stype == BPF_CGROUP_STORAGE_SHARED)
425                 ptr = &READ_ONCE(storage->buf)->data[0];
426         else
427                 ptr = this_cpu_ptr(storage->percpu_buf);
428
429         return (unsigned long)ptr;
430 }
431
432 const struct bpf_func_proto bpf_get_local_storage_proto = {
433         .func           = bpf_get_local_storage,
434         .gpl_only       = false,
435         .ret_type       = RET_PTR_TO_MAP_VALUE,
436         .arg1_type      = ARG_CONST_MAP_PTR,
437         .arg2_type      = ARG_ANYTHING,
438 };
439 #endif
440
441 #define BPF_STRTOX_BASE_MASK 0x1F
442
443 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
444                           unsigned long long *res, bool *is_negative)
445 {
446         unsigned int base = flags & BPF_STRTOX_BASE_MASK;
447         const char *cur_buf = buf;
448         size_t cur_len = buf_len;
449         unsigned int consumed;
450         size_t val_len;
451         char str[64];
452
453         if (!buf || !buf_len || !res || !is_negative)
454                 return -EINVAL;
455
456         if (base != 0 && base != 8 && base != 10 && base != 16)
457                 return -EINVAL;
458
459         if (flags & ~BPF_STRTOX_BASE_MASK)
460                 return -EINVAL;
461
462         while (cur_buf < buf + buf_len && isspace(*cur_buf))
463                 ++cur_buf;
464
465         *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
466         if (*is_negative)
467                 ++cur_buf;
468
469         consumed = cur_buf - buf;
470         cur_len -= consumed;
471         if (!cur_len)
472                 return -EINVAL;
473
474         cur_len = min(cur_len, sizeof(str) - 1);
475         memcpy(str, cur_buf, cur_len);
476         str[cur_len] = '\0';
477         cur_buf = str;
478
479         cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
480         val_len = _parse_integer(cur_buf, base, res);
481
482         if (val_len & KSTRTOX_OVERFLOW)
483                 return -ERANGE;
484
485         if (val_len == 0)
486                 return -EINVAL;
487
488         cur_buf += val_len;
489         consumed += cur_buf - str;
490
491         return consumed;
492 }
493
494 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
495                          long long *res)
496 {
497         unsigned long long _res;
498         bool is_negative;
499         int err;
500
501         err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
502         if (err < 0)
503                 return err;
504         if (is_negative) {
505                 if ((long long)-_res > 0)
506                         return -ERANGE;
507                 *res = -_res;
508         } else {
509                 if ((long long)_res < 0)
510                         return -ERANGE;
511                 *res = _res;
512         }
513         return err;
514 }
515
516 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
517            long *, res)
518 {
519         long long _res;
520         int err;
521
522         err = __bpf_strtoll(buf, buf_len, flags, &_res);
523         if (err < 0)
524                 return err;
525         if (_res != (long)_res)
526                 return -ERANGE;
527         *res = _res;
528         return err;
529 }
530
531 const struct bpf_func_proto bpf_strtol_proto = {
532         .func           = bpf_strtol,
533         .gpl_only       = false,
534         .ret_type       = RET_INTEGER,
535         .arg1_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
536         .arg2_type      = ARG_CONST_SIZE,
537         .arg3_type      = ARG_ANYTHING,
538         .arg4_type      = ARG_PTR_TO_LONG,
539 };
540
541 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
542            unsigned long *, res)
543 {
544         unsigned long long _res;
545         bool is_negative;
546         int err;
547
548         err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
549         if (err < 0)
550                 return err;
551         if (is_negative)
552                 return -EINVAL;
553         if (_res != (unsigned long)_res)
554                 return -ERANGE;
555         *res = _res;
556         return err;
557 }
558
559 const struct bpf_func_proto bpf_strtoul_proto = {
560         .func           = bpf_strtoul,
561         .gpl_only       = false,
562         .ret_type       = RET_INTEGER,
563         .arg1_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
564         .arg2_type      = ARG_CONST_SIZE,
565         .arg3_type      = ARG_ANYTHING,
566         .arg4_type      = ARG_PTR_TO_LONG,
567 };
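/* Usage sketch (assumption, not from this file): both helpers take the
 * numeric base in the low bits of flags (0 means auto-detect, otherwise 8,
 * 10 or 16) and return the number of characters consumed on success:
 *
 *	long port;
 *	int n = bpf_strtol(buf, sizeof(buf), 10, &port);
 *
 *	if (n < 0)
 *		;	// buf did not start with a valid base-10 number
 */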
568 #endif
569
570 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
571 {
572         return strncmp(s1, s2, s1_sz);
573 }
574
575 const struct bpf_func_proto bpf_strncmp_proto = {
576         .func           = bpf_strncmp,
577         .gpl_only       = false,
578         .ret_type       = RET_INTEGER,
579         .arg1_type      = ARG_PTR_TO_MEM,
580         .arg2_type      = ARG_CONST_SIZE,
581         .arg3_type      = ARG_PTR_TO_CONST_STR,
582 };
583
584 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
585            struct bpf_pidns_info *, nsdata, u32, size)
586 {
587         struct task_struct *task = current;
588         struct pid_namespace *pidns;
589         int err = -EINVAL;
590
591         if (unlikely(size != sizeof(struct bpf_pidns_info)))
592                 goto clear;
593
594         if (unlikely((u64)(dev_t)dev != dev))
595                 goto clear;
596
597         if (unlikely(!task))
598                 goto clear;
599
600         pidns = task_active_pid_ns(task);
601         if (unlikely(!pidns)) {
602                 err = -ENOENT;
603                 goto clear;
604         }
605
606         if (!ns_match(&pidns->ns, (dev_t)dev, ino))
607                 goto clear;
608
609         nsdata->pid = task_pid_nr_ns(task, pidns);
610         nsdata->tgid = task_tgid_nr_ns(task, pidns);
611         return 0;
612 clear:
613         memset((void *)nsdata, 0, (size_t) size);
614         return err;
615 }
616
617 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
618         .func           = bpf_get_ns_current_pid_tgid,
619         .gpl_only       = false,
620         .ret_type       = RET_INTEGER,
621         .arg1_type      = ARG_ANYTHING,
622         .arg2_type      = ARG_ANYTHING,
623         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
624         .arg4_type      = ARG_CONST_SIZE,
625 };
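/* Usage sketch (assumption, not from this file): the dev/ino pair identifies
 * a pid namespace; user space typically obtains it by stat()-ing
 * /proc/self/ns/pid and passes it to the program:
 *
 *	struct bpf_pidns_info ns = {};
 *
 *	if (!bpf_get_ns_current_pid_tgid(dev, ino, &ns, sizeof(ns)))
 *		;	// ns.pid / ns.tgid are relative to that namespace
 */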
626
627 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
628         .func           = bpf_get_raw_cpu_id,
629         .gpl_only       = false,
630         .ret_type       = RET_INTEGER,
631 };
632
633 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
634            u64, flags, void *, data, u64, size)
635 {
636         if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
637                 return -EINVAL;
638
639         return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
640 }
641
642 const struct bpf_func_proto bpf_event_output_data_proto =  {
643         .func           = bpf_event_output_data,
644         .gpl_only       = true,
645         .ret_type       = RET_INTEGER,
646         .arg1_type      = ARG_PTR_TO_CTX,
647         .arg2_type      = ARG_CONST_MAP_PTR,
648         .arg3_type      = ARG_ANYTHING,
649         .arg4_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
650         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
651 };
652
653 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
654            const void __user *, user_ptr)
655 {
656         int ret = copy_from_user(dst, user_ptr, size);
657
658         if (unlikely(ret)) {
659                 memset(dst, 0, size);
660                 ret = -EFAULT;
661         }
662
663         return ret;
664 }
665
666 const struct bpf_func_proto bpf_copy_from_user_proto = {
667         .func           = bpf_copy_from_user,
668         .gpl_only       = false,
669         .ret_type       = RET_INTEGER,
670         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
671         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
672         .arg3_type      = ARG_ANYTHING,
673 };
674
675 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
676 {
677         if (cpu >= nr_cpu_ids)
678                 return (unsigned long)NULL;
679
680         return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
681 }
682
683 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
684         .func           = bpf_per_cpu_ptr,
685         .gpl_only       = false,
686         .ret_type       = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
687         .arg1_type      = ARG_PTR_TO_PERCPU_BTF_ID,
688         .arg2_type      = ARG_ANYTHING,
689 };
690
691 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
692 {
693         return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
694 }
695
696 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
697         .func           = bpf_this_cpu_ptr,
698         .gpl_only       = false,
699         .ret_type       = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
700         .arg1_type      = ARG_PTR_TO_PERCPU_BTF_ID,
701 };
702
703 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
704                 size_t bufsz)
705 {
706         void __user *user_ptr = (__force void __user *)unsafe_ptr;
707
708         buf[0] = 0;
709
710         switch (fmt_ptype) {
711         case 's':
712 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
713                 if ((unsigned long)unsafe_ptr < TASK_SIZE)
714                         return strncpy_from_user_nofault(buf, user_ptr, bufsz);
715                 fallthrough;
716 #endif
717         case 'k':
718                 return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
719         case 'u':
720                 return strncpy_from_user_nofault(buf, user_ptr, bufsz);
721         }
722
723         return -EINVAL;
724 }
725
726 /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
727  * arguments representation.
728  */
729 #define MAX_BPRINTF_BUF_LEN     512
730
731 /* Support executing three nested bprintf helper calls on a given CPU */
732 #define MAX_BPRINTF_NEST_LEVEL  3
733 struct bpf_bprintf_buffers {
734         char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
735 };
736 static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
737 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
738
739 static int try_get_fmt_tmp_buf(char **tmp_buf)
740 {
741         struct bpf_bprintf_buffers *bufs;
742         int nest_level;
743
744         preempt_disable();
745         nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
746         if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
747                 this_cpu_dec(bpf_bprintf_nest_level);
748                 preempt_enable();
749                 return -EBUSY;
750         }
751         bufs = this_cpu_ptr(&bpf_bprintf_bufs);
752         *tmp_buf = bufs->tmp_bufs[nest_level - 1];
753
754         return 0;
755 }
756
757 void bpf_bprintf_cleanup(void)
758 {
759         if (this_cpu_read(bpf_bprintf_nest_level)) {
760                 this_cpu_dec(bpf_bprintf_nest_level);
761                 preempt_enable();
762         }
763 }
764
765 /*
766  * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
767  *
768  * Returns a negative value if fmt is an invalid format string or 0 otherwise.
769  *
770  * This can be used in two ways:
771  * - Format string verification only: when bin_args is NULL
772  * - Arguments preparation: in addition to the above verification, it writes in
773  *   bin_args a binary representation of arguments usable by bstr_printf where
774  *   pointers from BPF have been sanitized.
775  *
776  * In argument preparation mode, if 0 is returned, safe temporary buffers are
777  * allocated and bpf_bprintf_cleanup should be called to free them after use.
778  */
779 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
780                         u32 **bin_args, u32 num_args)
781 {
782         char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
783         size_t sizeof_cur_arg, sizeof_cur_ip;
784         int err, i, num_spec = 0;
785         u64 cur_arg;
786         char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
787
788         fmt_end = strnchr(fmt, fmt_size, 0);
789         if (!fmt_end)
790                 return -EINVAL;
791         fmt_size = fmt_end - fmt;
792
793         if (bin_args) {
794                 if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
795                         return -EBUSY;
796
797                 tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
798                 *bin_args = (u32 *)tmp_buf;
799         }
800
801         for (i = 0; i < fmt_size; i++) {
802                 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
803                         err = -EINVAL;
804                         goto out;
805                 }
806
807                 if (fmt[i] != '%')
808                         continue;
809
810                 if (fmt[i + 1] == '%') {
811                         i++;
812                         continue;
813                 }
814
815                 if (num_spec >= num_args) {
816                         err = -EINVAL;
817                         goto out;
818                 }
819
820                 /* The string is zero-terminated, so if fmt[i] != 0 we can
821                  * always access fmt[i + 1]; in the worst case it will be a 0.
822                  */
823                 i++;
824
825                 /* skip optional "[0 +-][num]" width formatting field */
826                 while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
827                        fmt[i] == ' ')
828                         i++;
829                 if (fmt[i] >= '1' && fmt[i] <= '9') {
830                         i++;
831                         while (fmt[i] >= '0' && fmt[i] <= '9')
832                                 i++;
833                 }
834
835                 if (fmt[i] == 'p') {
836                         sizeof_cur_arg = sizeof(long);
837
838                         if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
839                             fmt[i + 2] == 's') {
840                                 fmt_ptype = fmt[i + 1];
841                                 i += 2;
842                                 goto fmt_str;
843                         }
844
845                         if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
846                             ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
847                             fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
848                             fmt[i + 1] == 'S') {
849                                 /* just kernel pointers */
850                                 if (tmp_buf)
851                                         cur_arg = raw_args[num_spec];
852                                 i++;
853                                 goto nocopy_fmt;
854                         }
855
856                         if (fmt[i + 1] == 'B') {
857                                 if (tmp_buf)  {
858                                         err = snprintf(tmp_buf,
859                                                        (tmp_buf_end - tmp_buf),
860                                                        "%pB",
861                                                        (void *)(long)raw_args[num_spec]);
862                                         tmp_buf += (err + 1);
863                                 }
864
865                                 i++;
866                                 num_spec++;
867                                 continue;
868                         }
869
870                         /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
871                         if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
872                             (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
873                                 err = -EINVAL;
874                                 goto out;
875                         }
876
877                         i += 2;
878                         if (!tmp_buf)
879                                 goto nocopy_fmt;
880
881                         sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
882                         if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
883                                 err = -ENOSPC;
884                                 goto out;
885                         }
886
887                         unsafe_ptr = (char *)(long)raw_args[num_spec];
888                         err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
889                                                        sizeof_cur_ip);
890                         if (err < 0)
891                                 memset(cur_ip, 0, sizeof_cur_ip);
892
893                         /* hack: bstr_printf expects IP addresses to be
894                          * pre-formatted as strings; ironically, the easiest way
895                          * to do that is to call snprintf.
896                          */
897                         ip_spec[2] = fmt[i - 1];
898                         ip_spec[3] = fmt[i];
899                         err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
900                                        ip_spec, &cur_ip);
901
902                         tmp_buf += err + 1;
903                         num_spec++;
904
905                         continue;
906                 } else if (fmt[i] == 's') {
907                         fmt_ptype = fmt[i];
908 fmt_str:
909                         if (fmt[i + 1] != 0 &&
910                             !isspace(fmt[i + 1]) &&
911                             !ispunct(fmt[i + 1])) {
912                                 err = -EINVAL;
913                                 goto out;
914                         }
915
916                         if (!tmp_buf)
917                                 goto nocopy_fmt;
918
919                         if (tmp_buf_end == tmp_buf) {
920                                 err = -ENOSPC;
921                                 goto out;
922                         }
923
924                         unsafe_ptr = (char *)(long)raw_args[num_spec];
925                         err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
926                                                     fmt_ptype,
927                                                     tmp_buf_end - tmp_buf);
928                         if (err < 0) {
929                                 tmp_buf[0] = '\0';
930                                 err = 1;
931                         }
932
933                         tmp_buf += err;
934                         num_spec++;
935
936                         continue;
937                 } else if (fmt[i] == 'c') {
938                         if (!tmp_buf)
939                                 goto nocopy_fmt;
940
941                         if (tmp_buf_end == tmp_buf) {
942                                 err = -ENOSPC;
943                                 goto out;
944                         }
945
946                         *tmp_buf = raw_args[num_spec];
947                         tmp_buf++;
948                         num_spec++;
949
950                         continue;
951                 }
952
953                 sizeof_cur_arg = sizeof(int);
954
955                 if (fmt[i] == 'l') {
956                         sizeof_cur_arg = sizeof(long);
957                         i++;
958                 }
959                 if (fmt[i] == 'l') {
960                         sizeof_cur_arg = sizeof(long long);
961                         i++;
962                 }
963
964                 if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
965                     fmt[i] != 'x' && fmt[i] != 'X') {
966                         err = -EINVAL;
967                         goto out;
968                 }
969
970                 if (tmp_buf)
971                         cur_arg = raw_args[num_spec];
972 nocopy_fmt:
973                 if (tmp_buf) {
974                         tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
975                         if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
976                                 err = -ENOSPC;
977                                 goto out;
978                         }
979
980                         if (sizeof_cur_arg == 8) {
981                                 *(u32 *)tmp_buf = *(u32 *)&cur_arg;
982                                 *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
983                         } else {
984                                 *(u32 *)tmp_buf = (u32)(long)cur_arg;
985                         }
986                         tmp_buf += sizeof_cur_arg;
987                 }
988                 num_spec++;
989         }
990
991         err = 0;
992 out:
993         if (err)
994                 bpf_bprintf_cleanup();
995         return err;
996 }
997
998 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
999            const void *, data, u32, data_len)
1000 {
1001         int err, num_args;
1002         u32 *bin_args;
1003
1004         if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
1005             (data_len && !data))
1006                 return -EINVAL;
1007         num_args = data_len / 8;
1008
1009         /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
1010          * can safely give an unbounded size.
1011          */
1012         err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args);
1013         if (err < 0)
1014                 return err;
1015
1016         err = bstr_printf(str, str_size, fmt, bin_args);
1017
1018         bpf_bprintf_cleanup();
1019
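	/* bstr_printf() returns the length it would have produced, excluding
	 * the trailing NUL, so report the full size including it.
	 */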
1020         return err + 1;
1021 }
1022
1023 const struct bpf_func_proto bpf_snprintf_proto = {
1024         .func           = bpf_snprintf,
1025         .gpl_only       = true,
1026         .ret_type       = RET_INTEGER,
1027         .arg1_type      = ARG_PTR_TO_MEM_OR_NULL,
1028         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
1029         .arg3_type      = ARG_PTR_TO_CONST_STR,
1030         .arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
1031         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
1032 };
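/* Program-side sketch (illustrative; the buffer and argument names are made
 * up): data must point to an array of u64s and data_len must be its size in
 * bytes (a multiple of 8), one u64 per conversion specifier in fmt:
 *
 *	static const char fmt[] = "pid %d comm %s";
 *	u64 args[] = { pid, (u64)(long)comm };
 *	char out[64];
 *
 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 */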
1033
1034 /* BPF map elements can contain 'struct bpf_timer'.
1035  * Such a map owns all of its BPF timers.
1036  * 'struct bpf_timer' is allocated as part of map element allocation
1037  * and is zero initialized.
1038  * That space is used to keep 'struct bpf_timer_kern'.
1039  * bpf_timer_init() allocates 'struct bpf_hrtimer', inits the hrtimer, and
1040  * remembers the 'struct bpf_map *' pointer it's part of.
1041  * bpf_timer_set_callback() increments the prog refcnt and assigns the bpf callback_fn.
1042  * bpf_timer_start() arms the timer.
1043  * If the user space reference to a map goes to zero at this point,
1044  * the ops->map_release_uref callback is responsible for cancelling the timers,
1045  * freeing their memory, and decrementing the progs' refcnts.
1046  * bpf_timer_cancel() cancels the timer and decrements the prog's refcnt.
1047  * Inner maps can contain bpf timers as well; ops->map_release_uref
1048  * frees those timers when an inner map is replaced or deleted by user space.
1049  */
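/* Program-side sketch (illustrative; the map value layout, map and callback
 * names are made up, clockid and flags as accepted by the helpers below):
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;	// the verifier requires a zero return
 *	}
 *
 *	// with val pointing into an element of timer_map:
 *	bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(&val->t, timer_cb);
 *	bpf_timer_start(&val->t, 1000000000, 0);	// fire in one second
 */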
1050 struct bpf_hrtimer {
1051         struct hrtimer timer;
1052         struct bpf_map *map;
1053         struct bpf_prog *prog;
1054         void __rcu *callback_fn;
1055         void *value;
1056 };
1057
1058 /* the actual struct hidden inside uapi struct bpf_timer */
1059 struct bpf_timer_kern {
1060         struct bpf_hrtimer *timer;
1061         /* bpf_spin_lock is used here instead of spinlock_t to make
1062          * sure that it always fits into the space reserved by struct bpf_timer
1063          * regardless of LOCKDEP and spinlock debug flags.
1064          */
1065         struct bpf_spin_lock lock;
1066 } __attribute__((aligned(8)));
1067
1068 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
1069
1070 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
1071 {
1072         struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
1073         struct bpf_map *map = t->map;
1074         void *value = t->value;
1075         bpf_callback_t callback_fn;
1076         void *key;
1077         u32 idx;
1078
1079         BTF_TYPE_EMIT(struct bpf_timer);
1080         callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
1081         if (!callback_fn)
1082                 goto out;
1083
1084         /* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
1085          * cannot be preempted by another bpf_timer_cb() on the same cpu.
1086          * Remember the timer this callback is servicing to prevent
1087          * deadlock if callback_fn() calls bpf_timer_cancel() or
1088          * bpf_map_delete_elem() on the same timer.
1089          */
1090         this_cpu_write(hrtimer_running, t);
1091         if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1092                 struct bpf_array *array = container_of(map, struct bpf_array, map);
1093
1094                 /* compute the key */
1095                 idx = ((char *)value - array->value) / array->elem_size;
1096                 key = &idx;
1097         } else { /* hash or lru */
1098                 key = value - round_up(map->key_size, 8);
1099         }
1100
1101         callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1102         /* The verifier checked that return value is zero. */
1103
1104         this_cpu_write(hrtimer_running, NULL);
1105 out:
1106         return HRTIMER_NORESTART;
1107 }
1108
1109 BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
1110            u64, flags)
1111 {
1112         clockid_t clockid = flags & (MAX_CLOCKS - 1);
1113         struct bpf_hrtimer *t;
1114         int ret = 0;
1115
1116         BUILD_BUG_ON(MAX_CLOCKS != 16);
1117         BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
1118         BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));
1119
1120         if (in_nmi())
1121                 return -EOPNOTSUPP;
1122
1123         if (flags >= MAX_CLOCKS ||
1124             /* similar to timerfd except _ALARM variants are not supported */
1125             (clockid != CLOCK_MONOTONIC &&
1126              clockid != CLOCK_REALTIME &&
1127              clockid != CLOCK_BOOTTIME))
1128                 return -EINVAL;
1129         __bpf_spin_lock_irqsave(&timer->lock);
1130         t = timer->timer;
1131         if (t) {
1132                 ret = -EBUSY;
1133                 goto out;
1134         }
1135         if (!atomic64_read(&map->usercnt)) {
1136                 /* maps with timers must be either held by user space
1137                  * or pinned in bpffs.
1138                  */
1139                 ret = -EPERM;
1140                 goto out;
1141         }
1142         /* allocate hrtimer via map_kmalloc to use memcg accounting */
1143         t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
1144         if (!t) {
1145                 ret = -ENOMEM;
1146                 goto out;
1147         }
1148         t->value = (void *)timer - map->timer_off;
1149         t->map = map;
1150         t->prog = NULL;
1151         rcu_assign_pointer(t->callback_fn, NULL);
1152         hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
1153         t->timer.function = bpf_timer_cb;
1154         timer->timer = t;
1155 out:
1156         __bpf_spin_unlock_irqrestore(&timer->lock);
1157         return ret;
1158 }
1159
1160 static const struct bpf_func_proto bpf_timer_init_proto = {
1161         .func           = bpf_timer_init,
1162         .gpl_only       = true,
1163         .ret_type       = RET_INTEGER,
1164         .arg1_type      = ARG_PTR_TO_TIMER,
1165         .arg2_type      = ARG_CONST_MAP_PTR,
1166         .arg3_type      = ARG_ANYTHING,
1167 };
1168
1169 BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
1170            struct bpf_prog_aux *, aux)
1171 {
1172         struct bpf_prog *prev, *prog = aux->prog;
1173         struct bpf_hrtimer *t;
1174         int ret = 0;
1175
1176         if (in_nmi())
1177                 return -EOPNOTSUPP;
1178         __bpf_spin_lock_irqsave(&timer->lock);
1179         t = timer->timer;
1180         if (!t) {
1181                 ret = -EINVAL;
1182                 goto out;
1183         }
1184         if (!atomic64_read(&t->map->usercnt)) {
1185                 /* maps with timers must be either held by user space
1186                  * or pinned in bpffs. Otherwise timer might still be
1187                  * running even when bpf prog is detached and user space
1188                  * is gone, since map_release_uref won't ever be called.
1189                  */
1190                 ret = -EPERM;
1191                 goto out;
1192         }
1193         prev = t->prog;
1194         if (prev != prog) {
1195                 /* Bump prog refcnt once. Every bpf_timer_set_callback()
1196                  * can pick different callback_fn-s within the same prog.
1197                  */
1198                 prog = bpf_prog_inc_not_zero(prog);
1199                 if (IS_ERR(prog)) {
1200                         ret = PTR_ERR(prog);
1201                         goto out;
1202                 }
1203                 if (prev)
1204                         /* Drop prev prog refcnt when swapping with new prog */
1205                         bpf_prog_put(prev);
1206                 t->prog = prog;
1207         }
1208         rcu_assign_pointer(t->callback_fn, callback_fn);
1209 out:
1210         __bpf_spin_unlock_irqrestore(&timer->lock);
1211         return ret;
1212 }
1213
1214 static const struct bpf_func_proto bpf_timer_set_callback_proto = {
1215         .func           = bpf_timer_set_callback,
1216         .gpl_only       = true,
1217         .ret_type       = RET_INTEGER,
1218         .arg1_type      = ARG_PTR_TO_TIMER,
1219         .arg2_type      = ARG_PTR_TO_FUNC,
1220 };
1221
1222 BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
1223 {
1224         struct bpf_hrtimer *t;
1225         int ret = 0;
1226
1227         if (in_nmi())
1228                 return -EOPNOTSUPP;
1229         if (flags)
1230                 return -EINVAL;
1231         __bpf_spin_lock_irqsave(&timer->lock);
1232         t = timer->timer;
1233         if (!t || !t->prog) {
1234                 ret = -EINVAL;
1235                 goto out;
1236         }
1237         hrtimer_start(&t->timer, ns_to_ktime(nsecs), HRTIMER_MODE_REL_SOFT);
1238 out:
1239         __bpf_spin_unlock_irqrestore(&timer->lock);
1240         return ret;
1241 }
1242
1243 static const struct bpf_func_proto bpf_timer_start_proto = {
1244         .func           = bpf_timer_start,
1245         .gpl_only       = true,
1246         .ret_type       = RET_INTEGER,
1247         .arg1_type      = ARG_PTR_TO_TIMER,
1248         .arg2_type      = ARG_ANYTHING,
1249         .arg3_type      = ARG_ANYTHING,
1250 };
1251
1252 static void drop_prog_refcnt(struct bpf_hrtimer *t)
1253 {
1254         struct bpf_prog *prog = t->prog;
1255
1256         if (prog) {
1257                 bpf_prog_put(prog);
1258                 t->prog = NULL;
1259                 rcu_assign_pointer(t->callback_fn, NULL);
1260         }
1261 }
1262
1263 BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
1264 {
1265         struct bpf_hrtimer *t;
1266         int ret = 0;
1267
1268         if (in_nmi())
1269                 return -EOPNOTSUPP;
1270         __bpf_spin_lock_irqsave(&timer->lock);
1271         t = timer->timer;
1272         if (!t) {
1273                 ret = -EINVAL;
1274                 goto out;
1275         }
1276         if (this_cpu_read(hrtimer_running) == t) {
1277                 /* If bpf callback_fn is trying to bpf_timer_cancel()
1278                  * its own timer the hrtimer_cancel() will deadlock
1279                  * since it waits for callback_fn to finish
1280                  */
1281                 ret = -EDEADLK;
1282                 goto out;
1283         }
1284         drop_prog_refcnt(t);
1285 out:
1286         __bpf_spin_unlock_irqrestore(&timer->lock);
1287         /* Cancel the timer and wait for associated callback to finish
1288          * if it was running.
1289          */
1290         ret = ret ?: hrtimer_cancel(&t->timer);
1291         return ret;
1292 }
1293
1294 static const struct bpf_func_proto bpf_timer_cancel_proto = {
1295         .func           = bpf_timer_cancel,
1296         .gpl_only       = true,
1297         .ret_type       = RET_INTEGER,
1298         .arg1_type      = ARG_PTR_TO_TIMER,
1299 };
1300
1301 /* This function is called by map_delete/update_elem for an individual element
1302  * and by ops->map_release_uref when the user space reference to a map reaches zero.
1303  */
1304 void bpf_timer_cancel_and_free(void *val)
1305 {
1306         struct bpf_timer_kern *timer = val;
1307         struct bpf_hrtimer *t;
1308
1309         /* Performance optimization: read timer->timer without lock first. */
1310         if (!READ_ONCE(timer->timer))
1311                 return;
1312
1313         __bpf_spin_lock_irqsave(&timer->lock);
1314         /* re-read it under lock */
1315         t = timer->timer;
1316         if (!t)
1317                 goto out;
1318         drop_prog_refcnt(t);
1319         /* The subsequent bpf_timer_start/cancel() helpers won't be able to use
1320          * this timer, since it won't be initialized.
1321          */
1322         timer->timer = NULL;
1323 out:
1324         __bpf_spin_unlock_irqrestore(&timer->lock);
1325         if (!t)
1326                 return;
1327         /* Cancel the timer and wait for callback to complete if it was running.
1328          * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
1329          * right after for both preallocated and non-preallocated maps.
1330          * The timer->timer = NULL was already done and no code path can
1331          * see address 't' anymore.
1332          *
1333          * Check that bpf_map_delete/update_elem() wasn't called from timer
1334          * callback_fn. In such case don't call hrtimer_cancel() (since it will
1335          * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
1336          * return -1). Though callback_fn is still running on this cpu it's
1337          * safe to do kfree(t) because bpf_timer_cb() read everything it needed
1338          * from 't'. The bpf subprog callback_fn won't be able to access 't',
1339          * since timer->timer = NULL was already done. The timer will be
1340          * effectively cancelled because bpf_timer_cb() will return
1341          * HRTIMER_NORESTART.
1342          */
1343         if (this_cpu_read(hrtimer_running) != t)
1344                 hrtimer_cancel(&t->timer);
1345         kfree(t);
1346 }
1347
1348 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1349 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1350 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1351 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1352 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1353 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1354 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1355
1356 const struct bpf_func_proto *
1357 bpf_base_func_proto(enum bpf_func_id func_id)
1358 {
1359         switch (func_id) {
1360         case BPF_FUNC_map_lookup_elem:
1361                 return &bpf_map_lookup_elem_proto;
1362         case BPF_FUNC_map_update_elem:
1363                 return &bpf_map_update_elem_proto;
1364         case BPF_FUNC_map_delete_elem:
1365                 return &bpf_map_delete_elem_proto;
1366         case BPF_FUNC_map_push_elem:
1367                 return &bpf_map_push_elem_proto;
1368         case BPF_FUNC_map_pop_elem:
1369                 return &bpf_map_pop_elem_proto;
1370         case BPF_FUNC_map_peek_elem:
1371                 return &bpf_map_peek_elem_proto;
1372         case BPF_FUNC_get_prandom_u32:
1373                 return &bpf_get_prandom_u32_proto;
1374         case BPF_FUNC_get_smp_processor_id:
1375                 return &bpf_get_raw_smp_processor_id_proto;
1376         case BPF_FUNC_get_numa_node_id:
1377                 return &bpf_get_numa_node_id_proto;
1378         case BPF_FUNC_tail_call:
1379                 return &bpf_tail_call_proto;
1380         case BPF_FUNC_ktime_get_ns:
1381                 return &bpf_ktime_get_ns_proto;
1382         case BPF_FUNC_ktime_get_boot_ns:
1383                 return &bpf_ktime_get_boot_ns_proto;
1384         case BPF_FUNC_ringbuf_output:
1385                 return &bpf_ringbuf_output_proto;
1386         case BPF_FUNC_ringbuf_reserve:
1387                 return &bpf_ringbuf_reserve_proto;
1388         case BPF_FUNC_ringbuf_submit:
1389                 return &bpf_ringbuf_submit_proto;
1390         case BPF_FUNC_ringbuf_discard:
1391                 return &bpf_ringbuf_discard_proto;
1392         case BPF_FUNC_ringbuf_query:
1393                 return &bpf_ringbuf_query_proto;
1394         case BPF_FUNC_for_each_map_elem:
1395                 return &bpf_for_each_map_elem_proto;
1396         case BPF_FUNC_loop:
1397                 return &bpf_loop_proto;
1398         case BPF_FUNC_strncmp:
1399                 return &bpf_strncmp_proto;
1400         default:
1401                 break;
1402         }
1403
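	/* All remaining helpers require CAP_BPF (or CAP_SYS_ADMIN). */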
1404         if (!bpf_capable())
1405                 return NULL;
1406
1407         switch (func_id) {
1408         case BPF_FUNC_spin_lock:
1409                 return &bpf_spin_lock_proto;
1410         case BPF_FUNC_spin_unlock:
1411                 return &bpf_spin_unlock_proto;
1412         case BPF_FUNC_jiffies64:
1413                 return &bpf_jiffies64_proto;
1414         case BPF_FUNC_per_cpu_ptr:
1415                 return &bpf_per_cpu_ptr_proto;
1416         case BPF_FUNC_this_cpu_ptr:
1417                 return &bpf_this_cpu_ptr_proto;
1418         case BPF_FUNC_timer_init:
1419                 return &bpf_timer_init_proto;
1420         case BPF_FUNC_timer_set_callback:
1421                 return &bpf_timer_set_callback_proto;
1422         case BPF_FUNC_timer_start:
1423                 return &bpf_timer_start_proto;
1424         case BPF_FUNC_timer_cancel:
1425                 return &bpf_timer_cancel_proto;
1426         default:
1427                 break;
1428         }
1429
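	/* The tracing-oriented helpers below additionally require CAP_PERFMON
	 * (or CAP_SYS_ADMIN).
	 */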
1430         if (!perfmon_capable())
1431                 return NULL;
1432
1433         switch (func_id) {
1434         case BPF_FUNC_trace_printk:
1435                 return bpf_get_trace_printk_proto();
1436         case BPF_FUNC_get_current_task:
1437                 return &bpf_get_current_task_proto;
1438         case BPF_FUNC_get_current_task_btf:
1439                 return &bpf_get_current_task_btf_proto;
1440         case BPF_FUNC_probe_read_user:
1441                 return &bpf_probe_read_user_proto;
1442         case BPF_FUNC_probe_read_kernel:
1443                 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1444                        NULL : &bpf_probe_read_kernel_proto;
1445         case BPF_FUNC_probe_read_user_str:
1446                 return &bpf_probe_read_user_str_proto;
1447         case BPF_FUNC_probe_read_kernel_str:
1448                 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1449                        NULL : &bpf_probe_read_kernel_str_proto;
1450         case BPF_FUNC_snprintf_btf:
1451                 return &bpf_snprintf_btf_proto;
1452         case BPF_FUNC_snprintf:
1453                 return &bpf_snprintf_proto;
1454         case BPF_FUNC_task_pt_regs:
1455                 return &bpf_task_pt_regs_proto;
1456         case BPF_FUNC_trace_vprintk:
1457                 return bpf_get_trace_vprintk_proto();
1458         default:
1459                 return NULL;
1460         }
1461 }