/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Jay Schulist <jschlst@samba.org>
 * Alexei Starovoitov <ast@plumgrid.com>
 * Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>

#include <asm/unaligned.h>
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
	if (k >= SKF_NET_OFF) {
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	} else if (k >= SKF_LL_OFF) {
		if (unlikely(!skb_mac_header_was_set(skb)))
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
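/* Illustrative sketch, not part of the original file: how a cBPF-style
 * caller might use the helper above to service a load at a negative
 * offset. The function name and the offset of 9 are hypothetical; the
 * SKF_NET_OFF constant comes from <uapi/linux/filter.h>.
 */
static inline u8 example_load_ip_proto(const struct sk_buff *skb)
{
	/* k = SKF_NET_OFF + 9 addresses byte 9 relative to the network
	 * header, i.e. the IPv4 protocol field.
	 */
	const void *ptr = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 9, 1);

	return ptr ? *(const u8 *)ptr : 0;
}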
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);

	fp->pages = size / PAGE_SIZE;

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
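/* Illustrative sketch (hypothetical caller, not in the original file):
 * allocating a program large enough for 'len' instructions. bpf_prog_size()
 * from <linux/filter.h> accounts for the struct bpf_prog header plus the
 * instruction array; the allocation is rounded up to whole pages above.
 */
static struct bpf_prog *example_prog_alloc(unsigned int len)
{
	struct bpf_prog *fp = bpf_prog_alloc(bpf_prog_size(len), 0);

	if (fp)
		fp->len = len;
	return fp;
}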
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);

	/* We keep fp->aux from fp_old around in the new
	 * reallocated structure.
	 */
	__bpf_prog_free(fp_old);

void __bpf_prog_free(struct bpf_prog *fp)
int bpf_prog_calc_tag(struct bpf_prog *fp)
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;

	raw = vmalloc(raw_size);

	memset(ws, 0, sizeof(ws));
	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
		} else if (was_ld_map &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
		bits = (__be64 *)(todo + bsize + bits_offset);
	*bits = cpu_to_be64((psize - 1) << 3);

		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       /* Call and Exit are both special jumps with no
		* target inside the BPF instruction image.
		*/
	       BPF_OP(insn->code) != BPF_CALL &&
	       BPF_OP(insn->code) != BPF_EXIT;
static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
	const s32 off_min = S16_MIN, off_max = S16_MAX;

	if (curr < pos && curr + off + 1 > pos)
	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
	if (off < off_min || off > off_max)

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
			    const bool probe_pass)
	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
	struct bpf_insn *insn = prog->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
		if (!bpf_is_jmp_and_has_target(insn))
		/* Adjust offset of jmps if we cross patch boundaries. */
		ret = bpf_adj_delta_to_off(insn, pos, delta, i, probe_pass);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    bpf_adj_branches(prog, off, insn_delta, true))

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
	/* We are guaranteed not to fail at this point, otherwise
	 * the ship has sailed and there is no way back to the
	 * original state. An overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
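/* Illustrative sketch (hypothetical rewrite, not in the original file):
 * replacing the instruction at 'off' with a two-insn sequence, the way a
 * verifier-style fixup would. On success the old 'prog' pointer must not
 * be used anymore, since the program may have been reallocated.
 */
static struct bpf_prog *example_expand_insn(struct bpf_prog *prog, u32 off)
{
	const struct bpf_insn patch[] = {
		BPF_MOV64_IMM(BPF_REG_AX, 0),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_AX),
	};

	return bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
}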
#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
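/* Worked example: a program whose tag bytes are 54 2d c1 8a 03 57 3f 80
 * (a hypothetical value) shows up in /proc/kallsyms under the symbol
 * name "bpf_prog_542dc18a03573f80".
 */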
static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
	if (val >= symbol_end)

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;
static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
	if (list_empty(&aux->ksym_lnode))

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
	return fp->jited && !bpf_prog_was_classic(fp);

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
	if (!bpf_prog_kallsyms_candidate(fp))

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;

	prog = bpf_prog_kallsyms_find(addr);
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

			*size = symbol_end - symbol_start;
			*off  = addr - symbol_start;

bool is_bpf_text_address(unsigned long addr)
	ret = bpf_prog_kallsyms_find(addr) != NULL;

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
	unsigned long symbol_start, symbol_end;
	struct bpf_prog_aux *aux;

	if (!bpf_jit_kallsyms_enabled())

	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(aux->prog, sym);

		*value = symbol_start;
		*type  = BPF_SYM_ELF_TYPE;
static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
	return VMALLOC_END - VMALLOC_START;

static int __init bpf_jit_charge_init(void)
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
					    PAGE_SIZE), LONG_MAX);
pure_initcall(bpf_jit_charge_init);
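/* Worked example (hypothetical arch values): with a 1 GiB module area,
 * bpf_jit_limit_max is 1 GiB and the default bpf_jit_limit becomes
 * round_up(1 GiB >> 1, PAGE_SIZE) = 512 MiB, i.e. by default at most
 * half of the executable region may be consumed by JITed BPF images.
 */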
static int bpf_jit_charge_modmem(u32 pages)
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);

static void bpf_jit_uncharge_modmem(u32 pages)
	atomic_long_sub(pages, &bpf_jit_current);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;
	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
	hdr = module_alloc(size);
		bpf_jit_uncharge_modmem(pages);

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];
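/* Worked example (hypothetical numbers): with proglen = 100, a 4K page
 * and sizeof(*hdr) = 32, size rounds up to 4096 and
 * hole = min(4096 - 132, 4096 - 32) = 3964, so the JIT image may start
 * at any alignment-masked offset within those 3964 bytes.
 */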
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
	u32 pages = hdr->pages;

	bpf_jit_uncharge_modmem(pages);

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));

	bpf_prog_unlock_free(fp);
static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);

	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
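/* Example of the resulting rewrite: with imm_rnd = 0x12345678 (a
 * hypothetical random value), the single insn
 *
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 42)
 *
 * is blinded into the constant-free sequence
 *
 *   BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x12345678 ^ 42)
 *   BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x12345678)
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_AX)
 *
 * so the user-supplied immediate never appears verbatim in the JIT image.
 */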
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);

static void bpf_prog_clone_free(struct bpf_prog *fp)
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	bpf_prog_clone_free(fp_other);
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;

	if (!bpf_jit_blinding_enabled())

	clone = bpf_prog_clone_create(prog, GFP_USER);
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);

		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;

#endif /* CONFIG_BPF_JIT */
/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
EXPORT_SYMBOL_GPL(__bpf_call_base);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
		[BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
		[BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
		[BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
		[BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	goto *jumptable[insn->code];
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;

		(*(s64 *) &DST) >>= SRC;
		(*(s64 *) &DST) >>= IMM;

		div64_u64_rem(DST, SRC, &tmp);
		DST = do_div(tmp, (u32) SRC);
		div64_u64_rem(DST, IMM, &tmp);
		DST = do_div(tmp, (u32) IMM);
		DST = div64_u64(DST, SRC);
		do_div(tmp, (u32) SRC);
		DST = div64_u64(DST, IMM);
		do_div(tmp, (u32) IMM);

		DST = (__force u16) cpu_to_be16(DST);
		DST = (__force u32) cpu_to_be32(DST);
		DST = (__force u64) cpu_to_be64(DST);
		DST = (__force u16) cpu_to_le16(DST);
		DST = (__force u32) cpu_to_le32(DST);
		DST = (__force u64) cpu_to_le64(DST);
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);

		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;

		if (unlikely(index >= array->map.max_entries))
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))

		prog = READ_ONCE(array->ptrs[index]);
		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		if (((s64) DST) > ((s64) SRC)) {
		if (((s64) DST) > ((s64) IMM)) {
		if (((s64) DST) < ((s64) SRC)) {
		if (((s64) DST) < ((s64) IMM)) {
		if (((s64) DST) >= ((s64) SRC)) {
		if (((s64) DST) >= ((s64) IMM)) {
		if (((s64) DST) <= ((s64) SRC)) {
		if (((s64) DST) <= ((s64) IMM)) {
	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */

		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
		 * appearing in the programs where ctx == skb
		 * (see may_access_skb() in the verifier). All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
		 * bpf_convert_filter() saves it in BPF_R6, internal BPF
		 * verifier will check that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);

	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);

	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;

	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */

		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);

STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
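/* Worked example: a program with aux->stack_depth = 65 rounds up to 96,
 * so bpf_prog_select_runtime() below picks interpreters[96 / 32 - 1],
 * i.e. __bpf_prog_run96(), which gives the program a 96-byte BPF stack.
 */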
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
static int bpf_check_tail_call(const struct bpf_prog *fp)
	struct bpf_prog_aux *aux = fp->aux;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but fall back to the interpreter.
	 */
	fp = bpf_int_jit_compile(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON

	bpf_prog_lock_ro(fp);
	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
static void bpf_prog_free_deferred(struct work_struct *work)
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
EXPORT_SYMBOL_GPL(bpf_prog_free);
/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
	prandom_init_once(&bpf_user_rnd_state);

BPF_CALL_0(bpf_user_rnd_u32)
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);
/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_sock_map_update_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)

bool __weak bpf_helper_changes_pkt_data(void *func)

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);

EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);