 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>

#include <asm/unaligned.h>
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
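
/* Illustrative note (not part of the original file): the macros above let
 * the interpreter below spell out instruction semantics almost literally.
 * For example, an eBPF add of two registers, BPF_ALU64 | BPF_ADD | BPF_X,
 * is executed as
 *
 *	DST = DST + SRC;
 *	// i.e. regs[insn->dst_reg] += regs[insn->src_reg];
 *
 * where 'insn' is the instruction being decoded, while FP, ARG1 and CTX
 * name fixed registers (frame pointer, first argument, context).
 */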
/* No hurry in this branch
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;
	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}
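
/* Hedged usage sketch (not from this file): classic BPF can load from
 * "negative" offsets to reach headers located before skb->data. For
 * example, SKF_NET_OFF + 1 addresses the second byte of the network
 * header (the TOS field for IPv4):
 *
 *	u8 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF + 1, 1);
 *	if (p)
 *		tos = *p;
 *
 * 'tos' is a hypothetical local variable; the NULL check is mandatory
 * since the helper refuses loads that fall outside
 * [skb->head, skb_tail_pointer(skb)).
 */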
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog_aux *aux;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);

	fp->pages = size / PAGE_SIZE;

EXPORT_SYMBOL_GPL(bpf_prog_alloc);
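
/* Hedged usage sketch (not from this file): callers normally size the
 * allocation with bpf_prog_size() so the instruction array fits behind
 * the bpf_prog header; 'fprog' here stands for a caller-provided classic
 * struct sock_fprog and is only illustrative:
 *
 *	struct bpf_prog *fp;
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
 *	if (fp == NULL)
 *		return -ENOMEM;
 *	fp->len = fprog->len;
 *	memcpy(fp->insns, fprog->filter, fp->len * sizeof(fp->insns[0]));
 *
 * The size is rounded up to whole pages and fp->pages remembers the page
 * count for later freeing.
 */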
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	if (size <= fp_old->pages * PAGE_SIZE)
		return fp_old;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);

	kmemcheck_annotate_bitfield(fp, meta);

	memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
	fp->pages = size / PAGE_SIZE;

	/* We keep fp->aux from fp_old around in the new
	 * reallocated structure.
	 */
	__bpf_prog_free(fp_old);

EXPORT_SYMBOL_GPL(bpf_prog_realloc);
void __bpf_prog_free(struct bpf_prog *fp)
EXPORT_SYMBOL_GPL(__bpf_prog_free);
static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       /* Call and Exit are both special jumps with no
		* target inside the BPF instruction image.
		*/
	       BPF_OP(insn->code) != BPF_CALL &&
	       BPF_OP(insn->code) != BPF_EXIT;
}
static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 i, insn_cnt = prog->len;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!bpf_is_jmp_and_has_target(insn))
			continue;

		/* Adjust offset of jmps if we cross boundaries. */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	bpf_adj_branches(prog_adj, off, insn_delta);

	return prog_adj;
}
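
/* Hedged usage sketch (not from this file): a caller that wants to grow
 * the instruction at index 'off' into a two-instruction sequence could do
 * something like the following (the patch contents are purely
 * illustrative):
 *
 *	struct bpf_insn patch[] = {
 *		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
 *		BPF_MOV64_IMM(BPF_REG_2, 42),
 *	};
 *	struct bpf_prog *new_prog;
 *
 *	new_prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *	if (!new_prog)
 *		return -ENOMEM;
 *
 * Since one instruction became two, bpf_adj_branches() corrects every
 * jump whose span crosses the patch site.
 */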
#ifdef CONFIG_BPF_JIT
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (prandom_u32() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}
#endif /* CONFIG_BPF_JIT */
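
/* Worked example (illustrative only) for the sizing done in
 * bpf_jit_binary_alloc(): with proglen = 3900 on a 4 KiB page,
 *
 *	size  = round_up(3900 + sizeof(*hdr) + 128, PAGE_SIZE) = 4096
 *	hole  = min(size - (proglen + sizeof(*hdr)), PAGE_SIZE - sizeof(*hdr))
 *	start = (prandom_u32() % hole) & ~(alignment - 1)
 *
 * so the JIT image begins at a random, suitably aligned offset inside the
 * slack, and the unused bytes around it stay filled with the illegal
 * instructions written by bpf_fill_ill_insns().
 */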
/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
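
/* Illustrative note (not part of the original file): __bpf_call_base only
 * anchors the eBPF helper calling convention. A BPF_CALL instruction with
 * a relative immediate is resolved and invoked roughly as
 *
 *	u64 (*fn)(u64, u64, u64, u64, u64);
 *
 *	fn = (void *) (__bpf_call_base + insn->imm);
 *	BPF_R0 = fn(BPF_R1, BPF_R2, BPF_R3, BPF_R4, BPF_R5);
 *
 * R1-R5 are scratched by the call, R6-R9 are preserved, and the helper's
 * return value lands in R0, exactly as the interpreter does below.
 */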
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
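		/* Illustrative note (not in the original): BPF_LD | BPF_IMM |
		 * BPF_DW is the only 16-byte eBPF instruction; the 64-bit
		 * constant is split over two consecutive slots. Loading
		 * 0x1122334455667788 is encoded as
		 *
		 *	insn[0].imm = 0x55667788;	// low 32 bits
		 *	insn[1].imm = 0x11223344;	// high 32 bits
		 *
		 * which is why this handler advances insn by one extra slot
		 * before continuing.
		 */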
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
	ALU_MOD_X:
		if (unlikely((u32)SRC == 0))
			return 0;
		DST = do_div(tmp, (u32) SRC);
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
	ALU_MOD_K:
		DST = do_div(tmp, (u32) IMM);
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
	ALU_DIV_X:
		if (unlikely((u32)SRC == 0))
			return 0;
		do_div(tmp, (u32) SRC);
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
	ALU_DIV_K:
		do_div(tmp, (u32) IMM);
			DST = (__force u16) cpu_to_be16(DST);
			DST = (__force u32) cpu_to_be32(DST);
			DST = (__force u64) cpu_to_be64(DST);

			DST = (__force u16) cpu_to_le16(DST);
			DST = (__force u32) cpu_to_le32(DST);
			DST = (__force u64) cpu_to_le64(DST);
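		/* Illustrative note (not in the original): the six assignments
		 * above are the case 16/32/64 arms of the ALU_END_TO_BE and
		 * ALU_END_TO_LE handlers for BPF_ALU | BPF_END; the immediate
		 * selects the width. For instance
		 *
		 *	BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16)
		 *
		 * byte-swaps the low 16 bits of R0 on a little-endian host
		 * (htons-like), while BPF_TO_LE leaves the byte order alone
		 * there; bits above the chosen width are cleared by the cast.
		 */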
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u64 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;

		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		prog = READ_ONCE(array->ptrs[index]);

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
	load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are
		 * only appearing in the programs where ctx ==
		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
		 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
		 * internal BPF verifier will check that BPF_R6 ==
		 * ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);

	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);

	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {

	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
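		/* Illustrative note (not in the original): these handlers back
		 * the classic BPF packet loads. A tcpdump-style "ldh [12]"
		 * (fetch the EtherType halfword) is converted by
		 * bpf_convert_filter() into
		 *
		 *	BPF_LD_ABS(BPF_H, 12)
		 *
		 * which lands in LD_ABS_H above: CTX/BPF_R6 holds the skb and
		 * the loaded value is returned in BPF_R0 in host byte order.
		 */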
	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}
static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}
/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
int bpf_prog_select_runtime(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	fp->bpf_func = (void *) __bpf_prog_run;
#else
	fp->bpf_func = (void *) __bpf_prog_ret0;
#endif

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	bpf_int_jit_compile(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
	if (!fp->jited)
		return -ENOTSUPP;
#endif
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine, if we deal
	 * with JITed or non JITed program concatenations and not
	 * all eBPF JITs might immediately support all features.
	 */
	return bpf_check_tail_call(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
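
/* Hedged sketch (not from this file) of the surrounding life cycle: after
 * the program has been allocated and its instructions verified, a caller
 * typically does
 *
 *	err = bpf_prog_select_runtime(fp);	// pick JIT or interpreter
 *	if (err)
 *		goto free_prog;
 *
 *	ret = BPF_PROG_RUN(fp, skb);		// (*fp->bpf_func)(ctx, fp->insnsi)
 *	...
 *	bpf_prog_free(fp);			// deferred via schedule_work()
 *
 * where 'free_prog' is a hypothetical error label. BPF_PROG_RUN()
 * dispatches either to the JITed image or to the __bpf_prog_run()
 * interpreter selected above.
 */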
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}
/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}
/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)