1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
11 */
12 #include <linux/kernel.h>
13 #include <linux/types.h>
14 #include <linux/slab.h>
15 #include <linux/bpf.h>
16 #include <linux/filter.h>
17 #include <net/netlink.h>
18 #include <linux/file.h>
19 #include <linux/vmalloc.h>
21 /* bpf_check() is a static code analyzer that walks eBPF program
22 * instruction by instruction and updates register/stack state.
23 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
25 * The first pass is depth-first-search to check that the program is a DAG.
26 * It rejects the following programs:
27 * - larger than BPF_MAXINSNS insns
28 * - if loop is present (detected via back-edge)
29 * - unreachable insns exist (shouldn't be a forest. program = one function)
30 * - out of bounds or malformed jumps
31 * The second pass is all possible path descent from the 1st insn.
32 * Since it's analyzing all paths through the program, the length of the
33 * analysis is limited to 32k insn, which may be hit even if the total number of
34 * insns is less than 4K, but there are too many branches that change stack/regs.
35 * Number of 'branches to be analyzed' is limited to 1k
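*
* For example, a minimal (hypothetical) two-insn program
*   BPF_JMP_IMM(BPF_JA, 0, 0, -1), // unconditional jump back to itself
*   BPF_EXIT_INSN(),
* is rejected by the first pass: the self-directed jump forms a back-edge,
* i.e. a loop, so the program is not a DAG.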
37 * On entry to each instruction, each register has a type, and the instruction
38 * changes the types of the registers depending on instruction semantics.
39 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
40 * copied to R1.
42 * All registers are 64-bit.
43 * R0 - return register
44 * R1-R5 argument passing registers
45 * R6-R9 callee saved registers
46 * R10 - frame pointer read-only
48 * At the start of BPF program the register R1 contains a pointer to bpf_context
49 * and has type PTR_TO_CTX.
51 * Verifier tracks arithmetic operations on pointers in case:
52 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
53 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
54 * 1st insn copies R10 (which has FRAME_PTR) type into R1
55 * and 2nd arithmetic instruction is pattern matched to recognize
56 * that it wants to construct a pointer to some element within stack.
57 * So after 2nd insn, the register R1 has type PTR_TO_STACK
58 * (and -20 constant is saved for further stack bounds checking).
59 * Meaning that this reg is a pointer to stack plus known immediate constant.
61 * Most of the time the registers have UNKNOWN_VALUE type, which
62 * means the register has some value, but it's not a valid pointer.
63 * (like pointer plus pointer becomes UNKNOWN_VALUE type)
65 * When verifier sees load or store instructions the type of base register
66 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
67 * types recognized by check_mem_access() function.
69 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
70 * and the range of [ptr, ptr + map's value_size) is accessible.
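*
* For example, assuming a (hypothetical) map with value_size == 8, the insn
*   BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), // R1 = *(u64 *)(R0 + 0)
* is accepted while R0 is PTR_TO_MAP_VALUE, since [R0, R0 + 8) is in range,
* whereas the same load with offset 8 is rejected by check_map_access().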
72 * registers used to pass values to function calls are checked against
73 * function argument constraints.
75 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
76 * It means that the register type passed to this function must be
77 * PTR_TO_STACK and it will be used inside the function as
78 * 'pointer to map element key'
80 * For example the argument constraints for bpf_map_lookup_elem():
81 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
82 * .arg1_type = ARG_CONST_MAP_PTR,
83 * .arg2_type = ARG_PTR_TO_MAP_KEY,
85 * ret_type says that this function returns 'pointer to map elem value or null'
86 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
87 * 2nd argument should be a pointer to stack, which will be used inside
88 * the helper function as a pointer to map element key.
90 * On the kernel side the helper function looks like:
91 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
93 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
94 * void *key = (void *) (unsigned long) r2;
97 * here kernel can access 'key' and 'map' pointers safely, knowing that
98 * [key, key + map->key_size) bytes are valid and were initialized on
99 * the stack of eBPF program.
102 * Corresponding eBPF program may look like:
103 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
104 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
105 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
106 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
107 * here verifier looks at prototype of map_lookup_elem() and sees:
108 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
109 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
111 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
112 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
113 * and were initialized prior to this call.
114 * If it's ok, then verifier allows this BPF_CALL insn and looks at
115 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
116 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
117 * returns either a pointer to the map value or NULL.
119 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
120 * insn, the register holding that pointer in the true branch changes state to
121 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
122 * branch. See check_cond_jmp_op().
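*
* For example, a (hypothetical) null check following the lookup above:
*   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), // if R0 == NULL skip the store
*   BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),    // safe: R0 is PTR_TO_MAP_VALUE here
*   BPF_EXIT_INSN(),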
124 * After the call R0 is set to return type of the function and registers R1-R5
125 * are set to NOT_INIT to indicate that they are no longer readable.
126 */
128 /* types of values stored in eBPF registers */
129 enum bpf_reg_type {
130 NOT_INIT = 0, /* nothing was written into register */
131 UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
132 PTR_TO_CTX, /* reg points to bpf_context */
133 CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
134 PTR_TO_MAP_VALUE, /* reg points to map element value */
135 PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
136 FRAME_PTR, /* reg == frame_pointer */
137 PTR_TO_STACK, /* reg == frame_pointer + imm */
138 CONST_IMM, /* constant integer value */
139 };
141 struct reg_state {
142 enum bpf_reg_type type;
144 /* valid when type == CONST_IMM | PTR_TO_STACK */
145 int imm;
147 /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
148 * PTR_TO_MAP_VALUE_OR_NULL
149 */
150 struct bpf_map *map_ptr;
151 };
154 enum bpf_stack_slot_type {
155 STACK_INVALID, /* nothing was stored in this stack slot */
156 STACK_SPILL, /* register spilled into stack */
157 STACK_MISC /* BPF program wrote some data into this slot */
158 };
160 #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
162 /* state of the program:
163 * type of all registers and stack info
164 */
165 struct verifier_state {
166 struct reg_state regs[MAX_BPF_REG];
167 u8 stack_slot_type[MAX_BPF_STACK];
168 struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
169 };
171 /* linked list of verifier states used to prune search */
172 struct verifier_state_list {
173 struct verifier_state state;
174 struct verifier_state_list *next;
175 };
177 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
178 struct verifier_stack_elem {
179 /* verifier state is 'st'
180 * before processing instruction 'insn_idx'
181 * and after processing instruction 'prev_insn_idx'
183 struct verifier_state st;
184 int insn_idx;
185 int prev_insn_idx;
186 struct verifier_stack_elem *next;
187 };
189 struct bpf_insn_aux_data {
190 union {
191 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
192 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
193 };
194 int sanitize_stack_off; /* stack slot to be cleared */
195 bool seen; /* this insn was processed by the verifier */
196 };
198 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
200 /* single container for all structs
201 * one verifier_env per bpf_check() call
202 */
203 struct verifier_env {
204 struct bpf_prog *prog; /* eBPF program being verified */
205 struct verifier_stack_elem *head; /* stack of verifier states to be processed */
206 int stack_size; /* number of states to be processed */
207 struct verifier_state cur_state; /* current verifier state */
208 struct verifier_state_list **explored_states; /* search pruning optimization */
209 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
210 u32 used_map_cnt; /* number of used maps */
211 bool allow_ptr_leaks;
212 struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
213 };
215 /* verbose verifier prints what it's seeing
216 * bpf_check() is called under lock, so no race to access these global vars
217 */
218 static u32 log_level, log_size, log_len;
219 static char *log_buf;
221 static DEFINE_MUTEX(bpf_verifier_lock);
223 /* log_level controls verbosity level of eBPF verifier.
224 * verbose() is used to dump the verification trace to the log, so the user
225 * can figure out what's wrong with the program
226 */
227 static __printf(1, 2) void verbose(const char *fmt, ...)
228 {
229 va_list args;
231 if (log_level == 0 || log_len >= log_size - 1)
232 return;
234 va_start(args, fmt);
235 log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
236 va_end(args);
237 }
239 /* string representation of 'enum bpf_reg_type' */
240 static const char * const reg_type_str[] = {
241 [NOT_INIT] = "?",
242 [UNKNOWN_VALUE] = "inv",
243 [PTR_TO_CTX] = "ctx",
244 [CONST_PTR_TO_MAP] = "map_ptr",
245 [PTR_TO_MAP_VALUE] = "map_value",
246 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
248 [PTR_TO_STACK] = "fp",
249 [CONST_IMM] = "imm",
250 };
252 static void print_verifier_state(struct verifier_env *env)
253 {
254 enum bpf_reg_type t;
255 int i;
257 for (i = 0; i < MAX_BPF_REG; i++) {
258 t = env->cur_state.regs[i].type;
259 if (t == NOT_INIT)
260 continue;
261 verbose(" R%d=%s", i, reg_type_str[t]);
262 if (t == CONST_IMM || t == PTR_TO_STACK)
263 verbose("%d", env->cur_state.regs[i].imm);
264 else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
265 t == PTR_TO_MAP_VALUE_OR_NULL)
266 verbose("(ks=%d,vs=%d)",
267 env->cur_state.regs[i].map_ptr->key_size,
268 env->cur_state.regs[i].map_ptr->value_size);
270 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
271 if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
272 verbose(" fp%d=%s", -MAX_BPF_STACK + i,
273 reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
274 }
275 verbose("\n");
276 }
278 static const char *const bpf_class_string[] = {
279 [BPF_LD] = "ld",
280 [BPF_LDX] = "ldx",
281 [BPF_ST] = "st",
282 [BPF_STX] = "stx",
283 [BPF_ALU] = "alu",
284 [BPF_JMP] = "jmp",
285 [BPF_RET] = "BUG",
286 [BPF_ALU64] = "alu64",
287 };
289 static const char *const bpf_alu_string[16] = {
290 [BPF_ADD >> 4] = "+=",
291 [BPF_SUB >> 4] = "-=",
292 [BPF_MUL >> 4] = "*=",
293 [BPF_DIV >> 4] = "/=",
294 [BPF_OR >> 4] = "|=",
295 [BPF_AND >> 4] = "&=",
296 [BPF_LSH >> 4] = "<<=",
297 [BPF_RSH >> 4] = ">>=",
298 [BPF_NEG >> 4] = "neg",
299 [BPF_MOD >> 4] = "%=",
300 [BPF_XOR >> 4] = "^=",
301 [BPF_MOV >> 4] = "=",
302 [BPF_ARSH >> 4] = "s>>=",
303 [BPF_END >> 4] = "endian",
304 };
306 static const char *const bpf_ldst_string[] = {
307 [BPF_W >> 3] = "u32",
308 [BPF_H >> 3] = "u16",
309 [BPF_B >> 3] = "u8",
310 [BPF_DW >> 3] = "u64",
311 };
313 static const char *const bpf_jmp_string[16] = {
314 [BPF_JA >> 4] = "jmp",
315 [BPF_JEQ >> 4] = "==",
316 [BPF_JGT >> 4] = ">",
317 [BPF_JGE >> 4] = ">=",
318 [BPF_JSET >> 4] = "&",
319 [BPF_JNE >> 4] = "!=",
320 [BPF_JSGT >> 4] = "s>",
321 [BPF_JSGE >> 4] = "s>=",
322 [BPF_CALL >> 4] = "call",
323 [BPF_EXIT >> 4] = "exit",
324 };
326 static void print_bpf_insn(const struct verifier_env *env,
327 const struct bpf_insn *insn)
329 u8 class = BPF_CLASS(insn->code);
331 if (class == BPF_ALU || class == BPF_ALU64) {
332 if (BPF_SRC(insn->code) == BPF_X)
333 verbose("(%02x) %sr%d %s %sr%d\n",
334 insn->code, class == BPF_ALU ? "(u32) " : "",
336 bpf_alu_string[BPF_OP(insn->code) >> 4],
337 class == BPF_ALU ? "(u32) " : "",
340 verbose("(%02x) %sr%d %s %s%d\n",
341 insn->code, class == BPF_ALU ? "(u32) " : "",
343 bpf_alu_string[BPF_OP(insn->code) >> 4],
344 class == BPF_ALU ? "(u32) " : "",
346 } else if (class == BPF_STX) {
347 if (BPF_MODE(insn->code) == BPF_MEM)
348 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
350 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
352 insn->off, insn->src_reg);
353 else if (BPF_MODE(insn->code) == BPF_XADD)
354 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
356 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
357 insn->dst_reg, insn->off,
360 verbose("BUG_%02x\n", insn->code);
361 } else if (class == BPF_ST) {
362 if (BPF_MODE(insn->code) != BPF_MEM) {
363 verbose("BUG_st_%02x\n", insn->code);
366 verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
368 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
370 insn->off, insn->imm);
371 } else if (class == BPF_LDX) {
372 if (BPF_MODE(insn->code) != BPF_MEM) {
373 verbose("BUG_ldx_%02x\n", insn->code);
376 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
377 insn->code, insn->dst_reg,
378 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
379 insn->src_reg, insn->off);
380 } else if (class == BPF_LD) {
381 if (BPF_MODE(insn->code) == BPF_ABS) {
382 verbose("(%02x) r0 = *(%s *)skb[%d]\n",
384 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
386 } else if (BPF_MODE(insn->code) == BPF_IND) {
387 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
389 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
390 insn->src_reg, insn->imm);
391 } else if (BPF_MODE(insn->code) == BPF_IMM &&
392 BPF_SIZE(insn->code) == BPF_DW) {
393 /* At this point, we already made sure that the second
394 * part of the ldimm64 insn is accessible.
396 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
397 bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
399 if (map_ptr && !env->allow_ptr_leaks)
400 imm = 0;
402 verbose("(%02x) r%d = 0x%llx\n", insn->code,
403 insn->dst_reg, (unsigned long long)imm);
405 verbose("BUG_ld_%02x\n", insn->code);
408 } else if (class == BPF_JMP) {
409 u8 opcode = BPF_OP(insn->code);
411 if (opcode == BPF_CALL) {
412 verbose("(%02x) call %d\n", insn->code, insn->imm);
413 } else if (insn->code == (BPF_JMP | BPF_JA)) {
414 verbose("(%02x) goto pc%+d\n",
415 insn->code, insn->off);
416 } else if (insn->code == (BPF_JMP | BPF_EXIT)) {
417 verbose("(%02x) exit\n", insn->code);
418 } else if (BPF_SRC(insn->code) == BPF_X) {
419 verbose("(%02x) if r%d %s r%d goto pc%+d\n",
420 insn->code, insn->dst_reg,
421 bpf_jmp_string[BPF_OP(insn->code) >> 4],
422 insn->src_reg, insn->off);
424 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
425 insn->code, insn->dst_reg,
426 bpf_jmp_string[BPF_OP(insn->code) >> 4],
427 insn->imm, insn->off);
430 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
434 static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
436 struct verifier_stack_elem *elem;
437 int insn_idx;
439 if (env->head == NULL)
440 return -1;
442 memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
443 insn_idx = env->head->insn_idx;
444 if (prev_insn_idx)
445 *prev_insn_idx = env->head->prev_insn_idx;
446 elem = env->head->next;
453 static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx,
454 int prev_insn_idx)
455 {
456 struct verifier_stack_elem *elem;
458 elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL);
462 memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
463 elem->insn_idx = insn_idx;
464 elem->prev_insn_idx = prev_insn_idx;
465 elem->next = env->head;
466 env->head = elem;
467 env->stack_size++;
468 if (env->stack_size > 1024) {
469 verbose("BPF program is too complex\n");
474 /* pop all elements and return */
475 while (pop_stack(env, NULL) >= 0);
479 #define CALLER_SAVED_REGS 6
480 static const int caller_saved[CALLER_SAVED_REGS] = {
481 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
484 static void init_reg_state(struct reg_state *regs)
488 for (i = 0; i < MAX_BPF_REG; i++) {
489 regs[i].type = NOT_INIT;
490 regs[i].imm = 0;
491 regs[i].map_ptr = NULL;
492 }
494 /* frame pointer */
495 regs[BPF_REG_FP].type = FRAME_PTR;
497 /* 1st arg to a function */
498 regs[BPF_REG_1].type = PTR_TO_CTX;
501 static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
503 BUG_ON(regno >= MAX_BPF_REG);
504 regs[regno].type = UNKNOWN_VALUE;
505 regs[regno].imm = 0;
506 regs[regno].map_ptr = NULL;
507 }
509 enum reg_arg_type {
510 SRC_OP, /* register is used as source operand */
511 DST_OP, /* register is used as destination operand */
512 DST_OP_NO_MARK /* same as above, check only, don't mark */
513 };
515 static int check_reg_arg(struct reg_state *regs, u32 regno,
516 enum reg_arg_type t)
517 {
518 if (regno >= MAX_BPF_REG) {
519 verbose("R%d is invalid\n", regno);
524 /* check whether register used as source operand can be read */
525 if (regs[regno].type == NOT_INIT) {
526 verbose("R%d !read_ok\n", regno);
530 /* check whether register used as dest operand can be written to */
531 if (regno == BPF_REG_FP) {
532 verbose("frame pointer is read only\n");
536 mark_reg_unknown_value(regs, regno);
541 static int bpf_size_to_bytes(int bpf_size)
542 {
543 if (bpf_size == BPF_W)
544 return 4;
545 else if (bpf_size == BPF_H)
546 return 2;
547 else if (bpf_size == BPF_B)
548 return 1;
549 else if (bpf_size == BPF_DW)
550 return 8;
551 else
552 return -EINVAL;
553 }
555 static bool is_spillable_regtype(enum bpf_reg_type type)
556 {
557 switch (type) {
558 case PTR_TO_MAP_VALUE:
559 case PTR_TO_MAP_VALUE_OR_NULL:
560 case PTR_TO_STACK:
561 case PTR_TO_CTX:
562 case FRAME_PTR:
563 case CONST_PTR_TO_MAP:
564 return true;
565 default:
566 return false;
567 }
568 }
570 /* check_stack_read/write functions track spill/fill of registers,
571 * stack boundary and alignment are checked in check_mem_access()
572 */
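/* For example, a (hypothetical) spill/fill pair tracked by these functions:
 *   BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // spill R1 to fp-8
 *   BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), // fill R1 back from fp-8
 * If R1 held a spillable pointer (say PTR_TO_CTX), all eight slot bytes are
 * marked STACK_SPILL and the register state is saved in spilled_regs[], so
 * the fill restores the exact pointer type rather than an UNKNOWN_VALUE.
 */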
573 static int check_stack_write(struct verifier_env *env,
574 struct verifier_state *state, int off,
575 int size, int value_regno, int insn_idx)
577 int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;
578 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
579 * so it's aligned access and [off, off + size) are within stack limits
582 if (value_regno >= 0 &&
583 is_spillable_regtype(state->regs[value_regno].type)) {
585 /* register containing pointer is being spilled into stack */
586 if (size != BPF_REG_SIZE) {
587 verbose("invalid size of register spill\n");
591 /* save register state */
592 state->spilled_regs[spi] = state->regs[value_regno];
594 for (i = 0; i < BPF_REG_SIZE; i++) {
595 if (state->stack_slot_type[MAX_BPF_STACK + off + i] == STACK_MISC &&
596 !env->allow_ptr_leaks) {
597 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
598 int soff = (-spi - 1) * BPF_REG_SIZE;
600 /* detected reuse of integer stack slot with a pointer
601 * which means either llvm is reusing stack slot or
602 * an attacker is trying to exploit CVE-2018-3639
603 * (speculative store bypass)
604 * Have to sanitize that slot with preemptive
605 * store of zero.
606 */
607 if (*poff && *poff != soff) {
608 /* disallow programs where single insn stores
609 * into two different stack slots, since verifier
610 * cannot sanitize them
612 verbose("insn %d cannot access two stack slots fp%d and fp%d",
613 insn_idx, *poff, soff);
614 return -EINVAL;
615 }
616 *poff = soff;
617 }
618 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
619 }
620 } else {
621 /* regular write of data into stack */
622 state->spilled_regs[spi] = (struct reg_state) {};
624 for (i = 0; i < size; i++)
625 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
630 static int check_stack_read(struct verifier_state *state, int off, int size,
631 int value_regno)
632 {
633 u8 *slot_type;
634 int i;
636 slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
638 if (slot_type[0] == STACK_SPILL) {
639 if (size != BPF_REG_SIZE) {
640 verbose("invalid size of register spill\n");
643 for (i = 1; i < BPF_REG_SIZE; i++) {
644 if (slot_type[i] != STACK_SPILL) {
645 verbose("corrupted spill memory\n");
650 if (value_regno >= 0)
651 /* restore register state from stack */
652 state->regs[value_regno] =
653 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
654 return 0;
655 } else {
656 for (i = 0; i < size; i++) {
657 if (slot_type[i] != STACK_MISC) {
658 verbose("invalid read from stack off %d+%d size %d\n",
663 if (value_regno >= 0)
664 /* have read misc data from the stack */
665 mark_reg_unknown_value(state->regs, value_regno);
670 /* check read/write into map element returned by bpf_map_lookup_elem() */
671 static int check_map_access(struct verifier_env *env, u32 regno, int off,
672 int size)
673 {
674 struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
676 if (off < 0 || off + size > map->value_size) {
677 verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
678 map->value_size, off, size);
679 return -EACCES;
680 }
681 return 0;
682 }
684 /* check access to 'struct bpf_context' fields */
685 static int check_ctx_access(struct verifier_env *env, int off, int size,
686 enum bpf_access_type t)
688 if (env->prog->aux->ops->is_valid_access &&
689 env->prog->aux->ops->is_valid_access(off, size, t))
690 return 0;
692 verbose("invalid bpf_context access off=%d size=%d\n", off, size);
693 return -EACCES;
694 }
696 static bool is_pointer_value(struct verifier_env *env, int regno)
697 {
698 if (env->allow_ptr_leaks)
699 return false;
701 switch (env->cur_state.regs[regno].type) {
702 case UNKNOWN_VALUE:
703 case CONST_IMM:
704 return false;
705 default:
706 return true;
707 }
708 }
710 static bool is_ctx_reg(struct verifier_env *env, int regno)
712 const struct reg_state *reg = &env->cur_state.regs[regno];
714 return reg->type == PTR_TO_CTX;
717 /* check whether memory at (regno + off) is accessible for t = (read | write)
718 * if t==write, value_regno is a register which value is stored into memory
719 * if t==read, value_regno is a register which will receive the value from memory
720 * if t==write && value_regno==-1, some unknown value is stored into memory
721 * if t==read && value_regno==-1, don't care what we read from memory
722 */
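/* For example, a (hypothetical) context read:
 *   BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), // t == BPF_READ, value_regno == 0
 * checks that offset 0 of bpf_context is readable and, on success, marks R0
 * as UNKNOWN_VALUE.
 */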
723 static int check_mem_access(struct verifier_env *env, int insn_idx, u32 regno, int off,
724 int bpf_size, enum bpf_access_type t,
725 int value_regno)
726 {
727 struct verifier_state *state = &env->cur_state;
728 int size, err = 0;
730 if (state->regs[regno].type == PTR_TO_STACK)
731 off += state->regs[regno].imm;
733 size = bpf_size_to_bytes(bpf_size);
737 if (off % size != 0) {
738 verbose("misaligned access off %d size %d\n", off, size);
742 if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
743 if (t == BPF_WRITE && value_regno >= 0 &&
744 is_pointer_value(env, value_regno)) {
745 verbose("R%d leaks addr into map\n", value_regno);
748 err = check_map_access(env, regno, off, size);
749 if (!err && t == BPF_READ && value_regno >= 0)
750 mark_reg_unknown_value(state->regs, value_regno);
752 } else if (state->regs[regno].type == PTR_TO_CTX) {
753 if (t == BPF_WRITE && value_regno >= 0 &&
754 is_pointer_value(env, value_regno)) {
755 verbose("R%d leaks addr into ctx\n", value_regno);
758 err = check_ctx_access(env, off, size, t);
759 if (!err && t == BPF_READ && value_regno >= 0)
760 mark_reg_unknown_value(state->regs, value_regno);
762 } else if (state->regs[regno].type == FRAME_PTR ||
763 state->regs[regno].type == PTR_TO_STACK) {
764 if (off >= 0 || off < -MAX_BPF_STACK) {
765 verbose("invalid stack off=%d size=%d\n", off, size);
768 if (t == BPF_WRITE) {
769 if (!env->allow_ptr_leaks &&
770 state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
771 size != BPF_REG_SIZE) {
772 verbose("attempt to corrupt spilled pointer on stack\n");
775 err = check_stack_write(env, state, off, size,
776 value_regno, insn_idx);
778 err = check_stack_read(state, off, size, value_regno);
781 verbose("R%d invalid mem access '%s'\n",
782 regno, reg_type_str[state->regs[regno].type]);
788 static int check_xadd(struct verifier_env *env, int insn_idx, struct bpf_insn *insn)
790 struct reg_state *regs = env->cur_state.regs;
793 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
794 insn->imm != 0) {
795 verbose("BPF_XADD uses reserved fields\n");
796 return -EINVAL;
797 }
799 /* check src1 operand */
800 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
804 /* check src2 operand */
805 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
809 if (is_pointer_value(env, insn->src_reg)) {
810 verbose("R%d leaks addr into mem\n", insn->src_reg);
814 if (is_ctx_reg(env, insn->dst_reg)) {
815 verbose("BPF_XADD stores into R%d context is not allowed\n",
820 /* check whether atomic_add can read the memory */
821 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
822 BPF_SIZE(insn->code), BPF_READ, -1);
826 /* check whether atomic_add can write into the same memory */
827 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
828 BPF_SIZE(insn->code), BPF_WRITE, -1);
831 /* when register 'regno' is passed into function that will read 'access_size'
832 * bytes from that pointer, make sure that it's within stack boundary
833 * and all elements of stack are initialized
834 */
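/* For example, a (hypothetical) 4-byte stack buffer passed to a helper:
 *   BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),  // initialize fp-4..fp-1
 *   BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // R2 = fp - 4, type PTR_TO_STACK
 * passes this check for access_size == 4; without the BPF_ST_MEM the bytes
 * stay STACK_INVALID and the indirect read is rejected.
 */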
835 static int check_stack_boundary(struct verifier_env *env,
836 int regno, int access_size)
838 struct verifier_state *state = &env->cur_state;
839 struct reg_state *regs = state->regs;
842 if (regs[regno].type != PTR_TO_STACK)
843 return -EACCES;
845 off = regs[regno].imm;
846 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
847 access_size <= 0) {
848 verbose("invalid stack type R%d off=%d access_size=%d\n",
849 regno, off, access_size);
853 for (i = 0; i < access_size; i++) {
854 if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
855 verbose("invalid indirect read from stack off %d+%d size %d\n",
856 off, i, access_size);
857 return -EACCES;
858 }
859 }
860 return 0;
861 }
863 static int check_func_arg(struct verifier_env *env, u32 regno,
864 enum bpf_arg_type arg_type, struct bpf_map **mapp)
865 {
866 struct reg_state *reg = env->cur_state.regs + regno;
867 enum bpf_reg_type expected_type;
868 int err = 0;
870 if (arg_type == ARG_DONTCARE)
871 return 0;
873 if (reg->type == NOT_INIT) {
874 verbose("R%d !read_ok\n", regno);
878 if (arg_type == ARG_ANYTHING) {
879 if (is_pointer_value(env, regno)) {
880 verbose("R%d leaks addr into helper function\n", regno);
886 if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
887 arg_type == ARG_PTR_TO_MAP_VALUE) {
888 expected_type = PTR_TO_STACK;
889 } else if (arg_type == ARG_CONST_STACK_SIZE) {
890 expected_type = CONST_IMM;
891 } else if (arg_type == ARG_CONST_MAP_PTR) {
892 expected_type = CONST_PTR_TO_MAP;
893 } else if (arg_type == ARG_PTR_TO_CTX) {
894 expected_type = PTR_TO_CTX;
896 verbose("unsupported arg_type %d\n", arg_type);
900 if (reg->type != expected_type) {
901 verbose("R%d type=%s expected=%s\n", regno,
902 reg_type_str[reg->type], reg_type_str[expected_type]);
906 if (arg_type == ARG_CONST_MAP_PTR) {
907 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
908 *mapp = reg->map_ptr;
910 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
911 /* bpf_map_xxx(..., map_ptr, ..., key) call:
912 * check that [key, key + map->key_size) are within
913 * stack limits and initialized
914 */
915 if (!*mapp) {
916 /* in function declaration map_ptr must come before
917 * map_key, so that it's verified and known before
918 * we have to check map_key here. Otherwise it means
919 * that kernel subsystem misconfigured verifier
920 */
921 verbose("invalid map_ptr to access map->key\n");
922 return -EACCES;
923 }
924 err = check_stack_boundary(env, regno, (*mapp)->key_size);
926 } else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
927 /* bpf_map_xxx(..., map_ptr, ..., value) call:
928 * check [value, value + map->value_size) validity
929 */
930 if (!*mapp) {
931 /* kernel subsystem misconfigured verifier */
932 verbose("invalid map_ptr to access map->value\n");
933 return -EACCES;
934 }
935 err = check_stack_boundary(env, regno, (*mapp)->value_size);
937 } else if (arg_type == ARG_CONST_STACK_SIZE) {
938 /* bpf_xxx(..., buf, len) call will access 'len' bytes
939 * from stack pointer 'buf'. Check it
940 * note: regno == len, regno - 1 == buf
941 */
943 /* kernel subsystem misconfigured verifier */
944 verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
947 err = check_stack_boundary(env, regno - 1, reg->imm);
953 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
954 {
955 if (!map)
956 return 0;
958 /* We need a two way check, first is from map perspective ... */
959 switch (map->map_type) {
960 case BPF_MAP_TYPE_PROG_ARRAY:
961 if (func_id != BPF_FUNC_tail_call)
962 goto error;
963 break;
964 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
965 if (func_id != BPF_FUNC_perf_event_read &&
966 func_id != BPF_FUNC_perf_event_output)
967 goto error;
968 break;
969 default:
970 break;
971 }
973 /* ... and second from the function itself. */
974 switch (func_id) {
975 case BPF_FUNC_tail_call:
976 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
977 goto error;
978 break;
979 case BPF_FUNC_perf_event_read:
980 case BPF_FUNC_perf_event_output:
981 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
982 goto error;
983 break;
984 default:
985 break;
986 }
988 return 0;
989 error:
990 verbose("cannot pass map_type %d into func %d\n",
991 map->map_type, func_id);
992 return -EINVAL;
993 }
995 static int check_call(struct verifier_env *env, int func_id, int insn_idx)
997 struct verifier_state *state = &env->cur_state;
998 const struct bpf_func_proto *fn = NULL;
999 struct reg_state *regs = state->regs;
1000 struct bpf_map *map = NULL;
1001 struct reg_state *reg;
1004 /* find function prototype */
1005 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
1006 verbose("invalid func %d\n", func_id);
1010 if (env->prog->aux->ops->get_func_proto)
1011 fn = env->prog->aux->ops->get_func_proto(func_id);
1013 if (!fn) {
1014 verbose("unknown func %d\n", func_id);
1015 return -EINVAL;
1016 }
1018 /* eBPF programs must be GPL compatible to use GPL-ed functions */
1019 if (!env->prog->gpl_compatible && fn->gpl_only) {
1020 verbose("cannot call GPL only function from proprietary program\n");
1025 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map);
1028 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map);
1029 if (err)
1030 return err;
1031 if (func_id == BPF_FUNC_tail_call) {
1032 if (map == NULL) {
1033 verbose("verifier bug\n");
1034 return -EINVAL;
1035 }
1036 env->insn_aux_data[insn_idx].map_ptr = map;
1037 }
1038 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map);
1039 if (err)
1040 return err;
1041 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map);
1044 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map);
1048 /* reset caller saved regs */
1049 for (i = 0; i < CALLER_SAVED_REGS; i++) {
1050 reg = regs + caller_saved[i];
1051 reg->type = NOT_INIT;
1055 /* update return register */
1056 if (fn->ret_type == RET_INTEGER) {
1057 regs[BPF_REG_0].type = UNKNOWN_VALUE;
1058 } else if (fn->ret_type == RET_VOID) {
1059 regs[BPF_REG_0].type = NOT_INIT;
1060 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
1061 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
1062 /* remember map_ptr, so that check_map_access()
1063 * can check 'value_size' boundary of memory access
1064 * to map element returned from bpf_map_lookup_elem()
1065 */
1066 if (map == NULL) {
1067 verbose("kernel subsystem misconfigured verifier\n");
1068 return -EINVAL;
1069 }
1070 regs[BPF_REG_0].map_ptr = map;
1072 verbose("unknown return type %d of func %d\n",
1073 fn->ret_type, func_id);
1074 return -EINVAL;
1075 }
1077 err = check_map_func_compatibility(map, func_id);
1078 if (err)
1079 return err;
1080 return 0;
1081 }
1084 /* check validity of 32-bit and 64-bit arithmetic operations */
1085 static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
1087 struct reg_state *regs = env->cur_state.regs;
1088 u8 opcode = BPF_OP(insn->code);
1091 if (opcode == BPF_END || opcode == BPF_NEG) {
1092 if (opcode == BPF_NEG) {
1093 if (BPF_SRC(insn->code) != 0 ||
1094 insn->src_reg != BPF_REG_0 ||
1095 insn->off != 0 || insn->imm != 0) {
1096 verbose("BPF_NEG uses reserved fields\n");
1100 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
1101 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
1102 BPF_CLASS(insn->code) == BPF_ALU64) {
1103 verbose("BPF_END uses reserved fields\n");
1108 /* check src operand */
1109 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1113 if (is_pointer_value(env, insn->dst_reg)) {
1114 verbose("R%d pointer arithmetic prohibited\n",
1119 /* check dest operand */
1120 err = check_reg_arg(regs, insn->dst_reg, DST_OP);
1124 } else if (opcode == BPF_MOV) {
1126 if (BPF_SRC(insn->code) == BPF_X) {
1127 if (insn->imm != 0 || insn->off != 0) {
1128 verbose("BPF_MOV uses reserved fields\n");
1132 /* check src operand */
1133 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1134 if (err)
1135 return err;
1136 } else {
1137 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
1138 verbose("BPF_MOV uses reserved fields\n");
1143 /* check dest operand */
1144 err = check_reg_arg(regs, insn->dst_reg, DST_OP);
1148 if (BPF_SRC(insn->code) == BPF_X) {
1149 if (BPF_CLASS(insn->code) == BPF_ALU64) {
1150 /* case: R1 = R2
1151 * copy register state to dest reg
1152 */
1153 regs[insn->dst_reg] = regs[insn->src_reg];
1155 if (is_pointer_value(env, insn->src_reg)) {
1156 verbose("R%d partial copy of pointer\n",
1160 regs[insn->dst_reg].type = UNKNOWN_VALUE;
1161 regs[insn->dst_reg].map_ptr = NULL;
1162 }
1163 } else if (BPF_CLASS(insn->code) == BPF_ALU64 ||
1164 BPF_SRC(insn->code) == BPF_K) {
1165 /* case: R = imm
1166 * remember the value we stored into this reg
1167 */
1168 regs[insn->dst_reg].type = CONST_IMM;
1169 regs[insn->dst_reg].imm = insn->imm;
1172 } else if (opcode > BPF_END) {
1173 verbose("invalid BPF_ALU opcode %x\n", opcode);
1176 } else { /* all other ALU ops: and, sub, xor, add, ... */
1178 bool stack_relative = false;
1180 if (BPF_SRC(insn->code) == BPF_X) {
1181 if (insn->imm != 0 || insn->off != 0) {
1182 verbose("BPF_ALU uses reserved fields\n");
1185 /* check src1 operand */
1186 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1187 if (err)
1188 return err;
1189 } else {
1190 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
1191 verbose("BPF_ALU uses reserved fields\n");
1196 /* check src2 operand */
1197 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1201 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
1202 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
1203 verbose("div by zero\n");
1207 if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
1208 verbose("BPF_ARSH not supported for 32 bit ALU\n");
1212 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
1213 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
1214 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
1216 if (insn->imm < 0 || insn->imm >= size) {
1217 verbose("invalid shift %d\n", insn->imm);
1222 /* pattern match 'bpf_add Rx, imm' instruction */
1223 if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
1224 regs[insn->dst_reg].type == FRAME_PTR &&
1225 BPF_SRC(insn->code) == BPF_K) {
1226 stack_relative = true;
1227 } else if (is_pointer_value(env, insn->dst_reg)) {
1228 verbose("R%d pointer arithmetic prohibited\n",
1231 } else if (BPF_SRC(insn->code) == BPF_X &&
1232 is_pointer_value(env, insn->src_reg)) {
1233 verbose("R%d pointer arithmetic prohibited\n",
1238 /* check dest operand */
1239 err = check_reg_arg(regs, insn->dst_reg, DST_OP);
1243 if (stack_relative) {
1244 regs[insn->dst_reg].type = PTR_TO_STACK;
1245 regs[insn->dst_reg].imm = insn->imm;
1252 static int check_cond_jmp_op(struct verifier_env *env,
1253 struct bpf_insn *insn, int *insn_idx)
1255 struct reg_state *regs = env->cur_state.regs;
1256 struct verifier_state *other_branch;
1257 u8 opcode = BPF_OP(insn->code);
1260 if (opcode > BPF_EXIT) {
1261 verbose("invalid BPF_JMP opcode %x\n", opcode);
1265 if (BPF_SRC(insn->code) == BPF_X) {
1266 if (insn->imm != 0) {
1267 verbose("BPF_JMP uses reserved fields\n");
1271 /* check src1 operand */
1272 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1276 if (is_pointer_value(env, insn->src_reg)) {
1277 verbose("R%d pointer comparison prohibited\n",
1282 if (insn->src_reg != BPF_REG_0) {
1283 verbose("BPF_JMP uses reserved fields\n");
1288 /* check src2 operand */
1289 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1293 /* detect if R == 0 where R was initialized to zero earlier */
1294 if (BPF_SRC(insn->code) == BPF_K &&
1295 (opcode == BPF_JEQ || opcode == BPF_JNE) &&
1296 regs[insn->dst_reg].type == CONST_IMM &&
1297 regs[insn->dst_reg].imm == insn->imm) {
1298 if (opcode == BPF_JEQ) {
1299 /* if (imm == imm) goto pc+off;
1300 * only follow the goto, ignore fall-through
1302 *insn_idx += insn->off;
1303 return 0;
1304 } else {
1305 /* if (imm != imm) goto pc+off;
1306 * only follow fall-through branch, since
1307 * that's where the program will go
1308 */
1309 return 0;
1310 }
1311 }
1313 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
1317 /* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
1318 if (BPF_SRC(insn->code) == BPF_K &&
1319 insn->imm == 0 && (opcode == BPF_JEQ ||
1320 opcode == BPF_JNE) &&
1321 regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
1322 if (opcode == BPF_JEQ) {
1323 /* next fallthrough insn can access memory via
1326 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
1327 /* branch target cannot access it, since reg == 0 */
1328 other_branch->regs[insn->dst_reg].type = CONST_IMM;
1329 other_branch->regs[insn->dst_reg].imm = 0;
1330 } else {
1331 other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
1332 regs[insn->dst_reg].type = CONST_IMM;
1333 regs[insn->dst_reg].imm = 0;
1335 } else if (is_pointer_value(env, insn->dst_reg)) {
1336 verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
1338 } else if (BPF_SRC(insn->code) == BPF_K &&
1339 (opcode == BPF_JEQ || opcode == BPF_JNE)) {
1341 if (opcode == BPF_JEQ) {
1342 /* detect if (R == imm) goto
1343 * and in the target state recognize that R = imm
1345 other_branch->regs[insn->dst_reg].type = CONST_IMM;
1346 other_branch->regs[insn->dst_reg].imm = insn->imm;
1347 } else {
1348 /* detect if (R != imm) goto
1349 * and in the fall-through state recognize that R = imm
1351 regs[insn->dst_reg].type = CONST_IMM;
1352 regs[insn->dst_reg].imm = insn->imm;
1353 }
1354 }
1355 if (log_level)
1356 print_verifier_state(env);
1357 return 0;
1358 }
1360 /* return the map pointer stored inside BPF_LD_IMM64 instruction */
1361 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
1363 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
1365 return (struct bpf_map *) (unsigned long) imm64;
1368 /* verify BPF_LD_IMM64 instruction */
1369 static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
1371 struct reg_state *regs = env->cur_state.regs;
1374 if (BPF_SIZE(insn->code) != BPF_DW) {
1375 verbose("invalid BPF_LD_IMM insn\n");
1378 if (insn->off != 0) {
1379 verbose("BPF_LD_IMM64 uses reserved fields\n");
1383 err = check_reg_arg(regs, insn->dst_reg, DST_OP);
1387 if (insn->src_reg == 0)
1388 /* generic move 64-bit immediate into a register */
1389 return 0;
1391 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
1392 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
1394 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
1395 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
1399 static bool may_access_skb(enum bpf_prog_type type)
1400 {
1401 switch (type) {
1402 case BPF_PROG_TYPE_SOCKET_FILTER:
1403 case BPF_PROG_TYPE_SCHED_CLS:
1404 case BPF_PROG_TYPE_SCHED_ACT:
1405 return true;
1406 default:
1407 return false;
1408 }
1409 }
1411 /* verify safety of LD_ABS|LD_IND instructions:
1412 * - they can only appear in the programs where ctx == skb
1413 * - since they are wrappers of function calls, they scratch R1-R5 registers,
1414 * preserve R6-R9, and store return value into R0
1417 * ctx == skb == R6 == CTX
1420 * SRC == any register
1421 * IMM == 32-bit immediate
1424 * R0 - 8/16/32-bit skb data converted to cpu endianness
1425 */
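/* For example, a (hypothetical) socket filter fragment:
 *   BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), // LD_ABS expects the skb ctx in R6
 *   BPF_LD_ABS(BPF_B, 14),               // R0 = *(u8 *)(skb->data + 14)
 * After the load R0 is UNKNOWN_VALUE and R1-R5 are scratched (NOT_INIT).
 */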
1426 static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
1428 struct reg_state *regs = env->cur_state.regs;
1429 u8 mode = BPF_MODE(insn->code);
1430 struct reg_state *reg;
1433 if (!may_access_skb(env->prog->type)) {
1434 verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
1438 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
1439 BPF_SIZE(insn->code) == BPF_DW ||
1440 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
1441 verbose("BPF_LD_ABS uses reserved fields\n");
1445 /* check whether implicit source operand (register R6) is readable */
1446 err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
1450 if (regs[BPF_REG_6].type != PTR_TO_CTX) {
1451 verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
1455 if (mode == BPF_IND) {
1456 /* check explicit source operand */
1457 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1462 /* reset caller saved regs to unreadable */
1463 for (i = 0; i < CALLER_SAVED_REGS; i++) {
1464 reg = regs + caller_saved[i];
1465 reg->type = NOT_INIT;
1469 /* mark destination R0 register as readable, since it contains
1470 * the value fetched from the packet
1472 regs[BPF_REG_0].type = UNKNOWN_VALUE;
1476 /* non-recursive DFS pseudo code
1477 * 1 procedure DFS-iterative(G,v):
1478 * 2 label v as discovered
1479 * 3 let S be a stack
1481 * 5 while S is not empty
1483 * 7 if t is what we're looking for:
1485 * 9 for all edges e in G.adjacentEdges(t) do
1486 * 10 if edge e is already labelled
1487 * 11 continue with the next edge
1488 * 12 w <- G.adjacentVertex(t,e)
1489 * 13 if vertex w is not discovered and not explored
1490 * 14 label e as tree-edge
1491 * 15 label w as discovered
1494 * 18 else if vertex w is discovered
1495 * 19 label e as back-edge
1497 * 21 // vertex w is explored
1498 * 22 label e as forward- or cross-edge
1499 * 23 label t as explored
1503 * 0x10 - discovered
1504 * 0x11 - discovered and fall-through edge labelled
1505 * 0x12 - discovered and fall-through and branch edges labelled
1506 * 0x20 - explored
1507 */
1509 enum {
1510 DISCOVERED = 0x10,
1511 EXPLORED = 0x20,
1512 FALLTHROUGH = 1,
1513 BRANCH = 2,
1514 };
1516 #define STATE_LIST_MARK ((struct verifier_state_list *) -1L)
1518 static int *insn_stack; /* stack of insns to process */
1519 static int cur_stack; /* current stack index */
1520 static int *insn_state;
1522 /* t, w, e - match pseudo-code above:
1523 * t - index of current instruction
1524 * w - next instruction
1527 static int push_insn(int t, int w, int e, struct verifier_env *env)
1529 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
1530 return 0;
1532 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
1533 return 0;
1535 if (w < 0 || w >= env->prog->len) {
1536 verbose("jump out of range from insn %d to %d\n", t, w);
1541 /* mark branch target for state pruning */
1542 env->explored_states[w] = STATE_LIST_MARK;
1544 if (insn_state[w] == 0) {
1546 insn_state[t] = DISCOVERED | e;
1547 insn_state[w] = DISCOVERED;
1548 if (cur_stack >= env->prog->len)
1549 return -E2BIG;
1550 insn_stack[cur_stack++] = w;
1551 return 1;
1552 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
1553 verbose("back-edge from insn %d to %d\n", t, w);
1555 } else if (insn_state[w] == EXPLORED) {
1556 /* forward- or cross-edge */
1557 insn_state[t] = DISCOVERED | e;
1559 verbose("insn state internal bug\n");
1565 /* non-recursive depth-first-search to detect loops in BPF program
1566 * loop == back-edge in directed graph
1568 static int check_cfg(struct verifier_env *env)
1570 struct bpf_insn *insns = env->prog->insnsi;
1571 int insn_cnt = env->prog->len;
1575 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
1579 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
1585 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
1586 insn_stack[0] = 0; /* 0 is the first instruction */
1592 t = insn_stack[cur_stack - 1];
1594 if (BPF_CLASS(insns[t].code) == BPF_JMP) {
1595 u8 opcode = BPF_OP(insns[t].code);
1597 if (opcode == BPF_EXIT) {
1599 } else if (opcode == BPF_CALL) {
1600 ret = push_insn(t, t + 1, FALLTHROUGH, env);
1605 } else if (opcode == BPF_JA) {
1606 if (BPF_SRC(insns[t].code) != BPF_K) {
1610 /* unconditional jump with single edge */
1611 ret = push_insn(t, t + insns[t].off + 1,
1617 /* tell verifier to check for equivalent states
1618 * after every call and jump
1620 if (t + 1 < insn_cnt)
1621 env->explored_states[t + 1] = STATE_LIST_MARK;
1623 /* conditional jump with two edges */
1624 ret = push_insn(t, t + 1, FALLTHROUGH, env);
1630 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
1637 /* all other non-branch instructions with single
1640 ret = push_insn(t, t + 1, FALLTHROUGH, env);
1648 insn_state[t] = EXPLORED;
1649 if (cur_stack-- <= 0) {
1650 verbose("pop stack internal bug\n");
1657 for (i = 0; i < insn_cnt; i++) {
1658 if (insn_state[i] != EXPLORED) {
1659 verbose("unreachable insn %d\n", i);
1664 ret = 0; /* cfg looks good */
1672 /* compare two verifier states
1674 * all states stored in state_list are known to be valid, since
1675 * verifier reached 'bpf_exit' instruction through them
1677 * this function is called when verifier exploring different branches of
1678 * execution popped from the state stack. If it sees an old state that has
1679 * more strict register state and more strict stack state then this execution
1680 * branch doesn't need to be explored further, since verifier already
1681 * concluded that more strict state leads to valid finish.
1683 * Therefore two states are equivalent if register state is more conservative
1684 * and explored stack state is more conservative than the current one.
1687 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
1688 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
1690 * In other words if current stack state (one being explored) has more
1691 * valid slots than old one that already passed validation, it means
1692 * the verifier can stop exploring and conclude that current state is valid too
1694 * Similarly with registers. If explored state has register type as invalid
1695 * whereas register type in current state is meaningful, it means that
1696 * the current state will reach 'bpf_exit' instruction safely
1697 */
1698 static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
1702 for (i = 0; i < MAX_BPF_REG; i++) {
1703 if (memcmp(&old->regs[i], &cur->regs[i],
1704 sizeof(old->regs[0])) != 0) {
1705 if (old->regs[i].type == NOT_INIT ||
1706 (old->regs[i].type == UNKNOWN_VALUE &&
1707 cur->regs[i].type != NOT_INIT))
1713 for (i = 0; i < MAX_BPF_STACK; i++) {
1714 if (old->stack_slot_type[i] == STACK_INVALID)
1716 if (old->stack_slot_type[i] != cur->stack_slot_type[i])
1717 /* Ex: old explored (safe) state has STACK_SPILL in
1718 * this stack slot, but current has STACK_MISC ->
1719 * these verifier states are not equivalent,
1720 * return false to continue verification of this path
1721 */
1722 return false;
1723 if (i % BPF_REG_SIZE)
1725 if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
1726 &cur->spilled_regs[i / BPF_REG_SIZE],
1727 sizeof(old->spilled_regs[0])))
1728 /* when explored and current stack slot types are
1729 * the same, check that stored pointers types
1730 * are the same as well.
1731 * Ex: explored safe path could have stored
1732 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
1733 * but current path has stored:
1734 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
1735 * such verifier states are not equivalent.
1736 * return false to continue verification of this path
1737 */
1738 return false;
1739 }
1741 return true;
1742 }
1745 static int is_state_visited(struct verifier_env *env, int insn_idx)
1747 struct verifier_state_list *new_sl;
1748 struct verifier_state_list *sl;
1750 sl = env->explored_states[insn_idx];
1752 /* this 'insn_idx' instruction wasn't marked, so we will not
1753 * be doing state search here
1757 while (sl != STATE_LIST_MARK) {
1758 if (states_equal(&sl->state, &env->cur_state))
1759 /* reached equivalent register/stack state,
1760 * prune the search
1761 */
1762 return 1;
1763 sl = sl->next;
1764 }
1766 /* there were no equivalent states, remember current one.
1767 * technically the current state is not proven to be safe yet,
1768 * but it will either reach bpf_exit (which means it's safe) or
1769 * it will be rejected. Since there are no loops, we won't be
1770 * seeing this 'insn_idx' instruction again on the way to bpf_exit
1772 new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
1776 /* add new state to the head of linked list */
1777 memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
1778 new_sl->next = env->explored_states[insn_idx];
1779 env->explored_states[insn_idx] = new_sl;
1783 static int do_check(struct verifier_env *env)
1785 struct verifier_state *state = &env->cur_state;
1786 struct bpf_insn *insns = env->prog->insnsi;
1787 struct reg_state *regs = state->regs;
1788 int insn_cnt = env->prog->len;
1789 int insn_idx, prev_insn_idx = 0;
1790 int insn_processed = 0;
1791 bool do_print_state = false;
1793 init_reg_state(regs);
1796 struct bpf_insn *insn;
1800 if (insn_idx >= insn_cnt) {
1801 verbose("invalid insn idx %d insn_cnt %d\n",
1802 insn_idx, insn_cnt);
1806 insn = &insns[insn_idx];
1807 class = BPF_CLASS(insn->code);
1809 if (++insn_processed > 32768) {
1810 verbose("BPF program is too large. Proccessed %d insn\n",
1815 err = is_state_visited(env, insn_idx);
1819 /* found equivalent state, can prune the search */
1822 verbose("\nfrom %d to %d: safe\n",
1823 prev_insn_idx, insn_idx);
1825 verbose("%d: safe\n", insn_idx);
1827 goto process_bpf_exit;
1830 if (log_level && do_print_state) {
1831 verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
1832 print_verifier_state(env);
1833 do_print_state = false;
1837 verbose("%d: ", insn_idx);
1838 print_bpf_insn(env, insn);
1841 env->insn_aux_data[insn_idx].seen = true;
1842 if (class == BPF_ALU || class == BPF_ALU64) {
1843 err = check_alu_op(env, insn);
1847 } else if (class == BPF_LDX) {
1848 enum bpf_reg_type *prev_src_type, src_reg_type;
1850 /* check for reserved fields is already done */
1852 /* check src operand */
1853 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1857 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
1861 src_reg_type = regs[insn->src_reg].type;
1863 /* check that memory (src_reg + off) is readable,
1864 * the state of dst_reg will be updated by this func
1866 err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
1867 BPF_SIZE(insn->code), BPF_READ,
1868 insn->dst_reg);
1869 if (err)
1870 return err;
1872 if (BPF_SIZE(insn->code) != BPF_W &&
1873 BPF_SIZE(insn->code) != BPF_DW) {
1874 insn_idx++;
1875 continue;
1876 }
1878 prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
1880 if (*prev_src_type == NOT_INIT) {
1881 /* saw a valid insn
1882 * dst_reg = *(u32 *)(src_reg + off)
1883 * save type to validate intersecting paths
1884 */
1885 *prev_src_type = src_reg_type;
1887 } else if (src_reg_type != *prev_src_type &&
1888 (src_reg_type == PTR_TO_CTX ||
1889 *prev_src_type == PTR_TO_CTX)) {
1890 /* An abuser program is trying to use the same insn
1891 * dst_reg = *(u32*) (src_reg + off)
1892 * with different pointer types:
1893 * src_reg == ctx in one branch and
1894 * src_reg == stack|map in some other branch.
1895 * Reject it.
1896 */
1897 verbose("same insn cannot be used with different pointers\n");
1901 } else if (class == BPF_STX) {
1902 enum bpf_reg_type *prev_dst_type, dst_reg_type;
1904 if (BPF_MODE(insn->code) == BPF_XADD) {
1905 err = check_xadd(env, insn_idx, insn);
1906 if (err)
1907 return err;
1908 insn_idx++;
1909 continue;
1910 }
1912 /* check src1 operand */
1913 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1916 /* check src2 operand */
1917 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1921 dst_reg_type = regs[insn->dst_reg].type;
1923 /* check that memory (dst_reg + off) is writeable */
1924 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1925 BPF_SIZE(insn->code), BPF_WRITE,
1926 insn->src_reg);
1927 if (err)
1928 return err;
1930 prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
1932 if (*prev_dst_type == NOT_INIT) {
1933 *prev_dst_type = dst_reg_type;
1934 } else if (dst_reg_type != *prev_dst_type &&
1935 (dst_reg_type == PTR_TO_CTX ||
1936 *prev_dst_type == PTR_TO_CTX)) {
1937 verbose("same insn cannot be used with different pointers\n");
1941 } else if (class == BPF_ST) {
1942 if (BPF_MODE(insn->code) != BPF_MEM ||
1943 insn->src_reg != BPF_REG_0) {
1944 verbose("BPF_ST uses reserved fields\n");
1947 /* check src operand */
1948 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1952 if (is_ctx_reg(env, insn->dst_reg)) {
1953 verbose("BPF_ST stores into R%d context is not allowed\n",
1958 /* check that memory (dst_reg + off) is writeable */
1959 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1960 BPF_SIZE(insn->code), BPF_WRITE,
1961 -1);
1962 if (err)
1963 return err;
1965 } else if (class == BPF_JMP) {
1966 u8 opcode = BPF_OP(insn->code);
1968 if (opcode == BPF_CALL) {
1969 if (BPF_SRC(insn->code) != BPF_K ||
1970 insn->off != 0 ||
1971 insn->src_reg != BPF_REG_0 ||
1972 insn->dst_reg != BPF_REG_0) {
1973 verbose("BPF_CALL uses reserved fields\n");
1977 err = check_call(env, insn->imm, insn_idx);
1981 } else if (opcode == BPF_JA) {
1982 if (BPF_SRC(insn->code) != BPF_K ||
1983 insn->imm != 0 ||
1984 insn->src_reg != BPF_REG_0 ||
1985 insn->dst_reg != BPF_REG_0) {
1986 verbose("BPF_JA uses reserved fields\n");
1990 insn_idx += insn->off + 1;
1991 continue;
1993 } else if (opcode == BPF_EXIT) {
1994 if (BPF_SRC(insn->code) != BPF_K ||
1995 insn->imm != 0 ||
1996 insn->src_reg != BPF_REG_0 ||
1997 insn->dst_reg != BPF_REG_0) {
1998 verbose("BPF_EXIT uses reserved fields\n");
2002 /* eBPF calling convention is such that R0 is used
2003 * to return the value from eBPF program.
2004 * Make sure that it's readable at this time
2005 * of bpf_exit, which means that program wrote
2006 * something into it earlier
2008 err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
2012 if (is_pointer_value(env, BPF_REG_0)) {
2013 verbose("R0 leaks addr as return value\n");
2018 insn_idx = pop_stack(env, &prev_insn_idx);
2022 do_print_state = true;
2026 err = check_cond_jmp_op(env, insn, &insn_idx);
2030 } else if (class == BPF_LD) {
2031 u8 mode = BPF_MODE(insn->code);
2033 if (mode == BPF_ABS || mode == BPF_IND) {
2034 err = check_ld_abs(env, insn);
2038 } else if (mode == BPF_IMM) {
2039 err = check_ld_imm(env, insn);
2044 env->insn_aux_data[insn_idx].seen = true;
2046 verbose("invalid BPF_LD mode\n");
2050 verbose("unknown insn class %d\n", class);
2060 /* look for pseudo eBPF instructions that access map FDs and
2061 * replace them with actual map pointers
2063 static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2065 struct bpf_insn *insn = env->prog->insnsi;
2066 int insn_cnt = env->prog->len;
2069 for (i = 0; i < insn_cnt; i++, insn++) {
2070 if (BPF_CLASS(insn->code) == BPF_LDX &&
2071 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
2072 verbose("BPF_LDX uses reserved fields\n");
2076 if (BPF_CLASS(insn->code) == BPF_STX &&
2077 ((BPF_MODE(insn->code) != BPF_MEM &&
2078 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
2079 verbose("BPF_STX uses reserved fields\n");
2083 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
2084 struct bpf_map *map;
2087 if (i == insn_cnt - 1 || insn[1].code != 0 ||
2088 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
2089 insn[1].off != 0) {
2090 verbose("invalid bpf_ld_imm64 insn\n");
2094 if (insn->src_reg == 0)
2095 /* valid generic load 64-bit imm */
2096 goto next_insn;
2098 if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
2099 verbose("unrecognized bpf_ld_imm64 insn\n");
2103 f = fdget(insn->imm);
2104 map = __bpf_map_get(f);
2105 if (IS_ERR(map)) {
2106 verbose("fd %d is not pointing to valid bpf_map\n",
2107 insn->imm);
2108 return PTR_ERR(map);
2109 }
2111 /* store map pointer inside BPF_LD_IMM64 instruction */
2112 insn[0].imm = (u32) (unsigned long) map;
2113 insn[1].imm = ((u64) (unsigned long) map) >> 32;
2115 /* check whether we recorded this map already */
2116 for (j = 0; j < env->used_map_cnt; j++)
2117 if (env->used_maps[j] == map) {
2118 fdput(f);
2119 goto next_insn;
2120 }
2122 if (env->used_map_cnt >= MAX_USED_MAPS) {
2123 fdput(f);
2124 return -E2BIG;
2125 }
2127 /* hold the map. If the program is rejected by verifier,
2128 * the map will be released by release_maps() or it
2129 * will be used by the valid program until it's unloaded
2130 * and all maps are released in free_used_maps()
2132 map = bpf_map_inc(map, false);
2133 if (IS_ERR(map)) {
2134 fdput(f);
2135 return PTR_ERR(map);
2136 }
2137 env->used_maps[env->used_map_cnt++] = map;
2139 fdput(f);
2140 next_insn:
2141 insn++;
2142 i++;
2143 }
2144 }
2146 /* now all pseudo BPF_LD_IMM64 instructions load valid
2147 * 'struct bpf_map *' into a register instead of user map_fd.
2148 * These pointers will be used later by verifier to validate map access.
2149 */
2150 return 0;
2151 }
2153 /* drop refcnt of maps used by the rejected program */
2154 static void release_maps(struct verifier_env *env)
2158 for (i = 0; i < env->used_map_cnt; i++)
2159 bpf_map_put(env->used_maps[i]);
2162 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
2163 static void convert_pseudo_ld_imm64(struct verifier_env *env)
2165 struct bpf_insn *insn = env->prog->insnsi;
2166 int insn_cnt = env->prog->len;
2169 for (i = 0; i < insn_cnt; i++, insn++)
2170 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
2174 /* single env->prog->insni[off] instruction was replaced with the range
2175 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
2176 * [0, off) and [off, end) to new locations, so the patched range stays zero
2177 */
2178 static int adjust_insn_aux_data(struct verifier_env *env, u32 prog_len,
2179 u32 off, u32 cnt)
2180 {
2181 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
2182 int i;
2184 if (cnt == 1)
2185 return 0;
2186 new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
2187 if (!new_data)
2188 return -ENOMEM;
2189 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
2190 memcpy(new_data + off + cnt - 1, old_data + off,
2191 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
2192 for (i = off; i < off + cnt - 1; i++)
2193 new_data[i].seen = true;
2194 env->insn_aux_data = new_data;
2199 static struct bpf_prog *bpf_patch_insn_data(struct verifier_env *env, u32 off,
2200 const struct bpf_insn *patch, u32 len)
2202 struct bpf_prog *new_prog;
2204 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
2205 if (!new_prog)
2206 return NULL;
2207 if (adjust_insn_aux_data(env, new_prog->len, off, len))
2208 return NULL;
2209 return new_prog;
2210 }
2212 /* The verifier does more data flow analysis than llvm and will not explore
2213 * branches that are dead at run time. Malicious programs can have dead code
2214 * too. Therefore replace all dead at-run-time code with nops.
2215 */
2216 static void sanitize_dead_code(struct verifier_env *env)
2218 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2219 struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
2220 struct bpf_insn *insn = env->prog->insnsi;
2221 const int insn_cnt = env->prog->len;
2224 for (i = 0; i < insn_cnt; i++) {
2225 if (aux_data[i].seen)
2226 continue;
2227 memcpy(insn + i, &nop, sizeof(nop));
2228 }
2229 }
2231 /* convert load instructions that access fields of 'struct __sk_buff'
2232 * into sequence of instructions that access fields of 'struct sk_buff'
2233 */
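/* For example, a (hypothetical) load of the packet length:
 *   BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, len))
 * is rewritten by the program type's convert_ctx_access() callback into one
 * or more insns that load the corresponding 'struct sk_buff' field.
 */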
2234 static int convert_ctx_accesses(struct verifier_env *env)
2236 struct bpf_insn *insn = env->prog->insnsi;
2237 const int insn_cnt = env->prog->len;
2238 struct bpf_insn insn_buf[16];
2239 struct bpf_prog *new_prog;
2240 enum bpf_access_type type;
2243 if (!env->prog->aux->ops->convert_ctx_access)
2244 return 0;
2246 for (i = 0; i < insn_cnt; i++, insn++) {
2249 if (insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
2250 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
2251 type = BPF_READ;
2252 else if (insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
2253 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
2254 type = BPF_WRITE;
2255 else
2256 continue;
2258 if (type == BPF_WRITE &&
2259 env->insn_aux_data[i + delta].sanitize_stack_off) {
2260 struct bpf_insn patch[] = {
2261 /* Sanitize suspicious stack slot with zero.
2262 * There are no memory dependencies for this store,
2263 * since it's only using frame pointer and immediate
2266 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
2267 env->insn_aux_data[i + delta].sanitize_stack_off,
2268 0),
2269 /* the original STX instruction will immediately
2270 * overwrite the same stack slot with appropriate value
2271 */
2272 *insn,
2273 };
2275 cnt = ARRAY_SIZE(patch);
2276 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
2281 env->prog = new_prog;
2282 insn = new_prog->insnsi + i + delta;
2286 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
2287 continue;
2289 cnt = env->prog->aux->ops->
2290 convert_ctx_access(type, insn->dst_reg, insn->src_reg,
2291 insn->off, insn_buf, env->prog);
2292 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
2293 verbose("bpf verifier is misconfigured\n");
2297 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
2303 /* keep walking new program and skip insns we just inserted */
2304 env->prog = new_prog;
2305 insn = new_prog->insnsi + i + delta;
2311 /* fixup insn->imm field of bpf_call instructions
2313 * this function is called after eBPF program passed verification
2315 static int fixup_bpf_calls(struct verifier_env *env)
2317 struct bpf_prog *prog = env->prog;
2318 struct bpf_insn *insn = prog->insnsi;
2319 const struct bpf_func_proto *fn;
2320 const int insn_cnt = prog->len;
2321 struct bpf_insn insn_buf[16];
2322 struct bpf_prog *new_prog;
2323 struct bpf_map *map_ptr;
2324 int i, cnt, delta = 0;
2326 for (i = 0; i < insn_cnt; i++, insn++) {
2327 if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
2328 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
2329 /* due to JIT bugs clear upper 32-bits of src register
2330 * before div/mod operation
2332 insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
2333 insn_buf[1] = *insn;
2335 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
2340 env->prog = prog = new_prog;
2341 insn = new_prog->insnsi + i + delta;
2345 if (insn->code != (BPF_JMP | BPF_CALL))
2346 continue;
2348 if (insn->imm == BPF_FUNC_get_route_realm)
2349 prog->dst_needed = 1;
2350 if (insn->imm == BPF_FUNC_get_prandom_u32)
2351 bpf_user_rnd_init_once();
2352 if (insn->imm == BPF_FUNC_tail_call) {
2353 /* mark bpf_tail_call as different opcode to avoid
2354 * conditional branch in the interpreter for every normal
2355 * call and to prevent accidental JITing by JIT compiler
2356 * that doesn't support bpf_tail_call yet
2357 */
2358 insn->imm = 0;
2359 insn->code |= BPF_X;
2361 /* instead of changing every JIT dealing with tail_call
2362 * emit two extra insns:
2363 * if (index >= max_entries) goto out;
2364 * index &= array->index_mask;
2365 * to avoid out-of-bounds cpu speculation
2367 map_ptr = env->insn_aux_data[i + delta].map_ptr;
2368 if (!map_ptr->unpriv_array)
2369 continue;
2370 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
2371 map_ptr->max_entries, 2);
2372 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
2373 container_of(map_ptr,
2374 struct bpf_array,
2375 map)->index_mask);
2376 insn_buf[2] = *insn;
2378 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
2383 env->prog = prog = new_prog;
2384 insn = new_prog->insnsi + i + delta;
2385 continue;
2386 }
2388 fn = prog->aux->ops->get_func_proto(insn->imm);
2389 /* all functions that have prototype and verifier allowed
2390 * programs to call them, must be real in-kernel functions
2391 */
2392 if (!fn->func) {
2393 verbose("kernel subsystem misconfigured func %d\n",
2394 insn->imm);
2395 return -EFAULT;
2396 }
2397 insn->imm = fn->func - __bpf_call_base;
2403 static void free_states(struct verifier_env *env)
2405 struct verifier_state_list *sl, *sln;
2408 if (!env->explored_states)
2409 return;
2411 for (i = 0; i < env->prog->len; i++) {
2412 sl = env->explored_states[i];
2414 if (sl)
2415 while (sl != STATE_LIST_MARK) {
2416 sln = sl->next;
2417 kfree(sl);
2418 sl = sln;
2419 }
2420 }
2422 kfree(env->explored_states);
2425 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
2427 char __user *log_ubuf = NULL;
2428 struct verifier_env *env;
2429 int ret = -EINVAL;
2431 if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
2432 return -E2BIG;
2434 /* 'struct verifier_env' can be global, but since it's not small,
2435 * allocate/free it every time bpf_check() is called
2437 env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
2441 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
2442 (*prog)->len);
2443 ret = -ENOMEM;
2444 if (!env->insn_aux_data)
2445 goto err_free_env;
2446 env->prog = *prog;
2448 /* grab the mutex to protect few globals used by verifier */
2449 mutex_lock(&bpf_verifier_lock);
2451 if (attr->log_level || attr->log_buf || attr->log_size) {
2452 /* user requested verbose verifier output
2453 * and supplied buffer to store the verification trace
2455 log_level = attr->log_level;
2456 log_ubuf = (char __user *) (unsigned long) attr->log_buf;
2457 log_size = attr->log_size;
2461 /* log_* values have to be sane */
2462 if (log_size < 128 || log_size > UINT_MAX >> 8 ||
2463 log_level == 0 || log_ubuf == NULL)
2464 goto err_unlock;
2466 ret = -ENOMEM;
2467 log_buf = vmalloc(log_size);
2468 if (!log_buf)
2469 goto err_unlock;
2470 } else {
2471 log_level = 0;
2472 }
2474 ret = replace_map_fd_with_map_ptr(env);
2475 if (ret < 0)
2476 goto skip_full_check;
2478 env->explored_states = kcalloc(env->prog->len,
2479 sizeof(struct verifier_state_list *),
2480 GFP_KERNEL);
2482 if (!env->explored_states)
2483 goto skip_full_check;
2485 ret = check_cfg(env);
2486 if (ret < 0)
2487 goto skip_full_check;
2489 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
2491 ret = do_check(env);
2493 skip_full_check:
2494 while (pop_stack(env, NULL) >= 0);
2495 free_states(env);
2497 if (ret == 0)
2498 sanitize_dead_code(env);
2500 if (ret == 0)
2501 /* program is valid, convert *(u32*)(ctx + off) accesses */
2502 ret = convert_ctx_accesses(env);
2504 if (ret == 0)
2505 ret = fixup_bpf_calls(env);
2507 if (log_level && log_len >= log_size - 1) {
2508 BUG_ON(log_len >= log_size);
2509 /* verifier log exceeded user supplied buffer */
2510 ret = -ENOSPC;
2511 /* fall through to return what was recorded */
2512 }
2514 /* copy verifier log back to user space including trailing zero */
2515 if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
2516 ret = -EFAULT;
2517 goto free_log_buf;
2518 }
2520 if (ret == 0 && env->used_map_cnt) {
2521 /* if program passed verifier, update used_maps in bpf_prog_info */
2522 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
2523 sizeof(env->used_maps[0]),
2524 GFP_KERNEL);
2526 if (!env->prog->aux->used_maps) {
2527 ret = -ENOMEM;
2528 goto free_log_buf;
2529 }
2531 memcpy(env->prog->aux->used_maps, env->used_maps,
2532 sizeof(env->used_maps[0]) * env->used_map_cnt);
2533 env->prog->aux->used_map_cnt = env->used_map_cnt;
2535 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
2536 * bpf_ld_imm64 instructions
2538 convert_pseudo_ld_imm64(env);
2539 }
2541 free_log_buf:
2542 if (log_level)
2543 vfree(log_buf);
2544 if (!env->prog->aux->used_maps)
2545 /* if we didn't copy map pointers into bpf_prog_info, release
2546 * them now. Otherwise free_used_maps() will release them.
2547 */
2548 release_maps(env);
2549 *prog = env->prog;
2550 err_unlock:
2551 mutex_unlock(&bpf_verifier_lock);
2552 vfree(env->insn_aux_data);
2553 err_free_env:
2554 kfree(env);
2555 return ret;
2556 }