/* Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
12 #include <asm/types.h>
13 #include <linux/types.h>
24 #include <sys/capability.h>
25 #include <sys/resource.h>
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
35 # include "autoconf.h"
37 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
38 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 #include "../../../include/linux/filter.h"
45 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
52 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
53 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
57 struct bpf_insn insns[MAX_INSNS];
58 int fixup_map1[MAX_FIXUPS];
59 int fixup_map2[MAX_FIXUPS];
60 int fixup_prog[MAX_FIXUPS];
61 int fixup_map_in_map[MAX_FIXUPS];
63 const char *errstr_unpriv;
68 } result, result_unpriv;
69 enum bpf_prog_type prog_type;
/* Note we want this to be 64 bit aligned so that the end of our array
 * is actually the end of the structure.
 */
76 #define MAX_ENTRIES 11
83 static struct bpf_test tests[] = {
87 BPF_MOV64_IMM(BPF_REG_1, 1),
88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
89 BPF_MOV64_IMM(BPF_REG_2, 3),
90 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
91 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
92 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
93 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
104 .errstr = "unreachable",
110 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
111 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
114 .errstr = "unreachable",
120 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
123 .errstr = "jump out of range",
127 "out of range jump2",
129 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
132 .errstr = "jump out of range",
138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
139 BPF_LD_IMM64(BPF_REG_0, 0),
140 BPF_LD_IMM64(BPF_REG_0, 0),
141 BPF_LD_IMM64(BPF_REG_0, 1),
142 BPF_LD_IMM64(BPF_REG_0, 1),
143 BPF_MOV64_IMM(BPF_REG_0, 2),
146 .errstr = "invalid BPF_LD_IMM insn",
147 .errstr_unpriv = "R1 pointer comparison",
153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
154 BPF_LD_IMM64(BPF_REG_0, 0),
155 BPF_LD_IMM64(BPF_REG_0, 0),
156 BPF_LD_IMM64(BPF_REG_0, 1),
157 BPF_LD_IMM64(BPF_REG_0, 1),
160 .errstr = "invalid BPF_LD_IMM insn",
161 .errstr_unpriv = "R1 pointer comparison",
167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
168 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
169 BPF_LD_IMM64(BPF_REG_0, 0),
170 BPF_LD_IMM64(BPF_REG_0, 0),
171 BPF_LD_IMM64(BPF_REG_0, 1),
172 BPF_LD_IMM64(BPF_REG_0, 1),
175 .errstr = "invalid bpf_ld_imm64 insn",
181 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
184 .errstr = "invalid bpf_ld_imm64 insn",
190 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
192 .errstr = "invalid bpf_ld_imm64 insn",
198 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
199 BPF_RAW_INSN(0, 0, 0, 0, 0),
207 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
208 BPF_RAW_INSN(0, 0, 0, 0, 1),
216 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
217 BPF_RAW_INSN(0, 0, 0, 0, 1),
220 .errstr = "uses reserved fields",
226 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
227 BPF_RAW_INSN(0, 0, 0, 1, 1),
230 .errstr = "invalid bpf_ld_imm64 insn",
236 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
237 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
240 .errstr = "invalid bpf_ld_imm64 insn",
246 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
247 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
250 .errstr = "invalid bpf_ld_imm64 insn",
256 BPF_MOV64_IMM(BPF_REG_1, 0),
257 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
258 BPF_RAW_INSN(0, 0, 0, 0, 1),
261 .errstr = "not pointing to valid bpf_map",
267 BPF_MOV64_IMM(BPF_REG_1, 0),
268 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
269 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
272 .errstr = "invalid bpf_ld_imm64 insn",
278 BPF_MOV64_IMM(BPF_REG_0, 1),
279 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
283 .errstr = "BPF_ARSH not supported for 32 bit ALU",
288 BPF_MOV64_IMM(BPF_REG_0, 1),
289 BPF_MOV64_IMM(BPF_REG_1, 5),
290 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
294 .errstr = "BPF_ARSH not supported for 32 bit ALU",
299 BPF_MOV64_IMM(BPF_REG_0, 1),
300 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
308 BPF_MOV64_IMM(BPF_REG_0, 1),
309 BPF_MOV64_IMM(BPF_REG_1, 5),
310 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
318 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
320 .errstr = "jump out of range",
326 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
329 .errstr = "back-edge",
335 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
336 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
337 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
338 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
341 .errstr = "back-edge",
347 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
348 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
349 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
350 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
353 .errstr = "back-edge",
357 "read uninitialized register",
359 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
362 .errstr = "R2 !read_ok",
366 "read invalid register",
368 BPF_MOV64_REG(BPF_REG_0, -1),
371 .errstr = "R15 is invalid",
375 "program doesn't init R0 before exit",
377 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
380 .errstr = "R0 !read_ok",
384 "program doesn't init R0 before exit in all branches",
386 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
387 BPF_MOV64_IMM(BPF_REG_0, 1),
388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
391 .errstr = "R0 !read_ok",
392 .errstr_unpriv = "R1 pointer comparison",
396 "stack out of bounds",
398 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
401 .errstr = "invalid stack",
405 "invalid call insn1",
407 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
410 .errstr = "BPF_CALL uses reserved",
414 "invalid call insn2",
416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
419 .errstr = "BPF_CALL uses reserved",
423 "invalid function call",
425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
428 .errstr = "invalid func unknown#1234567",
432 "uninitialized stack1",
434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
436 BPF_LD_MAP_FD(BPF_REG_1, 0),
437 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
438 BPF_FUNC_map_lookup_elem),
442 .errstr = "invalid indirect read from stack",
446 "uninitialized stack2",
448 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
449 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
452 .errstr = "invalid read from stack",
456 "invalid fp arithmetic",
457 /* If this gets ever changed, make sure JITs can deal with it. */
459 BPF_MOV64_IMM(BPF_REG_0, 0),
460 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
461 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
462 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
465 .errstr = "R1 subtraction from stack pointer",
469 "non-invalid fp arithmetic",
471 BPF_MOV64_IMM(BPF_REG_0, 0),
472 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
478 "invalid argument register",
480 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
481 BPF_FUNC_get_cgroup_classid),
482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
483 BPF_FUNC_get_cgroup_classid),
486 .errstr = "R1 !read_ok",
488 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
491 "non-invalid argument register",
493 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
494 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
495 BPF_FUNC_get_cgroup_classid),
496 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
497 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
498 BPF_FUNC_get_cgroup_classid),
502 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
505 "check valid spill/fill",
507 /* spill R1(ctx) into stack */
508 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
509 /* fill it back into R2 */
510 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
511 /* should be able to access R0 = *(R2 + 8) */
512 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
513 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
516 .errstr_unpriv = "R0 leaks addr",
518 .result_unpriv = REJECT,
521 "check valid spill/fill, skb mark",
523 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
524 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
525 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
526 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
527 offsetof(struct __sk_buff, mark)),
531 .result_unpriv = ACCEPT,
534 "check corrupted spill/fill",
536 /* spill R1(ctx) into stack */
537 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
538 /* mess up with R1 pointer on stack */
539 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
540 /* fill back into R0 should fail */
541 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
544 .errstr_unpriv = "attempt to corrupt spilled",
545 .errstr = "corrupted spill",
549 "invalid src register in STX",
551 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
554 .errstr = "R15 is invalid",
558 "invalid dst register in STX",
560 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
563 .errstr = "R14 is invalid",
567 "invalid dst register in ST",
569 BPF_ST_MEM(BPF_B, 14, -1, -1),
572 .errstr = "R14 is invalid",
576 "invalid src register in LDX",
578 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
581 .errstr = "R12 is invalid",
585 "invalid dst register in LDX",
587 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
590 .errstr = "R11 is invalid",
596 BPF_RAW_INSN(0, 0, 0, 0, 0),
599 .errstr = "invalid BPF_LD_IMM",
605 BPF_RAW_INSN(1, 0, 0, 0, 0),
608 .errstr = "BPF_LDX uses reserved fields",
614 BPF_RAW_INSN(-1, 0, 0, 0, 0),
617 .errstr = "invalid BPF_ALU opcode f0",
623 BPF_RAW_INSN(-1, -1, -1, -1, -1),
626 .errstr = "invalid BPF_ALU opcode f0",
632 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
635 .errstr = "BPF_ALU uses reserved fields",
639 "misaligned read from stack",
641 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
642 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
645 .errstr = "misaligned stack access",
649 "invalid map_fd for function call",
651 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
652 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
654 BPF_LD_MAP_FD(BPF_REG_1, 0),
655 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
656 BPF_FUNC_map_delete_elem),
659 .errstr = "fd 0 is not pointing to valid bpf_map",
663 "don't check return value before access",
665 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
666 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
667 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
668 BPF_LD_MAP_FD(BPF_REG_1, 0),
669 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
670 BPF_FUNC_map_lookup_elem),
671 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
675 .errstr = "R0 invalid mem access 'map_value_or_null'",
679 "access memory with incorrect alignment",
681 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
682 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
684 BPF_LD_MAP_FD(BPF_REG_1, 0),
685 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
686 BPF_FUNC_map_lookup_elem),
687 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
688 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
692 .errstr = "misaligned value access",
694 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
697 "sometimes access memory with incorrect alignment",
699 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
700 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
702 BPF_LD_MAP_FD(BPF_REG_1, 0),
703 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
704 BPF_FUNC_map_lookup_elem),
705 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
706 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
708 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
712 .errstr = "R0 invalid mem access",
713 .errstr_unpriv = "R0 leaks addr",
715 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
720 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
721 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
722 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
723 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
724 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
725 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
726 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
727 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
728 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
729 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
730 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
731 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
732 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
733 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
734 BPF_MOV64_IMM(BPF_REG_0, 0),
737 .errstr_unpriv = "R1 pointer comparison",
738 .result_unpriv = REJECT,
744 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
745 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
746 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
747 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
748 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
749 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
750 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
751 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
752 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
753 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
754 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
755 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
756 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
757 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
758 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
759 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
760 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
761 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
762 BPF_MOV64_IMM(BPF_REG_0, 0),
765 .errstr_unpriv = "R1 pointer comparison",
766 .result_unpriv = REJECT,
772 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
774 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
775 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
776 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
777 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
778 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
779 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
780 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
782 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
784 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
786 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
788 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
789 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
790 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
792 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
794 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
796 BPF_LD_MAP_FD(BPF_REG_1, 0),
797 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
798 BPF_FUNC_map_delete_elem),
801 .fixup_map1 = { 24 },
802 .errstr_unpriv = "R1 pointer comparison",
803 .result_unpriv = REJECT,
809 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
810 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
811 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
812 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
813 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
814 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
815 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
816 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
817 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
818 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
819 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
820 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
821 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
823 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
824 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
825 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
826 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
827 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
828 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
829 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
830 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
831 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
832 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
833 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
834 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
835 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
836 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
837 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
838 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
839 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
840 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
841 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
842 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
843 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
844 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
845 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
846 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
847 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
848 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
849 BPF_MOV64_IMM(BPF_REG_0, 0),
852 .errstr_unpriv = "R1 pointer comparison",
853 .result_unpriv = REJECT,
859 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
860 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
861 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
862 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
863 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
864 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
865 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
866 BPF_MOV64_IMM(BPF_REG_0, 0),
867 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
868 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
869 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
870 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
871 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
872 BPF_MOV64_IMM(BPF_REG_0, 0),
873 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
874 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
875 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
876 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
877 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
878 BPF_MOV64_IMM(BPF_REG_0, 0),
879 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
880 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
881 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
882 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
883 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
884 BPF_MOV64_IMM(BPF_REG_0, 0),
885 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
886 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
887 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
888 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
889 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
890 BPF_MOV64_IMM(BPF_REG_0, 0),
893 .errstr_unpriv = "R1 pointer comparison",
894 .result_unpriv = REJECT,
898 "access skb fields ok",
900 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
901 offsetof(struct __sk_buff, len)),
902 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
903 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
904 offsetof(struct __sk_buff, mark)),
905 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
906 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
907 offsetof(struct __sk_buff, pkt_type)),
908 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
909 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
910 offsetof(struct __sk_buff, queue_mapping)),
911 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
912 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
913 offsetof(struct __sk_buff, protocol)),
914 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
915 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
916 offsetof(struct __sk_buff, vlan_present)),
917 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
918 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
919 offsetof(struct __sk_buff, vlan_tci)),
920 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
921 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
922 offsetof(struct __sk_buff, napi_id)),
923 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
929 "access skb fields bad1",
931 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
934 .errstr = "invalid bpf_context access",
938 "access skb fields bad2",
940 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
941 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
942 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
944 BPF_LD_MAP_FD(BPF_REG_1, 0),
945 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
946 BPF_FUNC_map_lookup_elem),
947 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
950 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
951 offsetof(struct __sk_buff, pkt_type)),
955 .errstr = "different pointers",
956 .errstr_unpriv = "R1 pointer comparison",
960 "access skb fields bad3",
962 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
963 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
964 offsetof(struct __sk_buff, pkt_type)),
966 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
967 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
969 BPF_LD_MAP_FD(BPF_REG_1, 0),
970 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
971 BPF_FUNC_map_lookup_elem),
972 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
974 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
975 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
978 .errstr = "different pointers",
979 .errstr_unpriv = "R1 pointer comparison",
983 "access skb fields bad4",
985 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
986 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
987 offsetof(struct __sk_buff, len)),
988 BPF_MOV64_IMM(BPF_REG_0, 0),
990 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
991 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
993 BPF_LD_MAP_FD(BPF_REG_1, 0),
994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
995 BPF_FUNC_map_lookup_elem),
996 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
998 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
999 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1001 .fixup_map1 = { 7 },
1002 .errstr = "different pointers",
1003 .errstr_unpriv = "R1 pointer comparison",
1007 "invalid access __sk_buff family",
1009 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1010 offsetof(struct __sk_buff, family)),
1013 .errstr = "invalid bpf_context access",
1017 "invalid access __sk_buff remote_ip4",
1019 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1020 offsetof(struct __sk_buff, remote_ip4)),
1023 .errstr = "invalid bpf_context access",
1027 "invalid access __sk_buff local_ip4",
1029 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1030 offsetof(struct __sk_buff, local_ip4)),
1033 .errstr = "invalid bpf_context access",
1037 "invalid access __sk_buff remote_ip6",
1039 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1040 offsetof(struct __sk_buff, remote_ip6)),
1043 .errstr = "invalid bpf_context access",
1047 "invalid access __sk_buff local_ip6",
1049 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1050 offsetof(struct __sk_buff, local_ip6)),
1053 .errstr = "invalid bpf_context access",
1057 "invalid access __sk_buff remote_port",
1059 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1060 offsetof(struct __sk_buff, remote_port)),
1063 .errstr = "invalid bpf_context access",
1067 "invalid access __sk_buff remote_port",
1069 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1070 offsetof(struct __sk_buff, local_port)),
1073 .errstr = "invalid bpf_context access",
1077 "valid access __sk_buff family",
1079 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1080 offsetof(struct __sk_buff, family)),
1084 .prog_type = BPF_PROG_TYPE_SK_SKB,
1087 "valid access __sk_buff remote_ip4",
1089 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1090 offsetof(struct __sk_buff, remote_ip4)),
1094 .prog_type = BPF_PROG_TYPE_SK_SKB,
1097 "valid access __sk_buff local_ip4",
1099 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1100 offsetof(struct __sk_buff, local_ip4)),
1104 .prog_type = BPF_PROG_TYPE_SK_SKB,
1107 "valid access __sk_buff remote_ip6",
1109 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1110 offsetof(struct __sk_buff, remote_ip6[0])),
1111 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1112 offsetof(struct __sk_buff, remote_ip6[1])),
1113 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1114 offsetof(struct __sk_buff, remote_ip6[2])),
1115 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1116 offsetof(struct __sk_buff, remote_ip6[3])),
1120 .prog_type = BPF_PROG_TYPE_SK_SKB,
1123 "valid access __sk_buff local_ip6",
1125 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1126 offsetof(struct __sk_buff, local_ip6[0])),
1127 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1128 offsetof(struct __sk_buff, local_ip6[1])),
1129 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1130 offsetof(struct __sk_buff, local_ip6[2])),
1131 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1132 offsetof(struct __sk_buff, local_ip6[3])),
1136 .prog_type = BPF_PROG_TYPE_SK_SKB,
1139 "valid access __sk_buff remote_port",
1141 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1142 offsetof(struct __sk_buff, remote_port)),
1146 .prog_type = BPF_PROG_TYPE_SK_SKB,
1149 "valid access __sk_buff remote_port",
1151 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1152 offsetof(struct __sk_buff, local_port)),
1156 .prog_type = BPF_PROG_TYPE_SK_SKB,
1159 "invalid access of tc_classid for SK_SKB",
1161 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1162 offsetof(struct __sk_buff, tc_classid)),
1166 .prog_type = BPF_PROG_TYPE_SK_SKB,
1167 .errstr = "invalid bpf_context access",
1170 "invalid access of skb->mark for SK_SKB",
1172 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1173 offsetof(struct __sk_buff, mark)),
1177 .prog_type = BPF_PROG_TYPE_SK_SKB,
1178 .errstr = "invalid bpf_context access",
1181 "check skb->mark is not writeable by SK_SKB",
1183 BPF_MOV64_IMM(BPF_REG_0, 0),
1184 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1185 offsetof(struct __sk_buff, mark)),
1189 .prog_type = BPF_PROG_TYPE_SK_SKB,
1190 .errstr = "invalid bpf_context access",
1193 "check skb->tc_index is writeable by SK_SKB",
1195 BPF_MOV64_IMM(BPF_REG_0, 0),
1196 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1197 offsetof(struct __sk_buff, tc_index)),
1201 .prog_type = BPF_PROG_TYPE_SK_SKB,
1204 "check skb->priority is writeable by SK_SKB",
1206 BPF_MOV64_IMM(BPF_REG_0, 0),
1207 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1208 offsetof(struct __sk_buff, priority)),
1212 .prog_type = BPF_PROG_TYPE_SK_SKB,
1215 "direct packet read for SK_SKB",
1217 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1218 offsetof(struct __sk_buff, data)),
1219 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1220 offsetof(struct __sk_buff, data_end)),
1221 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1223 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1224 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1225 BPF_MOV64_IMM(BPF_REG_0, 0),
1229 .prog_type = BPF_PROG_TYPE_SK_SKB,
1232 "direct packet write for SK_SKB",
1234 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1235 offsetof(struct __sk_buff, data)),
1236 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1237 offsetof(struct __sk_buff, data_end)),
1238 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1239 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1240 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1241 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1242 BPF_MOV64_IMM(BPF_REG_0, 0),
1246 .prog_type = BPF_PROG_TYPE_SK_SKB,
1249 "overlapping checks for direct packet access SK_SKB",
1251 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1252 offsetof(struct __sk_buff, data)),
1253 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1254 offsetof(struct __sk_buff, data_end)),
1255 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1257 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1258 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1260 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1261 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1262 BPF_MOV64_IMM(BPF_REG_0, 0),
1266 .prog_type = BPF_PROG_TYPE_SK_SKB,
1269 "check skb->mark is not writeable by sockets",
1271 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1272 offsetof(struct __sk_buff, mark)),
1275 .errstr = "invalid bpf_context access",
1276 .errstr_unpriv = "R1 leaks addr",
1280 "check skb->tc_index is not writeable by sockets",
1282 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1283 offsetof(struct __sk_buff, tc_index)),
1286 .errstr = "invalid bpf_context access",
1287 .errstr_unpriv = "R1 leaks addr",
1291 "check cb access: byte",
1293 BPF_MOV64_IMM(BPF_REG_0, 0),
1294 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1295 offsetof(struct __sk_buff, cb[0])),
1296 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1297 offsetof(struct __sk_buff, cb[0]) + 1),
1298 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1299 offsetof(struct __sk_buff, cb[0]) + 2),
1300 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1301 offsetof(struct __sk_buff, cb[0]) + 3),
1302 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1303 offsetof(struct __sk_buff, cb[1])),
1304 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1305 offsetof(struct __sk_buff, cb[1]) + 1),
1306 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1307 offsetof(struct __sk_buff, cb[1]) + 2),
1308 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1309 offsetof(struct __sk_buff, cb[1]) + 3),
1310 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1311 offsetof(struct __sk_buff, cb[2])),
1312 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1313 offsetof(struct __sk_buff, cb[2]) + 1),
1314 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1315 offsetof(struct __sk_buff, cb[2]) + 2),
1316 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1317 offsetof(struct __sk_buff, cb[2]) + 3),
1318 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1319 offsetof(struct __sk_buff, cb[3])),
1320 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1321 offsetof(struct __sk_buff, cb[3]) + 1),
1322 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1323 offsetof(struct __sk_buff, cb[3]) + 2),
1324 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1325 offsetof(struct __sk_buff, cb[3]) + 3),
1326 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1327 offsetof(struct __sk_buff, cb[4])),
1328 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1329 offsetof(struct __sk_buff, cb[4]) + 1),
1330 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1331 offsetof(struct __sk_buff, cb[4]) + 2),
1332 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1333 offsetof(struct __sk_buff, cb[4]) + 3),
1334 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1335 offsetof(struct __sk_buff, cb[0])),
1336 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1337 offsetof(struct __sk_buff, cb[0]) + 1),
1338 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1339 offsetof(struct __sk_buff, cb[0]) + 2),
1340 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1341 offsetof(struct __sk_buff, cb[0]) + 3),
1342 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1343 offsetof(struct __sk_buff, cb[1])),
1344 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1345 offsetof(struct __sk_buff, cb[1]) + 1),
1346 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1347 offsetof(struct __sk_buff, cb[1]) + 2),
1348 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1349 offsetof(struct __sk_buff, cb[1]) + 3),
1350 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1351 offsetof(struct __sk_buff, cb[2])),
1352 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1353 offsetof(struct __sk_buff, cb[2]) + 1),
1354 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1355 offsetof(struct __sk_buff, cb[2]) + 2),
1356 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1357 offsetof(struct __sk_buff, cb[2]) + 3),
1358 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1359 offsetof(struct __sk_buff, cb[3])),
1360 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1361 offsetof(struct __sk_buff, cb[3]) + 1),
1362 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1363 offsetof(struct __sk_buff, cb[3]) + 2),
1364 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1365 offsetof(struct __sk_buff, cb[3]) + 3),
1366 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1367 offsetof(struct __sk_buff, cb[4])),
1368 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1369 offsetof(struct __sk_buff, cb[4]) + 1),
1370 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1371 offsetof(struct __sk_buff, cb[4]) + 2),
1372 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1373 offsetof(struct __sk_buff, cb[4]) + 3),
1379 "__sk_buff->hash, offset 0, byte store not permitted",
1381 BPF_MOV64_IMM(BPF_REG_0, 0),
1382 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1383 offsetof(struct __sk_buff, hash)),
1386 .errstr = "invalid bpf_context access",
1390 "__sk_buff->tc_index, offset 3, byte store not permitted",
1392 BPF_MOV64_IMM(BPF_REG_0, 0),
1393 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1394 offsetof(struct __sk_buff, tc_index) + 3),
1397 .errstr = "invalid bpf_context access",
1401 "check skb->hash byte load permitted",
1403 BPF_MOV64_IMM(BPF_REG_0, 0),
1404 #if __BYTE_ORDER == __LITTLE_ENDIAN
1405 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1406 offsetof(struct __sk_buff, hash)),
1408 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1409 offsetof(struct __sk_buff, hash) + 3),
1416 "check skb->hash byte load not permitted 1",
1418 BPF_MOV64_IMM(BPF_REG_0, 0),
1419 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1420 offsetof(struct __sk_buff, hash) + 1),
1423 .errstr = "invalid bpf_context access",
1427 "check skb->hash byte load not permitted 2",
1429 BPF_MOV64_IMM(BPF_REG_0, 0),
1430 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1431 offsetof(struct __sk_buff, hash) + 2),
1434 .errstr = "invalid bpf_context access",
1438 "check skb->hash byte load not permitted 3",
1440 BPF_MOV64_IMM(BPF_REG_0, 0),
1441 #if __BYTE_ORDER == __LITTLE_ENDIAN
1442 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1443 offsetof(struct __sk_buff, hash) + 3),
1445 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1446 offsetof(struct __sk_buff, hash)),
1450 .errstr = "invalid bpf_context access",
1454 "check cb access: byte, wrong type",
1456 BPF_MOV64_IMM(BPF_REG_0, 0),
1457 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1458 offsetof(struct __sk_buff, cb[0])),
1461 .errstr = "invalid bpf_context access",
1463 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1466 "check cb access: half",
1468 BPF_MOV64_IMM(BPF_REG_0, 0),
1469 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1470 offsetof(struct __sk_buff, cb[0])),
1471 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1472 offsetof(struct __sk_buff, cb[0]) + 2),
1473 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1474 offsetof(struct __sk_buff, cb[1])),
1475 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1476 offsetof(struct __sk_buff, cb[1]) + 2),
1477 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1478 offsetof(struct __sk_buff, cb[2])),
1479 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1480 offsetof(struct __sk_buff, cb[2]) + 2),
1481 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1482 offsetof(struct __sk_buff, cb[3])),
1483 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1484 offsetof(struct __sk_buff, cb[3]) + 2),
1485 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1486 offsetof(struct __sk_buff, cb[4])),
1487 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1488 offsetof(struct __sk_buff, cb[4]) + 2),
1489 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1490 offsetof(struct __sk_buff, cb[0])),
1491 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1492 offsetof(struct __sk_buff, cb[0]) + 2),
1493 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1494 offsetof(struct __sk_buff, cb[1])),
1495 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1496 offsetof(struct __sk_buff, cb[1]) + 2),
1497 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1498 offsetof(struct __sk_buff, cb[2])),
1499 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1500 offsetof(struct __sk_buff, cb[2]) + 2),
1501 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1502 offsetof(struct __sk_buff, cb[3])),
1503 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1504 offsetof(struct __sk_buff, cb[3]) + 2),
1505 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1506 offsetof(struct __sk_buff, cb[4])),
1507 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1508 offsetof(struct __sk_buff, cb[4]) + 2),
1514 "check cb access: half, unaligned",
1516 BPF_MOV64_IMM(BPF_REG_0, 0),
1517 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1518 offsetof(struct __sk_buff, cb[0]) + 1),
1521 .errstr = "misaligned context access",
1523 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1526 "check __sk_buff->hash, offset 0, half store not permitted",
1528 BPF_MOV64_IMM(BPF_REG_0, 0),
1529 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1530 offsetof(struct __sk_buff, hash)),
1533 .errstr = "invalid bpf_context access",
1537 "check __sk_buff->tc_index, offset 2, half store not permitted",
1539 BPF_MOV64_IMM(BPF_REG_0, 0),
1540 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1541 offsetof(struct __sk_buff, tc_index) + 2),
1544 .errstr = "invalid bpf_context access",
1548 "check skb->hash half load permitted",
1550 BPF_MOV64_IMM(BPF_REG_0, 0),
1551 #if __BYTE_ORDER == __LITTLE_ENDIAN
1552 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1553 offsetof(struct __sk_buff, hash)),
1555 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1556 offsetof(struct __sk_buff, hash) + 2),
1563 "check skb->hash half load not permitted",
1565 BPF_MOV64_IMM(BPF_REG_0, 0),
1566 #if __BYTE_ORDER == __LITTLE_ENDIAN
1567 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1568 offsetof(struct __sk_buff, hash) + 2),
1570 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1571 offsetof(struct __sk_buff, hash)),
1575 .errstr = "invalid bpf_context access",
1579 "check cb access: half, wrong type",
1581 BPF_MOV64_IMM(BPF_REG_0, 0),
1582 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1583 offsetof(struct __sk_buff, cb[0])),
1586 .errstr = "invalid bpf_context access",
1588 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1591 "check cb access: word",
1593 BPF_MOV64_IMM(BPF_REG_0, 0),
1594 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1595 offsetof(struct __sk_buff, cb[0])),
1596 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1597 offsetof(struct __sk_buff, cb[1])),
1598 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1599 offsetof(struct __sk_buff, cb[2])),
1600 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1601 offsetof(struct __sk_buff, cb[3])),
1602 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1603 offsetof(struct __sk_buff, cb[4])),
1604 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1605 offsetof(struct __sk_buff, cb[0])),
1606 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1607 offsetof(struct __sk_buff, cb[1])),
1608 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1609 offsetof(struct __sk_buff, cb[2])),
1610 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1611 offsetof(struct __sk_buff, cb[3])),
1612 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1613 offsetof(struct __sk_buff, cb[4])),
1619 "check cb access: word, unaligned 1",
1621 BPF_MOV64_IMM(BPF_REG_0, 0),
1622 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1623 offsetof(struct __sk_buff, cb[0]) + 2),
1626 .errstr = "misaligned context access",
1628 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1631 "check cb access: word, unaligned 2",
1633 BPF_MOV64_IMM(BPF_REG_0, 0),
1634 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1635 offsetof(struct __sk_buff, cb[4]) + 1),
1638 .errstr = "misaligned context access",
1640 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1643 "check cb access: word, unaligned 3",
1645 BPF_MOV64_IMM(BPF_REG_0, 0),
1646 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1647 offsetof(struct __sk_buff, cb[4]) + 2),
1650 .errstr = "misaligned context access",
1652 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1655 "check cb access: word, unaligned 4",
1657 BPF_MOV64_IMM(BPF_REG_0, 0),
1658 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1659 offsetof(struct __sk_buff, cb[4]) + 3),
1662 .errstr = "misaligned context access",
1664 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1667 "check cb access: double",
1669 BPF_MOV64_IMM(BPF_REG_0, 0),
1670 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1671 offsetof(struct __sk_buff, cb[0])),
1672 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1673 offsetof(struct __sk_buff, cb[2])),
1674 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1675 offsetof(struct __sk_buff, cb[0])),
1676 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1677 offsetof(struct __sk_buff, cb[2])),
1683 "check cb access: double, unaligned 1",
1685 BPF_MOV64_IMM(BPF_REG_0, 0),
1686 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1687 offsetof(struct __sk_buff, cb[1])),
1690 .errstr = "misaligned context access",
1692 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1695 "check cb access: double, unaligned 2",
1697 BPF_MOV64_IMM(BPF_REG_0, 0),
1698 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1699 offsetof(struct __sk_buff, cb[3])),
1702 .errstr = "misaligned context access",
1704 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1707 "check cb access: double, oob 1",
1709 BPF_MOV64_IMM(BPF_REG_0, 0),
1710 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1711 offsetof(struct __sk_buff, cb[4])),
1714 .errstr = "invalid bpf_context access",
1718 "check cb access: double, oob 2",
1720 BPF_MOV64_IMM(BPF_REG_0, 0),
1721 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1722 offsetof(struct __sk_buff, cb[4])),
1725 .errstr = "invalid bpf_context access",
1729 "check __sk_buff->ifindex dw store not permitted",
1731 BPF_MOV64_IMM(BPF_REG_0, 0),
1732 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1733 offsetof(struct __sk_buff, ifindex)),
1736 .errstr = "invalid bpf_context access",
1740 "check __sk_buff->ifindex dw load not permitted",
1742 BPF_MOV64_IMM(BPF_REG_0, 0),
1743 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1744 offsetof(struct __sk_buff, ifindex)),
1747 .errstr = "invalid bpf_context access",
1751 "check cb access: double, wrong type",
1753 BPF_MOV64_IMM(BPF_REG_0, 0),
1754 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1755 offsetof(struct __sk_buff, cb[0])),
1758 .errstr = "invalid bpf_context access",
1760 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1763 "check out of range skb->cb access",
1765 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1766 offsetof(struct __sk_buff, cb[0]) + 256),
1769 .errstr = "invalid bpf_context access",
1770 .errstr_unpriv = "",
1772 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1775 "write skb fields from socket prog",
1777 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1778 offsetof(struct __sk_buff, cb[4])),
1779 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1780 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1781 offsetof(struct __sk_buff, mark)),
1782 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1783 offsetof(struct __sk_buff, tc_index)),
1784 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1785 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1786 offsetof(struct __sk_buff, cb[0])),
1787 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1788 offsetof(struct __sk_buff, cb[2])),
1792 .errstr_unpriv = "R1 leaks addr",
1793 .result_unpriv = REJECT,
1796 "write skb fields from tc_cls_act prog",
1798 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1799 offsetof(struct __sk_buff, cb[0])),
1800 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1801 offsetof(struct __sk_buff, mark)),
1802 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1803 offsetof(struct __sk_buff, tc_index)),
1804 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1805 offsetof(struct __sk_buff, tc_index)),
1806 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1807 offsetof(struct __sk_buff, cb[3])),
1810 .errstr_unpriv = "",
1811 .result_unpriv = REJECT,
1813 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1816 "PTR_TO_STACK store/load",
1818 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1820 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1821 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1827 "PTR_TO_STACK store/load - bad alignment on off",
1829 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1831 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1832 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1836 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1839 "PTR_TO_STACK store/load - bad alignment on reg",
1841 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1843 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1844 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1848 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1851 "PTR_TO_STACK store/load - out of bounds low",
1853 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1854 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1855 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1856 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1860 .errstr = "invalid stack off=-79992 size=8",
1861 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
1864 "PTR_TO_STACK store/load - out of bounds high",
1866 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1868 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1869 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1873 .errstr = "invalid stack off=0 size=8",
1876 "unpriv: return pointer",
1878 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1882 .result_unpriv = REJECT,
1883 .errstr_unpriv = "R0 leaks addr",
1886 "unpriv: add const to pointer",
1888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1889 BPF_MOV64_IMM(BPF_REG_0, 0),
1895 "unpriv: add pointer to pointer",
1897 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1898 BPF_MOV64_IMM(BPF_REG_0, 0),
1902 .errstr = "R1 pointer += pointer",
1905 "unpriv: neg pointer",
1907 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1908 BPF_MOV64_IMM(BPF_REG_0, 0),
1912 .result_unpriv = REJECT,
1913 .errstr_unpriv = "R1 pointer arithmetic",
1916 "unpriv: cmp pointer with const",
1918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1919 BPF_MOV64_IMM(BPF_REG_0, 0),
1923 .result_unpriv = REJECT,
1924 .errstr_unpriv = "R1 pointer comparison",
1927 "unpriv: cmp pointer with pointer",
1929 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1930 BPF_MOV64_IMM(BPF_REG_0, 0),
1934 .result_unpriv = REJECT,
1935 .errstr_unpriv = "R10 pointer comparison",
1938 "unpriv: check that printk is disallowed",
1940 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1941 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1943 BPF_MOV64_IMM(BPF_REG_2, 8),
1944 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1945 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1946 BPF_FUNC_trace_printk),
1947 BPF_MOV64_IMM(BPF_REG_0, 0),
1950 .errstr_unpriv = "unknown func bpf_trace_printk#6",
1951 .result_unpriv = REJECT,
1955 "unpriv: pass pointer to helper function",
1957 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1958 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1959 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1960 BPF_LD_MAP_FD(BPF_REG_1, 0),
1961 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1962 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1963 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1964 BPF_FUNC_map_update_elem),
1965 BPF_MOV64_IMM(BPF_REG_0, 0),
1968 .fixup_map1 = { 3 },
1969 .errstr_unpriv = "R4 leaks addr",
1970 .result_unpriv = REJECT,
1974 "unpriv: indirectly pass pointer on stack to helper function",
1976 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1977 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1979 BPF_LD_MAP_FD(BPF_REG_1, 0),
1980 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1981 BPF_FUNC_map_lookup_elem),
1982 BPF_MOV64_IMM(BPF_REG_0, 0),
1985 .fixup_map1 = { 3 },
1986 .errstr = "invalid indirect read from stack off -8+0 size 8",
1990 "unpriv: mangle pointer on stack 1",
1992 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1993 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1994 BPF_MOV64_IMM(BPF_REG_0, 0),
1997 .errstr_unpriv = "attempt to corrupt spilled",
1998 .result_unpriv = REJECT,
2002 "unpriv: mangle pointer on stack 2",
2004 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2005 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2006 BPF_MOV64_IMM(BPF_REG_0, 0),
2009 .errstr_unpriv = "attempt to corrupt spilled",
2010 .result_unpriv = REJECT,
2014 "unpriv: read pointer from stack in small chunks",
2016 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2017 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2018 BPF_MOV64_IMM(BPF_REG_0, 0),
2021 .errstr = "invalid size",
2025 "unpriv: write pointer into ctx",
2027 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2028 BPF_MOV64_IMM(BPF_REG_0, 0),
2031 .errstr_unpriv = "R1 leaks addr",
2032 .result_unpriv = REJECT,
2033 .errstr = "invalid bpf_context access",
2037 "unpriv: spill/fill of ctx",
2039 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2041 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2042 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2043 BPF_MOV64_IMM(BPF_REG_0, 0),
2049 "unpriv: spill/fill of ctx 2",
2051 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2053 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2054 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2055 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2056 BPF_FUNC_get_hash_recalc),
2060 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2063 "unpriv: spill/fill of ctx 3",
2065 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2067 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2068 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2069 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2070 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2071 BPF_FUNC_get_hash_recalc),
2075 .errstr = "R1 type=fp expected=ctx",
2076 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2079 "unpriv: spill/fill of ctx 4",
2081 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2083 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2084 BPF_MOV64_IMM(BPF_REG_0, 1),
2085 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2087 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2088 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2089 BPF_FUNC_get_hash_recalc),
2093 .errstr = "R1 type=inv expected=ctx",
2094 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2097 "unpriv: spill/fill of different pointers stx",
2099 BPF_MOV64_IMM(BPF_REG_3, 42),
2100 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2102 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2105 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2106 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2107 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2108 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2109 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2110 offsetof(struct __sk_buff, mark)),
2111 BPF_MOV64_IMM(BPF_REG_0, 0),
2115 .errstr = "same insn cannot be used with different pointers",
2116 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2119 "unpriv: spill/fill of different pointers ldx",
2121 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2123 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2124 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2126 -(__s32)offsetof(struct bpf_perf_event_data,
2127 sample_period) - 8),
2128 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2129 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2130 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2131 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2132 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2133 offsetof(struct bpf_perf_event_data,
2135 BPF_MOV64_IMM(BPF_REG_0, 0),
2139 .errstr = "same insn cannot be used with different pointers",
2140 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2143 "unpriv: write pointer into map elem value",
2145 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2146 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2148 BPF_LD_MAP_FD(BPF_REG_1, 0),
2149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2150 BPF_FUNC_map_lookup_elem),
2151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2152 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2155 .fixup_map1 = { 3 },
2156 .errstr_unpriv = "R0 leaks addr",
2157 .result_unpriv = REJECT,
2161 "unpriv: partial copy of pointer",
2163 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2164 BPF_MOV64_IMM(BPF_REG_0, 0),
2167 .errstr_unpriv = "R10 partial copy",
2168 .result_unpriv = REJECT,
2172 "unpriv: pass pointer to tail_call",
2174 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2175 BPF_LD_MAP_FD(BPF_REG_2, 0),
2176 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2177 BPF_FUNC_tail_call),
2178 BPF_MOV64_IMM(BPF_REG_0, 0),
2181 .fixup_prog = { 1 },
2182 .errstr_unpriv = "R3 leaks addr into helper",
2183 .result_unpriv = REJECT,
2187 "unpriv: cmp map pointer with zero",
2189 BPF_MOV64_IMM(BPF_REG_1, 0),
2190 BPF_LD_MAP_FD(BPF_REG_1, 0),
2191 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2192 BPF_MOV64_IMM(BPF_REG_0, 0),
2195 .fixup_map1 = { 1 },
2196 .errstr_unpriv = "R1 pointer comparison",
2197 .result_unpriv = REJECT,
2201 "unpriv: write into frame pointer",
2203 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2204 BPF_MOV64_IMM(BPF_REG_0, 0),
2207 .errstr = "frame pointer is read only",
2211 "unpriv: spill/fill frame pointer",
2213 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2215 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2216 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2217 BPF_MOV64_IMM(BPF_REG_0, 0),
2220 .errstr = "frame pointer is read only",
2224 "unpriv: cmp of frame pointer",
2226 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2227 BPF_MOV64_IMM(BPF_REG_0, 0),
2230 .errstr_unpriv = "R10 pointer comparison",
2231 .result_unpriv = REJECT,
2235 "unpriv: adding of fp, reg",
2237 BPF_MOV64_IMM(BPF_REG_0, 0),
2238 BPF_MOV64_IMM(BPF_REG_1, 0),
2239 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2240 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2243 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2244 .result_unpriv = REJECT,
2248 "unpriv: adding of fp, imm",
2250 BPF_MOV64_IMM(BPF_REG_0, 0),
2251 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
2253 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2256 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
2257 .result_unpriv = REJECT,
2261 "unpriv: cmp of stack pointer",
2263 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2265 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2266 BPF_MOV64_IMM(BPF_REG_0, 0),
2269 .errstr_unpriv = "R2 pointer comparison",
2270 .result_unpriv = REJECT,
2274 "runtime/jit: pass negative index to tail_call",
2276 BPF_MOV64_IMM(BPF_REG_3, -1),
2277 BPF_LD_MAP_FD(BPF_REG_2, 0),
2278 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2279 BPF_FUNC_tail_call),
2280 BPF_MOV64_IMM(BPF_REG_0, 0),
2283 .fixup_prog = { 1 },
2287 "runtime/jit: pass > 32bit index to tail_call",
2289 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
2290 BPF_LD_MAP_FD(BPF_REG_2, 0),
2291 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2292 BPF_FUNC_tail_call),
2293 BPF_MOV64_IMM(BPF_REG_0, 0),
2296 .fixup_prog = { 2 },
2300 "stack pointer arithmetic",
2302 BPF_MOV64_IMM(BPF_REG_1, 4),
2303 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2304 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2305 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2307 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2308 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2309 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2310 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2312 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2313 BPF_MOV64_IMM(BPF_REG_0, 0),
2319 "raw_stack: no skb_load_bytes",
2321 BPF_MOV64_IMM(BPF_REG_2, 4),
2322 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2323 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2324 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2325 BPF_MOV64_IMM(BPF_REG_4, 8),
2326 /* Call to skb_load_bytes() omitted. */
2327 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2331 .errstr = "invalid read from stack off -8+0 size 8",
2332 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2335 "raw_stack: skb_load_bytes, negative len",
2337 BPF_MOV64_IMM(BPF_REG_2, 4),
2338 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2340 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2341 BPF_MOV64_IMM(BPF_REG_4, -8),
2342 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2343 BPF_FUNC_skb_load_bytes),
2344 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2348 .errstr = "R4 min value is negative",
2349 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2352 "raw_stack: skb_load_bytes, negative len 2",
2354 BPF_MOV64_IMM(BPF_REG_2, 4),
2355 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2357 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2358 BPF_MOV64_IMM(BPF_REG_4, ~0),
2359 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2360 BPF_FUNC_skb_load_bytes),
2361 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2365 .errstr = "R4 min value is negative",
2366 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2369 "raw_stack: skb_load_bytes, zero len",
2371 BPF_MOV64_IMM(BPF_REG_2, 4),
2372 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2374 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2375 BPF_MOV64_IMM(BPF_REG_4, 0),
2376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2377 BPF_FUNC_skb_load_bytes),
2378 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2382 .errstr = "invalid stack type R3",
2383 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2386 "raw_stack: skb_load_bytes, no init",
2388 BPF_MOV64_IMM(BPF_REG_2, 4),
2389 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2391 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2392 BPF_MOV64_IMM(BPF_REG_4, 8),
2393 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2394 BPF_FUNC_skb_load_bytes),
2395 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2399 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2402 "raw_stack: skb_load_bytes, init",
2404 BPF_MOV64_IMM(BPF_REG_2, 4),
2405 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2407 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2408 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2409 BPF_MOV64_IMM(BPF_REG_4, 8),
2410 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2411 BPF_FUNC_skb_load_bytes),
2412 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2416 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2419 "raw_stack: skb_load_bytes, spilled regs around bounds",
2421 BPF_MOV64_IMM(BPF_REG_2, 4),
2422 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2424 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2425 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2426 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2427 BPF_MOV64_IMM(BPF_REG_4, 8),
2428 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2429 BPF_FUNC_skb_load_bytes),
2430 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2431 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2432 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2433 offsetof(struct __sk_buff, mark)),
2434 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2435 offsetof(struct __sk_buff, priority)),
2436 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2440 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2443 "raw_stack: skb_load_bytes, spilled regs corruption",
2445 BPF_MOV64_IMM(BPF_REG_2, 4),
2446 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2447 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2448 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2449 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2450 BPF_MOV64_IMM(BPF_REG_4, 8),
2451 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2452 BPF_FUNC_skb_load_bytes),
2453 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2454 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2455 offsetof(struct __sk_buff, mark)),
2459 .errstr = "R0 invalid mem access 'inv'",
2460 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2463 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2465 BPF_MOV64_IMM(BPF_REG_2, 4),
2466 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2467 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2468 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2469 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2470 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2471 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2472 BPF_MOV64_IMM(BPF_REG_4, 8),
2473 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2474 BPF_FUNC_skb_load_bytes),
2475 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2476 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2477 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2478 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2479 offsetof(struct __sk_buff, mark)),
2480 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2481 offsetof(struct __sk_buff, priority)),
2482 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2483 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2484 offsetof(struct __sk_buff, pkt_type)),
2485 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2489 .errstr = "R3 invalid mem access 'inv'",
2490 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2493 "raw_stack: skb_load_bytes, spilled regs + data",
2495 BPF_MOV64_IMM(BPF_REG_2, 4),
2496 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2497 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2498 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2499 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2500 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2501 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2502 BPF_MOV64_IMM(BPF_REG_4, 8),
2503 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2504 BPF_FUNC_skb_load_bytes),
2505 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2506 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2507 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2508 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2509 offsetof(struct __sk_buff, mark)),
2510 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2511 offsetof(struct __sk_buff, priority)),
2512 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2513 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2517 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2520 "raw_stack: skb_load_bytes, invalid access 1",
2522 BPF_MOV64_IMM(BPF_REG_2, 4),
2523 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2525 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2526 BPF_MOV64_IMM(BPF_REG_4, 8),
2527 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2528 BPF_FUNC_skb_load_bytes),
2529 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2533 .errstr = "invalid stack type R3 off=-513 access_size=8",
2534 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2537 "raw_stack: skb_load_bytes, invalid access 2",
2539 BPF_MOV64_IMM(BPF_REG_2, 4),
2540 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2541 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2542 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2543 BPF_MOV64_IMM(BPF_REG_4, 8),
2544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2545 BPF_FUNC_skb_load_bytes),
2546 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2550 .errstr = "invalid stack type R3 off=-1 access_size=8",
2551 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2554 "raw_stack: skb_load_bytes, invalid access 3",
2556 BPF_MOV64_IMM(BPF_REG_2, 4),
2557 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2559 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2560 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2561 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2562 BPF_FUNC_skb_load_bytes),
2563 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2567 .errstr = "R4 min value is negative",
2568 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2571 "raw_stack: skb_load_bytes, invalid access 4",
2573 BPF_MOV64_IMM(BPF_REG_2, 4),
2574 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2576 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2577 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2578 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2579 BPF_FUNC_skb_load_bytes),
2580 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2584 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2585 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2588 "raw_stack: skb_load_bytes, invalid access 5",
2590 BPF_MOV64_IMM(BPF_REG_2, 4),
2591 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2593 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2594 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2595 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2596 BPF_FUNC_skb_load_bytes),
2597 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2601 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2602 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2605 "raw_stack: skb_load_bytes, invalid access 6",
2607 BPF_MOV64_IMM(BPF_REG_2, 4),
2608 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2610 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2611 BPF_MOV64_IMM(BPF_REG_4, 0),
2612 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2613 BPF_FUNC_skb_load_bytes),
2614 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2618 .errstr = "invalid stack type R3 off=-512 access_size=0",
2619 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2622 "raw_stack: skb_load_bytes, large access",
2624 BPF_MOV64_IMM(BPF_REG_2, 4),
2625 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2627 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2628 BPF_MOV64_IMM(BPF_REG_4, 512),
2629 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2630 BPF_FUNC_skb_load_bytes),
2631 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2635 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2638 "context stores via ST",
2640 BPF_MOV64_IMM(BPF_REG_0, 0),
2641 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
2644 .errstr = "BPF_ST stores into R1 context is not allowed",
2646 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2649 "context stores via XADD",
2651 BPF_MOV64_IMM(BPF_REG_0, 0),
2652 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
2653 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
2656 .errstr = "BPF_XADD stores into R1 context is not allowed",
2658 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2661 "direct packet access: test1",
2663 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2664 offsetof(struct __sk_buff, data)),
2665 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2666 offsetof(struct __sk_buff, data_end)),
2667 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2669 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2670 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2671 BPF_MOV64_IMM(BPF_REG_0, 0),
2675 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2678 "direct packet access: test2",
2680 BPF_MOV64_IMM(BPF_REG_0, 1),
2681 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2682 offsetof(struct __sk_buff, data_end)),
2683 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2684 offsetof(struct __sk_buff, data)),
2685 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2687 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2688 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2689 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2690 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2691 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2692 offsetof(struct __sk_buff, data)),
2693 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2694 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2695 offsetof(struct __sk_buff, len)),
2696 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2697 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2698 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2699 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2701 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2702 offsetof(struct __sk_buff, data_end)),
2703 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2704 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2705 BPF_MOV64_IMM(BPF_REG_0, 0),
2709 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2712 "direct packet access: test3",
2714 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2715 offsetof(struct __sk_buff, data)),
2716 BPF_MOV64_IMM(BPF_REG_0, 0),
2719 .errstr = "invalid bpf_context access off=76",
2721 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2724 "direct packet access: test4 (write)",
2726 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2727 offsetof(struct __sk_buff, data)),
2728 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2729 offsetof(struct __sk_buff, data_end)),
2730 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2731 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2732 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2733 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2734 BPF_MOV64_IMM(BPF_REG_0, 0),
2738 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2741 "direct packet access: test5 (pkt_end >= reg, good access)",
2743 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2744 offsetof(struct __sk_buff, data)),
2745 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2746 offsetof(struct __sk_buff, data_end)),
2747 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2748 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2749 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2750 BPF_MOV64_IMM(BPF_REG_0, 1),
2752 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2753 BPF_MOV64_IMM(BPF_REG_0, 0),
2757 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2760 "direct packet access: test6 (pkt_end >= reg, bad access)",
2762 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2763 offsetof(struct __sk_buff, data)),
2764 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2765 offsetof(struct __sk_buff, data_end)),
2766 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2768 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2769 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2770 BPF_MOV64_IMM(BPF_REG_0, 1),
2772 BPF_MOV64_IMM(BPF_REG_0, 0),
2775 .errstr = "invalid access to packet",
2777 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2780 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2782 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2783 offsetof(struct __sk_buff, data)),
2784 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2785 offsetof(struct __sk_buff, data_end)),
2786 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2788 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2789 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2790 BPF_MOV64_IMM(BPF_REG_0, 1),
2792 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2793 BPF_MOV64_IMM(BPF_REG_0, 0),
2796 .errstr = "invalid access to packet",
2798 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2801 "direct packet access: test8 (double test, variant 1)",
2803 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2804 offsetof(struct __sk_buff, data)),
2805 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2806 offsetof(struct __sk_buff, data_end)),
2807 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2808 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2809 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2810 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2811 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2812 BPF_MOV64_IMM(BPF_REG_0, 1),
2814 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2815 BPF_MOV64_IMM(BPF_REG_0, 0),
2819 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2822 "direct packet access: test9 (double test, variant 2)",
2824 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2825 offsetof(struct __sk_buff, data)),
2826 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2827 offsetof(struct __sk_buff, data_end)),
2828 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2829 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2830 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2831 BPF_MOV64_IMM(BPF_REG_0, 1),
2833 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2834 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2835 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2836 BPF_MOV64_IMM(BPF_REG_0, 0),
2840 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2843 "direct packet access: test10 (write invalid)",
2845 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2846 offsetof(struct __sk_buff, data)),
2847 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2848 offsetof(struct __sk_buff, data_end)),
2849 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2851 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2852 BPF_MOV64_IMM(BPF_REG_0, 0),
2854 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2855 BPF_MOV64_IMM(BPF_REG_0, 0),
2858 .errstr = "invalid access to packet",
2860 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2863 "direct packet access: test11 (shift, good access)",
2865 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2866 offsetof(struct __sk_buff, data)),
2867 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2868 offsetof(struct __sk_buff, data_end)),
2869 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2871 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2872 BPF_MOV64_IMM(BPF_REG_3, 144),
2873 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2875 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2876 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2877 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2878 BPF_MOV64_IMM(BPF_REG_0, 1),
2880 BPF_MOV64_IMM(BPF_REG_0, 0),
2884 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2887 "direct packet access: test12 (and, good access)",
2889 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2890 offsetof(struct __sk_buff, data)),
2891 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2892 offsetof(struct __sk_buff, data_end)),
2893 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2895 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2896 BPF_MOV64_IMM(BPF_REG_3, 144),
2897 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2899 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2900 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2901 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2902 BPF_MOV64_IMM(BPF_REG_0, 1),
2904 BPF_MOV64_IMM(BPF_REG_0, 0),
2908 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2911 "direct packet access: test13 (branches, good access)",
2913 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2914 offsetof(struct __sk_buff, data)),
2915 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2916 offsetof(struct __sk_buff, data_end)),
2917 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2919 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2920 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2921 offsetof(struct __sk_buff, mark)),
2922 BPF_MOV64_IMM(BPF_REG_4, 1),
2923 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2924 BPF_MOV64_IMM(BPF_REG_3, 14),
2925 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2926 BPF_MOV64_IMM(BPF_REG_3, 24),
2927 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2929 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2930 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2931 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2932 BPF_MOV64_IMM(BPF_REG_0, 1),
2934 BPF_MOV64_IMM(BPF_REG_0, 0),
2938 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2941 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2943 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2944 offsetof(struct __sk_buff, data)),
2945 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2946 offsetof(struct __sk_buff, data_end)),
2947 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2949 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2950 BPF_MOV64_IMM(BPF_REG_5, 12),
2951 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2952 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2953 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2954 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2955 BPF_MOV64_IMM(BPF_REG_0, 1),
2957 BPF_MOV64_IMM(BPF_REG_0, 0),
2961 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2964 "direct packet access: test15 (spill with xadd)",
2966 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2967 offsetof(struct __sk_buff, data)),
2968 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2969 offsetof(struct __sk_buff, data_end)),
2970 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2972 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2973 BPF_MOV64_IMM(BPF_REG_5, 4096),
2974 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2976 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2977 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2978 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2979 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2980 BPF_MOV64_IMM(BPF_REG_0, 0),
2983 .errstr = "R2 invalid mem access 'inv'",
2985 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2988 "direct packet access: test16 (arith on data_end)",
2990 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2991 offsetof(struct __sk_buff, data)),
2992 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2993 offsetof(struct __sk_buff, data_end)),
2994 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2996 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2997 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2998 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2999 BPF_MOV64_IMM(BPF_REG_0, 0),
3002 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
3004 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3007 "direct packet access: test17 (pruning, alignment)",
3009 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3010 offsetof(struct __sk_buff, data)),
3011 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3012 offsetof(struct __sk_buff, data_end)),
3013 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3014 offsetof(struct __sk_buff, mark)),
3015 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3016 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3017 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3018 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3019 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3020 BPF_MOV64_IMM(BPF_REG_0, 0),
3022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3025 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3027 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3028 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3031 "direct packet access: test18 (imm += pkt_ptr, 1)",
3033 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3034 offsetof(struct __sk_buff, data)),
3035 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3036 offsetof(struct __sk_buff, data_end)),
3037 BPF_MOV64_IMM(BPF_REG_0, 8),
3038 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3039 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3040 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3041 BPF_MOV64_IMM(BPF_REG_0, 0),
3045 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3048 "direct packet access: test19 (imm += pkt_ptr, 2)",
3050 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3051 offsetof(struct __sk_buff, data)),
3052 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3053 offsetof(struct __sk_buff, data_end)),
3054 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3056 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3057 BPF_MOV64_IMM(BPF_REG_4, 4),
3058 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3059 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3060 BPF_MOV64_IMM(BPF_REG_0, 0),
3064 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3067 "direct packet access: test20 (x += pkt_ptr, 1)",
3069 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3070 offsetof(struct __sk_buff, data)),
3071 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3072 offsetof(struct __sk_buff, data_end)),
3073 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3074 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3075 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3076 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3077 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3078 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3079 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3080 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3081 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3082 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3083 BPF_MOV64_IMM(BPF_REG_0, 0),
3086 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3090 "direct packet access: test21 (x += pkt_ptr, 2)",
3092 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3093 offsetof(struct __sk_buff, data)),
3094 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3095 offsetof(struct __sk_buff, data_end)),
3096 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3097 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3098 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3099 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3100 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3101 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3102 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3103 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3104 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3106 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3107 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3108 BPF_MOV64_IMM(BPF_REG_0, 0),
3111 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3115 "direct packet access: test22 (x += pkt_ptr, 3)",
3117 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3118 offsetof(struct __sk_buff, data)),
3119 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3120 offsetof(struct __sk_buff, data_end)),
3121 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3123 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3124 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3125 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3126 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3127 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3128 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3129 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3130 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3131 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3132 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3133 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3135 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3136 BPF_MOV64_IMM(BPF_REG_2, 1),
3137 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3138 BPF_MOV64_IMM(BPF_REG_0, 0),
3141 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3145 "direct packet access: test23 (x += pkt_ptr, 4)",
3147 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3148 offsetof(struct __sk_buff, data)),
3149 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3150 offsetof(struct __sk_buff, data_end)),
3151 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3152 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3153 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3154 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3155 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3156 BPF_MOV64_IMM(BPF_REG_0, 31),
3157 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3158 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3159 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3160 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3161 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3162 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3163 BPF_MOV64_IMM(BPF_REG_0, 0),
3166 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3168 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3171 "direct packet access: test24 (x += pkt_ptr, 5)",
3173 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3174 offsetof(struct __sk_buff, data)),
3175 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3176 offsetof(struct __sk_buff, data_end)),
3177 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3178 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3179 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3180 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3181 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3182 BPF_MOV64_IMM(BPF_REG_0, 64),
3183 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3184 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3185 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3187 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3188 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3189 BPF_MOV64_IMM(BPF_REG_0, 0),
3192 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3196 "direct packet access: test25 (marking on <, good access)",
3198 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3199 offsetof(struct __sk_buff, data)),
3200 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3201 offsetof(struct __sk_buff, data_end)),
3202 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3203 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3204 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3205 BPF_MOV64_IMM(BPF_REG_0, 0),
3207 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3208 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3211 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3214 "direct packet access: test26 (marking on <, bad access)",
3216 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3217 offsetof(struct __sk_buff, data)),
3218 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3219 offsetof(struct __sk_buff, data_end)),
3220 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3222 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3223 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3224 BPF_MOV64_IMM(BPF_REG_0, 0),
3226 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3229 .errstr = "invalid access to packet",
3230 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3233 "direct packet access: test27 (marking on <=, good access)",
3235 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3236 offsetof(struct __sk_buff, data)),
3237 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3238 offsetof(struct __sk_buff, data_end)),
3239 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3241 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3242 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3243 BPF_MOV64_IMM(BPF_REG_0, 1),
3247 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3250 "direct packet access: test28 (marking on <=, bad access)",
3252 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3253 offsetof(struct __sk_buff, data)),
3254 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3255 offsetof(struct __sk_buff, data_end)),
3256 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3257 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3258 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3259 BPF_MOV64_IMM(BPF_REG_0, 1),
3261 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3262 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3265 .errstr = "invalid access to packet",
3266 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3269 "helper access to packet: test1, valid packet_ptr range",
3271 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3272 offsetof(struct xdp_md, data)),
3273 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3274 offsetof(struct xdp_md, data_end)),
3275 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3277 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3278 BPF_LD_MAP_FD(BPF_REG_1, 0),
3279 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3280 BPF_MOV64_IMM(BPF_REG_4, 0),
3281 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3282 BPF_FUNC_map_update_elem),
3283 BPF_MOV64_IMM(BPF_REG_0, 0),
3286 .fixup_map1 = { 5 },
3287 .result_unpriv = ACCEPT,
3289 .prog_type = BPF_PROG_TYPE_XDP,
3292 "helper access to packet: test2, unchecked packet_ptr",
3294 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3295 offsetof(struct xdp_md, data)),
3296 BPF_LD_MAP_FD(BPF_REG_1, 0),
3297 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3298 BPF_FUNC_map_lookup_elem),
3299 BPF_MOV64_IMM(BPF_REG_0, 0),
3302 .fixup_map1 = { 1 },
3304 .errstr = "invalid access to packet",
3305 .prog_type = BPF_PROG_TYPE_XDP,
3308 "helper access to packet: test3, variable add",
3310 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3311 offsetof(struct xdp_md, data)),
3312 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3313 offsetof(struct xdp_md, data_end)),
3314 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3316 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3317 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3318 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3319 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3320 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3321 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3322 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3323 BPF_LD_MAP_FD(BPF_REG_1, 0),
3324 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3325 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3326 BPF_FUNC_map_lookup_elem),
3327 BPF_MOV64_IMM(BPF_REG_0, 0),
3330 .fixup_map1 = { 11 },
3332 .prog_type = BPF_PROG_TYPE_XDP,
3335 "helper access to packet: test4, packet_ptr with bad range",
3337 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3338 offsetof(struct xdp_md, data)),
3339 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3340 offsetof(struct xdp_md, data_end)),
3341 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3342 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3343 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3344 BPF_MOV64_IMM(BPF_REG_0, 0),
3346 BPF_LD_MAP_FD(BPF_REG_1, 0),
3347 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3348 BPF_FUNC_map_lookup_elem),
3349 BPF_MOV64_IMM(BPF_REG_0, 0),
3352 .fixup_map1 = { 7 },
3354 .errstr = "invalid access to packet",
3355 .prog_type = BPF_PROG_TYPE_XDP,
3358 "helper access to packet: test5, packet_ptr with too short range",
3360 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3361 offsetof(struct xdp_md, data)),
3362 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3363 offsetof(struct xdp_md, data_end)),
3364 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3365 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3367 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3368 BPF_LD_MAP_FD(BPF_REG_1, 0),
3369 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3370 BPF_FUNC_map_lookup_elem),
3371 BPF_MOV64_IMM(BPF_REG_0, 0),
3374 .fixup_map1 = { 6 },
3376 .errstr = "invalid access to packet",
3377 .prog_type = BPF_PROG_TYPE_XDP,
3380 "helper access to packet: test6, cls valid packet_ptr range",
3382 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3383 offsetof(struct __sk_buff, data)),
3384 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3385 offsetof(struct __sk_buff, data_end)),
3386 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3388 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3389 BPF_LD_MAP_FD(BPF_REG_1, 0),
3390 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3391 BPF_MOV64_IMM(BPF_REG_4, 0),
3392 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3393 BPF_FUNC_map_update_elem),
3394 BPF_MOV64_IMM(BPF_REG_0, 0),
3397 .fixup_map1 = { 5 },
3399 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3402 "helper access to packet: test7, cls unchecked packet_ptr",
3404 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3405 offsetof(struct __sk_buff, data)),
3406 BPF_LD_MAP_FD(BPF_REG_1, 0),
3407 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3408 BPF_FUNC_map_lookup_elem),
3409 BPF_MOV64_IMM(BPF_REG_0, 0),
3412 .fixup_map1 = { 1 },
3414 .errstr = "invalid access to packet",
3415 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3418 "helper access to packet: test8, cls variable add",
3420 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3421 offsetof(struct __sk_buff, data)),
3422 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3423 offsetof(struct __sk_buff, data_end)),
3424 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3425 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3426 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3427 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3428 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3429 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3430 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3431 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3432 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3433 BPF_LD_MAP_FD(BPF_REG_1, 0),
3434 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3435 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3436 BPF_FUNC_map_lookup_elem),
3437 BPF_MOV64_IMM(BPF_REG_0, 0),
3440 .fixup_map1 = { 11 },
3442 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3445 "helper access to packet: test9, cls packet_ptr with bad range",
3447 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3448 offsetof(struct __sk_buff, data)),
3449 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3450 offsetof(struct __sk_buff, data_end)),
3451 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3453 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3454 BPF_MOV64_IMM(BPF_REG_0, 0),
3456 BPF_LD_MAP_FD(BPF_REG_1, 0),
3457 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3458 BPF_FUNC_map_lookup_elem),
3459 BPF_MOV64_IMM(BPF_REG_0, 0),
3462 .fixup_map1 = { 7 },
3464 .errstr = "invalid access to packet",
3465 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3468 "helper access to packet: test10, cls packet_ptr with too short range",
3470 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3471 offsetof(struct __sk_buff, data)),
3472 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3473 offsetof(struct __sk_buff, data_end)),
3474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3475 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3477 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3478 BPF_LD_MAP_FD(BPF_REG_1, 0),
3479 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3480 BPF_FUNC_map_lookup_elem),
3481 BPF_MOV64_IMM(BPF_REG_0, 0),
3484 .fixup_map1 = { 6 },
3486 .errstr = "invalid access to packet",
3487 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3490 "helper access to packet: test11, cls unsuitable helper 1",
3492 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3493 offsetof(struct __sk_buff, data)),
3494 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3495 offsetof(struct __sk_buff, data_end)),
3496 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3497 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3498 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3499 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3500 BPF_MOV64_IMM(BPF_REG_2, 0),
3501 BPF_MOV64_IMM(BPF_REG_4, 42),
3502 BPF_MOV64_IMM(BPF_REG_5, 0),
3503 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3504 BPF_FUNC_skb_store_bytes),
3505 BPF_MOV64_IMM(BPF_REG_0, 0),
3509 .errstr = "helper access to the packet",
3510 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3513 "helper access to packet: test12, cls unsuitable helper 2",
3515 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3516 offsetof(struct __sk_buff, data)),
3517 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3518 offsetof(struct __sk_buff, data_end)),
3519 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3521 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3522 BPF_MOV64_IMM(BPF_REG_2, 0),
3523 BPF_MOV64_IMM(BPF_REG_4, 4),
3524 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3525 BPF_FUNC_skb_load_bytes),
3526 BPF_MOV64_IMM(BPF_REG_0, 0),
3530 .errstr = "helper access to the packet",
3531 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3534 "helper access to packet: test13, cls helper ok",
3536 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3537 offsetof(struct __sk_buff, data)),
3538 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3539 offsetof(struct __sk_buff, data_end)),
3540 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3541 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3543 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3544 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3545 BPF_MOV64_IMM(BPF_REG_2, 4),
3546 BPF_MOV64_IMM(BPF_REG_3, 0),
3547 BPF_MOV64_IMM(BPF_REG_4, 0),
3548 BPF_MOV64_IMM(BPF_REG_5, 0),
3549 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3550 BPF_FUNC_csum_diff),
3551 BPF_MOV64_IMM(BPF_REG_0, 0),
3555 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3558 "helper access to packet: test14, cls helper ok sub",
3560 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3561 offsetof(struct __sk_buff, data)),
3562 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3563 offsetof(struct __sk_buff, data_end)),
3564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3565 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3567 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3568 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3569 BPF_MOV64_IMM(BPF_REG_2, 4),
3570 BPF_MOV64_IMM(BPF_REG_3, 0),
3571 BPF_MOV64_IMM(BPF_REG_4, 0),
3572 BPF_MOV64_IMM(BPF_REG_5, 0),
3573 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3574 BPF_FUNC_csum_diff),
3575 BPF_MOV64_IMM(BPF_REG_0, 0),
3579 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3582 "helper access to packet: test15, cls helper fail sub",
3584 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3585 offsetof(struct __sk_buff, data)),
3586 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3587 offsetof(struct __sk_buff, data_end)),
3588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3591 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3592 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3593 BPF_MOV64_IMM(BPF_REG_2, 4),
3594 BPF_MOV64_IMM(BPF_REG_3, 0),
3595 BPF_MOV64_IMM(BPF_REG_4, 0),
3596 BPF_MOV64_IMM(BPF_REG_5, 0),
3597 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3598 BPF_FUNC_csum_diff),
3599 BPF_MOV64_IMM(BPF_REG_0, 0),
3603 .errstr = "invalid access to packet",
3604 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3607 "helper access to packet: test16, cls helper fail range 1",
3609 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3610 offsetof(struct __sk_buff, data)),
3611 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3612 offsetof(struct __sk_buff, data_end)),
3613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3614 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3616 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3617 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3618 BPF_MOV64_IMM(BPF_REG_2, 8),
3619 BPF_MOV64_IMM(BPF_REG_3, 0),
3620 BPF_MOV64_IMM(BPF_REG_4, 0),
3621 BPF_MOV64_IMM(BPF_REG_5, 0),
3622 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3623 BPF_FUNC_csum_diff),
3624 BPF_MOV64_IMM(BPF_REG_0, 0),
3628 .errstr = "invalid access to packet",
3629 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3632 "helper access to packet: test17, cls helper fail range 2",
3634 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3635 offsetof(struct __sk_buff, data)),
3636 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3637 offsetof(struct __sk_buff, data_end)),
3638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3639 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3641 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3642 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3643 BPF_MOV64_IMM(BPF_REG_2, -9),
3644 BPF_MOV64_IMM(BPF_REG_3, 0),
3645 BPF_MOV64_IMM(BPF_REG_4, 0),
3646 BPF_MOV64_IMM(BPF_REG_5, 0),
3647 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3648 BPF_FUNC_csum_diff),
3649 BPF_MOV64_IMM(BPF_REG_0, 0),
3653 .errstr = "R2 min value is negative",
3654 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3657 "helper access to packet: test18, cls helper fail range 3",
3659 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3660 offsetof(struct __sk_buff, data)),
3661 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3662 offsetof(struct __sk_buff, data_end)),
3663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3664 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3665 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3666 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3668 BPF_MOV64_IMM(BPF_REG_2, ~0),
3669 BPF_MOV64_IMM(BPF_REG_3, 0),
3670 BPF_MOV64_IMM(BPF_REG_4, 0),
3671 BPF_MOV64_IMM(BPF_REG_5, 0),
3672 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3673 BPF_FUNC_csum_diff),
3674 BPF_MOV64_IMM(BPF_REG_0, 0),
3678 .errstr = "R2 min value is negative",
3679 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3682 "helper access to packet: test19, cls helper fail range zero",
3684 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3685 offsetof(struct __sk_buff, data)),
3686 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3687 offsetof(struct __sk_buff, data_end)),
3688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3689 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3690 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3691 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3692 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3693 BPF_MOV64_IMM(BPF_REG_2, 0),
3694 BPF_MOV64_IMM(BPF_REG_3, 0),
3695 BPF_MOV64_IMM(BPF_REG_4, 0),
3696 BPF_MOV64_IMM(BPF_REG_5, 0),
3697 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3698 BPF_FUNC_csum_diff),
3699 BPF_MOV64_IMM(BPF_REG_0, 0),
3703 .errstr = "invalid access to packet",
3704 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3707 "helper access to packet: test20, pkt end as input",
3709 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3710 offsetof(struct __sk_buff, data)),
3711 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3712 offsetof(struct __sk_buff, data_end)),
3713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3714 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3716 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3717 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3718 BPF_MOV64_IMM(BPF_REG_2, 4),
3719 BPF_MOV64_IMM(BPF_REG_3, 0),
3720 BPF_MOV64_IMM(BPF_REG_4, 0),
3721 BPF_MOV64_IMM(BPF_REG_5, 0),
3722 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3723 BPF_FUNC_csum_diff),
3724 BPF_MOV64_IMM(BPF_REG_0, 0),
3728 .errstr = "R1 type=pkt_end expected=fp",
3729 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3732 "helper access to packet: test21, wrong reg",
3734 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3735 offsetof(struct __sk_buff, data)),
3736 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3737 offsetof(struct __sk_buff, data_end)),
3738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3739 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3740 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3741 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3742 BPF_MOV64_IMM(BPF_REG_2, 4),
3743 BPF_MOV64_IMM(BPF_REG_3, 0),
3744 BPF_MOV64_IMM(BPF_REG_4, 0),
3745 BPF_MOV64_IMM(BPF_REG_5, 0),
3746 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3747 BPF_FUNC_csum_diff),
3748 BPF_MOV64_IMM(BPF_REG_0, 0),
3752 .errstr = "invalid access to packet",
3753 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3756 "valid map access into an array with a constant",
3758 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3759 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3761 BPF_LD_MAP_FD(BPF_REG_1, 0),
3762 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3763 BPF_FUNC_map_lookup_elem),
3764 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3765 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3766 offsetof(struct test_val, foo)),
3769 .fixup_map2 = { 3 },
3770 .errstr_unpriv = "R0 leaks addr",
3771 .result_unpriv = REJECT,
3775 "valid map access into an array with a register",
3777 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3778 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3779 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3780 BPF_LD_MAP_FD(BPF_REG_1, 0),
3781 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3782 BPF_FUNC_map_lookup_elem),
3783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3784 BPF_MOV64_IMM(BPF_REG_1, 4),
3785 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3786 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3787 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3788 offsetof(struct test_val, foo)),
3791 .fixup_map2 = { 3 },
3792 .errstr_unpriv = "R0 leaks addr",
3793 .result_unpriv = REJECT,
3795 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3798 "valid map access into an array with a variable",
3800 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3801 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3803 BPF_LD_MAP_FD(BPF_REG_1, 0),
3804 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3805 BPF_FUNC_map_lookup_elem),
3806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3807 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3808 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3809 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3810 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3811 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3812 offsetof(struct test_val, foo)),
3815 .fixup_map2 = { 3 },
3816 .errstr_unpriv = "R0 leaks addr",
3817 .result_unpriv = REJECT,
3819 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3822 "valid map access into an array with a signed variable",
3824 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3825 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3827 BPF_LD_MAP_FD(BPF_REG_1, 0),
3828 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3829 BPF_FUNC_map_lookup_elem),
3830 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3831 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3832 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3833 BPF_MOV32_IMM(BPF_REG_1, 0),
3834 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3835 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3836 BPF_MOV32_IMM(BPF_REG_1, 0),
3837 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3838 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3839 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3840 offsetof(struct test_val, foo)),
3843 .fixup_map2 = { 3 },
3844 .errstr_unpriv = "R0 leaks addr",
3845 .result_unpriv = REJECT,
3847 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3850 "invalid map access into an array with a constant",
3852 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3853 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3854 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3855 BPF_LD_MAP_FD(BPF_REG_1, 0),
3856 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3857 BPF_FUNC_map_lookup_elem),
3858 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3859 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3860 offsetof(struct test_val, foo)),
3863 .fixup_map2 = { 3 },
3864 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3868 "invalid map access into an array with a register",
3870 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3871 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3873 BPF_LD_MAP_FD(BPF_REG_1, 0),
3874 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3875 BPF_FUNC_map_lookup_elem),
3876 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3877 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3878 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3879 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3880 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3881 offsetof(struct test_val, foo)),
3884 .fixup_map2 = { 3 },
3885 .errstr = "R0 min value is outside of the array range",
3887 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3890 "invalid map access into an array with a variable",
3892 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3893 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3895 BPF_LD_MAP_FD(BPF_REG_1, 0),
3896 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3897 BPF_FUNC_map_lookup_elem),
3898 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3899 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3900 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3901 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3902 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3903 offsetof(struct test_val, foo)),
3906 .fixup_map2 = { 3 },
3907 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3909 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3912 "invalid map access into an array with no floor check",
3914 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3915 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3917 BPF_LD_MAP_FD(BPF_REG_1, 0),
3918 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3919 BPF_FUNC_map_lookup_elem),
3920 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3921 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3922 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3923 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3924 BPF_MOV32_IMM(BPF_REG_1, 0),
3925 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3926 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3927 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3928 offsetof(struct test_val, foo)),
3931 .fixup_map2 = { 3 },
3932 .errstr_unpriv = "R0 leaks addr",
3933 .errstr = "R0 unbounded memory access",
3934 .result_unpriv = REJECT,
3936 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3939 "invalid map access into an array with a invalid max check",
3941 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3942 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3944 BPF_LD_MAP_FD(BPF_REG_1, 0),
3945 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3946 BPF_FUNC_map_lookup_elem),
3947 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3948 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3949 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3950 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3951 BPF_MOV32_IMM(BPF_REG_1, 0),
3952 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3953 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3954 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3955 offsetof(struct test_val, foo)),
3958 .fixup_map2 = { 3 },
3959 .errstr_unpriv = "R0 leaks addr",
3960 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3961 .result_unpriv = REJECT,
3963 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3966 "invalid map access into an array with a invalid max check",
3968 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3971 BPF_LD_MAP_FD(BPF_REG_1, 0),
3972 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3973 BPF_FUNC_map_lookup_elem),
3974 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3975 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3976 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3977 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3979 BPF_LD_MAP_FD(BPF_REG_1, 0),
3980 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3981 BPF_FUNC_map_lookup_elem),
3982 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3983 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
3984 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3985 offsetof(struct test_val, foo)),
3988 .fixup_map2 = { 3, 11 },
3989 .errstr = "R0 pointer += pointer",
3991 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3994 "multiple registers share map_lookup_elem result",
3996 BPF_MOV64_IMM(BPF_REG_1, 10),
3997 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3998 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4000 BPF_LD_MAP_FD(BPF_REG_1, 0),
4001 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4002 BPF_FUNC_map_lookup_elem),
4003 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4004 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4005 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4008 .fixup_map1 = { 4 },
4010 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4013 "alu ops on ptr_to_map_value_or_null, 1",
4015 BPF_MOV64_IMM(BPF_REG_1, 10),
4016 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4017 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4019 BPF_LD_MAP_FD(BPF_REG_1, 0),
4020 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4021 BPF_FUNC_map_lookup_elem),
4022 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4023 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4025 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4026 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4029 .fixup_map1 = { 4 },
4030 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4032 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4035 "alu ops on ptr_to_map_value_or_null, 2",
4037 BPF_MOV64_IMM(BPF_REG_1, 10),
4038 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4039 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4041 BPF_LD_MAP_FD(BPF_REG_1, 0),
4042 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4043 BPF_FUNC_map_lookup_elem),
4044 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4045 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4046 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4047 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4050 .fixup_map1 = { 4 },
4051 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4053 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4056 "alu ops on ptr_to_map_value_or_null, 3",
4058 BPF_MOV64_IMM(BPF_REG_1, 10),
4059 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4060 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4061 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4062 BPF_LD_MAP_FD(BPF_REG_1, 0),
4063 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4064 BPF_FUNC_map_lookup_elem),
4065 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4066 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4067 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4068 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4071 .fixup_map1 = { 4 },
4072 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4074 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4077 "invalid memory access with multiple map_lookup_elem calls",
4079 BPF_MOV64_IMM(BPF_REG_1, 10),
4080 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4081 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4083 BPF_LD_MAP_FD(BPF_REG_1, 0),
4084 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4085 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4086 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4087 BPF_FUNC_map_lookup_elem),
4088 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4089 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4090 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4091 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4092 BPF_FUNC_map_lookup_elem),
4093 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4094 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4097 .fixup_map1 = { 4 },
4099 .errstr = "R4 !read_ok",
4100 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4103 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4105 BPF_MOV64_IMM(BPF_REG_1, 10),
4106 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4107 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4109 BPF_LD_MAP_FD(BPF_REG_1, 0),
4110 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4111 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4112 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4113 BPF_FUNC_map_lookup_elem),
4114 BPF_MOV64_IMM(BPF_REG_2, 10),
4115 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4116 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4117 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4118 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4119 BPF_FUNC_map_lookup_elem),
4120 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4121 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4122 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4125 .fixup_map1 = { 4 },
4127 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4130 "invalid map access from else condition",
4132 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4133 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4135 BPF_LD_MAP_FD(BPF_REG_1, 0),
4136 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4137 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4138 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4139 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4141 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4142 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4143 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4146 .fixup_map2 = { 3 },
4147 .errstr = "R0 unbounded memory access",
4149 .errstr_unpriv = "R0 leaks addr",
4150 .result_unpriv = REJECT,
4151 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4154 "constant register |= constant should keep constant type",
4156 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4157 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4158 BPF_MOV64_IMM(BPF_REG_2, 34),
4159 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4160 BPF_MOV64_IMM(BPF_REG_3, 0),
4161 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4165 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4168 "constant register |= constant should not bypass stack boundary checks",
4170 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4171 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4172 BPF_MOV64_IMM(BPF_REG_2, 34),
4173 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4174 BPF_MOV64_IMM(BPF_REG_3, 0),
4175 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4178 .errstr = "invalid stack type R1 off=-48 access_size=58",
4180 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4183 "constant register |= constant register should keep constant type",
4185 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4187 BPF_MOV64_IMM(BPF_REG_2, 34),
4188 BPF_MOV64_IMM(BPF_REG_4, 13),
4189 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4190 BPF_MOV64_IMM(BPF_REG_3, 0),
4191 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4195 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4198 "constant register |= constant register should not bypass stack boundary checks",
4200 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4201 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4202 BPF_MOV64_IMM(BPF_REG_2, 34),
4203 BPF_MOV64_IMM(BPF_REG_4, 24),
4204 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4205 BPF_MOV64_IMM(BPF_REG_3, 0),
4206 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4209 .errstr = "invalid stack type R1 off=-48 access_size=58",
4211 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4214 "invalid direct packet write for LWT_IN",
4216 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4217 offsetof(struct __sk_buff, data)),
4218 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4219 offsetof(struct __sk_buff, data_end)),
4220 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4222 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4223 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4224 BPF_MOV64_IMM(BPF_REG_0, 0),
4227 .errstr = "cannot write into packet",
4229 .prog_type = BPF_PROG_TYPE_LWT_IN,
4232 "invalid direct packet write for LWT_OUT",
4234 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4235 offsetof(struct __sk_buff, data)),
4236 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4237 offsetof(struct __sk_buff, data_end)),
4238 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4239 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4240 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4241 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4242 BPF_MOV64_IMM(BPF_REG_0, 0),
4245 .errstr = "cannot write into packet",
4247 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4250 "direct packet write for LWT_XMIT",
4252 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4253 offsetof(struct __sk_buff, data)),
4254 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4255 offsetof(struct __sk_buff, data_end)),
4256 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4257 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4258 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4259 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4260 BPF_MOV64_IMM(BPF_REG_0, 0),
4264 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4267 "direct packet read for LWT_IN",
4269 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4270 offsetof(struct __sk_buff, data)),
4271 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4272 offsetof(struct __sk_buff, data_end)),
4273 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4275 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4276 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4277 BPF_MOV64_IMM(BPF_REG_0, 0),
4281 .prog_type = BPF_PROG_TYPE_LWT_IN,
4284 "direct packet read for LWT_OUT",
4286 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4287 offsetof(struct __sk_buff, data)),
4288 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4289 offsetof(struct __sk_buff, data_end)),
4290 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4292 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4293 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4294 BPF_MOV64_IMM(BPF_REG_0, 0),
4298 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4301 "direct packet read for LWT_XMIT",
4303 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4304 offsetof(struct __sk_buff, data)),
4305 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4306 offsetof(struct __sk_buff, data_end)),
4307 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4309 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4310 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4311 BPF_MOV64_IMM(BPF_REG_0, 0),
4315 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4318 "overlapping checks for direct packet access",
4320 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4321 offsetof(struct __sk_buff, data)),
4322 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4323 offsetof(struct __sk_buff, data_end)),
4324 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4326 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4327 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4329 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4330 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4331 BPF_MOV64_IMM(BPF_REG_0, 0),
4335 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4338 "make headroom for LWT_XMIT",
4340 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4341 BPF_MOV64_IMM(BPF_REG_2, 34),
4342 BPF_MOV64_IMM(BPF_REG_3, 0),
4343 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
4344 /* split for s390 to succeed */
4345 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4346 BPF_MOV64_IMM(BPF_REG_2, 42),
4347 BPF_MOV64_IMM(BPF_REG_3, 0),
4348 BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
4349 BPF_MOV64_IMM(BPF_REG_0, 0),
4353 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4356 "invalid access of tc_classid for LWT_IN",
4358 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4359 offsetof(struct __sk_buff, tc_classid)),
4363 .errstr = "invalid bpf_context access",
4366 "invalid access of tc_classid for LWT_OUT",
4368 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4369 offsetof(struct __sk_buff, tc_classid)),
4373 .errstr = "invalid bpf_context access",
4376 "invalid access of tc_classid for LWT_XMIT",
4378 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4379 offsetof(struct __sk_buff, tc_classid)),
4383 .errstr = "invalid bpf_context access",
4386 "leak pointer into ctx 1",
4388 BPF_MOV64_IMM(BPF_REG_0, 0),
4389 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4390 offsetof(struct __sk_buff, cb[0])),
4391 BPF_LD_MAP_FD(BPF_REG_2, 0),
4392 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4393 offsetof(struct __sk_buff, cb[0])),
4396 .fixup_map1 = { 2 },
4397 .errstr_unpriv = "R2 leaks addr into mem",
4398 .result_unpriv = REJECT,
4400 .errstr = "BPF_XADD stores into R1 context is not allowed",
4403 "leak pointer into ctx 2",
4405 BPF_MOV64_IMM(BPF_REG_0, 0),
4406 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4407 offsetof(struct __sk_buff, cb[0])),
4408 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4409 offsetof(struct __sk_buff, cb[0])),
4412 .errstr_unpriv = "R10 leaks addr into mem",
4413 .result_unpriv = REJECT,
4415 .errstr = "BPF_XADD stores into R1 context is not allowed",
4418 "leak pointer into ctx 3",
4420 BPF_MOV64_IMM(BPF_REG_0, 0),
4421 BPF_LD_MAP_FD(BPF_REG_2, 0),
4422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4423 offsetof(struct __sk_buff, cb[0])),
4426 .fixup_map1 = { 1 },
4427 .errstr_unpriv = "R2 leaks addr into ctx",
4428 .result_unpriv = REJECT,
4432 "leak pointer into map val",
4434 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4435 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4436 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4437 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4438 BPF_LD_MAP_FD(BPF_REG_1, 0),
4439 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4440 BPF_FUNC_map_lookup_elem),
4441 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4442 BPF_MOV64_IMM(BPF_REG_3, 0),
4443 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4444 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4445 BPF_MOV64_IMM(BPF_REG_0, 0),
4448 .fixup_map1 = { 4 },
4449 .errstr_unpriv = "R6 leaks addr into mem",
4450 .result_unpriv = REJECT,
4454 "helper access to map: full range",
4456 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4458 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4459 BPF_LD_MAP_FD(BPF_REG_1, 0),
4460 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4461 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4462 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4463 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4464 BPF_MOV64_IMM(BPF_REG_3, 0),
4465 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4468 .fixup_map2 = { 3 },
4470 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4473 "helper access to map: partial range",
4475 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4477 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4478 BPF_LD_MAP_FD(BPF_REG_1, 0),
4479 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4480 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4481 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4482 BPF_MOV64_IMM(BPF_REG_2, 8),
4483 BPF_MOV64_IMM(BPF_REG_3, 0),
4484 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4487 .fixup_map2 = { 3 },
4489 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4492 "helper access to map: empty range",
4494 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4496 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4497 BPF_LD_MAP_FD(BPF_REG_1, 0),
4498 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4499 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4500 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4501 BPF_MOV64_IMM(BPF_REG_2, 0),
4502 BPF_MOV64_IMM(BPF_REG_3, 0),
4503 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4506 .fixup_map2 = { 3 },
4507 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4509 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4512 "helper access to map: out-of-bound range",
4514 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4516 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4517 BPF_LD_MAP_FD(BPF_REG_1, 0),
4518 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4519 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4520 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4521 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4522 BPF_MOV64_IMM(BPF_REG_3, 0),
4523 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4526 .fixup_map2 = { 3 },
4527 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4529 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4532 "helper access to map: negative range",
4534 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4536 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4537 BPF_LD_MAP_FD(BPF_REG_1, 0),
4538 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4539 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4540 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4541 BPF_MOV64_IMM(BPF_REG_2, -8),
4542 BPF_MOV64_IMM(BPF_REG_3, 0),
4543 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4546 .fixup_map2 = { 3 },
4547 .errstr = "R2 min value is negative",
4549 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4552 "helper access to adjusted map (via const imm): full range",
4554 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4555 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4556 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4557 BPF_LD_MAP_FD(BPF_REG_1, 0),
4558 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4559 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4560 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4561 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4562 offsetof(struct test_val, foo)),
4563 BPF_MOV64_IMM(BPF_REG_2,
4564 sizeof(struct test_val) -
4565 offsetof(struct test_val, foo)),
4566 BPF_MOV64_IMM(BPF_REG_3, 0),
4567 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4570 .fixup_map2 = { 3 },
4572 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4575 "helper access to adjusted map (via const imm): partial range",
4577 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4579 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4580 BPF_LD_MAP_FD(BPF_REG_1, 0),
4581 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4582 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4583 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4584 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4585 offsetof(struct test_val, foo)),
4586 BPF_MOV64_IMM(BPF_REG_2, 8),
4587 BPF_MOV64_IMM(BPF_REG_3, 0),
4588 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4591 .fixup_map2 = { 3 },
4593 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4596 "helper access to adjusted map (via const imm): empty range",
4598 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4599 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4600 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4601 BPF_LD_MAP_FD(BPF_REG_1, 0),
4602 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4603 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4604 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4605 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4606 offsetof(struct test_val, foo)),
4607 BPF_MOV64_IMM(BPF_REG_2, 0),
4608 BPF_MOV64_IMM(BPF_REG_3, 0),
4609 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4612 .fixup_map2 = { 3 },
4613 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
4615 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4618 "helper access to adjusted map (via const imm): out-of-bound range",
4620 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4621 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4622 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4623 BPF_LD_MAP_FD(BPF_REG_1, 0),
4624 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4625 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4626 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4628 offsetof(struct test_val, foo)),
4629 BPF_MOV64_IMM(BPF_REG_2,
4630 sizeof(struct test_val) -
4631 offsetof(struct test_val, foo) + 8),
4632 BPF_MOV64_IMM(BPF_REG_3, 0),
4633 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4636 .fixup_map2 = { 3 },
4637 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4639 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4642 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4644 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4646 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4647 BPF_LD_MAP_FD(BPF_REG_1, 0),
4648 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4649 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4650 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4651 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4652 offsetof(struct test_val, foo)),
4653 BPF_MOV64_IMM(BPF_REG_2, -8),
4654 BPF_MOV64_IMM(BPF_REG_3, 0),
4655 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4658 .fixup_map2 = { 3 },
4659 .errstr = "R2 min value is negative",
4661 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4664 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4666 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4667 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4668 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4669 BPF_LD_MAP_FD(BPF_REG_1, 0),
4670 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4671 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4672 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4673 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4674 offsetof(struct test_val, foo)),
4675 BPF_MOV64_IMM(BPF_REG_2, -1),
4676 BPF_MOV64_IMM(BPF_REG_3, 0),
4677 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4680 .fixup_map2 = { 3 },
4681 .errstr = "R2 min value is negative",
4683 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4686 "helper access to adjusted map (via const reg): full range",
4688 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4690 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4691 BPF_LD_MAP_FD(BPF_REG_1, 0),
4692 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4693 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4694 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4695 BPF_MOV64_IMM(BPF_REG_3,
4696 offsetof(struct test_val, foo)),
4697 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4698 BPF_MOV64_IMM(BPF_REG_2,
4699 sizeof(struct test_val) -
4700 offsetof(struct test_val, foo)),
4701 BPF_MOV64_IMM(BPF_REG_3, 0),
4702 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4705 .fixup_map2 = { 3 },
4707 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4710 "helper access to adjusted map (via const reg): partial range",
4712 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4714 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4715 BPF_LD_MAP_FD(BPF_REG_1, 0),
4716 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4717 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4718 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4719 BPF_MOV64_IMM(BPF_REG_3,
4720 offsetof(struct test_val, foo)),
4721 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4722 BPF_MOV64_IMM(BPF_REG_2, 8),
4723 BPF_MOV64_IMM(BPF_REG_3, 0),
4724 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4727 .fixup_map2 = { 3 },
4729 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4732 "helper access to adjusted map (via const reg): empty range",
4734 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4736 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4737 BPF_LD_MAP_FD(BPF_REG_1, 0),
4738 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4739 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4740 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4741 BPF_MOV64_IMM(BPF_REG_3, 0),
4742 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4743 BPF_MOV64_IMM(BPF_REG_2, 0),
4744 BPF_MOV64_IMM(BPF_REG_3, 0),
4745 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4748 .fixup_map2 = { 3 },
4749 .errstr = "R1 min value is outside of the array range",
4751 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4754 "helper access to adjusted map (via const reg): out-of-bound range",
4756 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4758 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4759 BPF_LD_MAP_FD(BPF_REG_1, 0),
4760 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4761 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4762 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4763 BPF_MOV64_IMM(BPF_REG_3,
4764 offsetof(struct test_val, foo)),
4765 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4766 BPF_MOV64_IMM(BPF_REG_2,
4767 sizeof(struct test_val) -
4768 offsetof(struct test_val, foo) + 8),
4769 BPF_MOV64_IMM(BPF_REG_3, 0),
4770 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4773 .fixup_map2 = { 3 },
4774 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4776 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4779 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4781 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4782 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4783 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4784 BPF_LD_MAP_FD(BPF_REG_1, 0),
4785 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4787 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4788 BPF_MOV64_IMM(BPF_REG_3,
4789 offsetof(struct test_val, foo)),
4790 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4791 BPF_MOV64_IMM(BPF_REG_2, -8),
4792 BPF_MOV64_IMM(BPF_REG_3, 0),
4793 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4796 .fixup_map2 = { 3 },
4797 .errstr = "R2 min value is negative",
4799 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4802 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4804 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4806 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4807 BPF_LD_MAP_FD(BPF_REG_1, 0),
4808 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4809 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4810 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4811 BPF_MOV64_IMM(BPF_REG_3,
4812 offsetof(struct test_val, foo)),
4813 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4814 BPF_MOV64_IMM(BPF_REG_2, -1),
4815 BPF_MOV64_IMM(BPF_REG_3, 0),
4816 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4819 .fixup_map2 = { 3 },
4820 .errstr = "R2 min value is negative",
4822 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4825 "helper access to adjusted map (via variable): full range",
4827 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4829 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4830 BPF_LD_MAP_FD(BPF_REG_1, 0),
4831 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4832 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4833 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4834 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4835 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4836 offsetof(struct test_val, foo), 4),
4837 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4838 BPF_MOV64_IMM(BPF_REG_2,
4839 sizeof(struct test_val) -
4840 offsetof(struct test_val, foo)),
4841 BPF_MOV64_IMM(BPF_REG_3, 0),
4842 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4845 .fixup_map2 = { 3 },
4847 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4850 "helper access to adjusted map (via variable): partial range",
4852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4854 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4855 BPF_LD_MAP_FD(BPF_REG_1, 0),
4856 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4857 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4858 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4859 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4860 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4861 offsetof(struct test_val, foo), 4),
4862 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4863 BPF_MOV64_IMM(BPF_REG_2, 8),
4864 BPF_MOV64_IMM(BPF_REG_3, 0),
4865 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4868 .fixup_map2 = { 3 },
4870 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4873 "helper access to adjusted map (via variable): empty range",
4875 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4877 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4878 BPF_LD_MAP_FD(BPF_REG_1, 0),
4879 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4880 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4881 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4882 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4883 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4884 offsetof(struct test_val, foo), 4),
4885 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4886 BPF_MOV64_IMM(BPF_REG_2, 0),
4887 BPF_MOV64_IMM(BPF_REG_3, 0),
4888 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4891 .fixup_map2 = { 3 },
4892 .errstr = "R1 min value is outside of the array range",
4894 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4897 "helper access to adjusted map (via variable): no max check",
4899 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4901 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4902 BPF_LD_MAP_FD(BPF_REG_1, 0),
4903 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4904 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4905 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4906 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4907 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4908 BPF_MOV64_IMM(BPF_REG_2, 1),
4909 BPF_MOV64_IMM(BPF_REG_3, 0),
4910 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4913 .fixup_map2 = { 3 },
4914 .errstr = "R1 unbounded memory access",
4916 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4919 "helper access to adjusted map (via variable): wrong max check",
4921 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4923 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4924 BPF_LD_MAP_FD(BPF_REG_1, 0),
4925 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4926 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4927 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4928 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4929 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4930 offsetof(struct test_val, foo), 4),
4931 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4932 BPF_MOV64_IMM(BPF_REG_2,
4933 sizeof(struct test_val) -
4934 offsetof(struct test_val, foo) + 1),
4935 BPF_MOV64_IMM(BPF_REG_3, 0),
4936 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4939 .fixup_map2 = { 3 },
4940 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4942 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4945 "helper access to map: bounds check using <, good access",
4947 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4949 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4950 BPF_LD_MAP_FD(BPF_REG_1, 0),
4951 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4952 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4953 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4954 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4955 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
4956 BPF_MOV64_IMM(BPF_REG_0, 0),
4958 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4959 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4960 BPF_MOV64_IMM(BPF_REG_0, 0),
4963 .fixup_map2 = { 3 },
4965 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4968 "helper access to map: bounds check using <, bad access",
4970 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4972 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4973 BPF_LD_MAP_FD(BPF_REG_1, 0),
4974 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4975 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4976 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4977 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4978 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
4979 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4980 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4981 BPF_MOV64_IMM(BPF_REG_0, 0),
4983 BPF_MOV64_IMM(BPF_REG_0, 0),
4986 .fixup_map2 = { 3 },
4988 .errstr = "R1 unbounded memory access",
4989 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4992 "helper access to map: bounds check using <=, good access",
4994 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4996 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4997 BPF_LD_MAP_FD(BPF_REG_1, 0),
4998 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4999 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5000 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5001 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5002 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
5003 BPF_MOV64_IMM(BPF_REG_0, 0),
5005 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5006 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5007 BPF_MOV64_IMM(BPF_REG_0, 0),
5010 .fixup_map2 = { 3 },
5012 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5015 "helper access to map: bounds check using <=, bad access",
5017 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5019 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5020 BPF_LD_MAP_FD(BPF_REG_1, 0),
5021 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5022 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5023 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5024 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5025 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
5026 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5027 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5028 BPF_MOV64_IMM(BPF_REG_0, 0),
5030 BPF_MOV64_IMM(BPF_REG_0, 0),
5033 .fixup_map2 = { 3 },
5035 .errstr = "R1 unbounded memory access",
5036 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5039 "helper access to map: bounds check using s<, good access",
5041 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5042 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5043 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5044 BPF_LD_MAP_FD(BPF_REG_1, 0),
5045 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5046 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5047 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5048 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5049 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5050 BPF_MOV64_IMM(BPF_REG_0, 0),
5052 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5053 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5054 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5055 BPF_MOV64_IMM(BPF_REG_0, 0),
5058 .fixup_map2 = { 3 },
5060 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5063 "helper access to map: bounds check using s<, good access 2",
5065 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5067 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5068 BPF_LD_MAP_FD(BPF_REG_1, 0),
5069 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5070 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5071 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5072 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5073 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5074 BPF_MOV64_IMM(BPF_REG_0, 0),
5076 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5077 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5078 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5079 BPF_MOV64_IMM(BPF_REG_0, 0),
5082 .fixup_map2 = { 3 },
5084 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5087 "helper access to map: bounds check using s<, bad access",
5089 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5091 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5092 BPF_LD_MAP_FD(BPF_REG_1, 0),
5093 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5094 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5095 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5096 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5097 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5098 BPF_MOV64_IMM(BPF_REG_0, 0),
5100 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5101 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5102 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5103 BPF_MOV64_IMM(BPF_REG_0, 0),
5106 .fixup_map2 = { 3 },
5108 .errstr = "R1 min value is negative",
5109 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5112 "helper access to map: bounds check using s<=, good access",
5114 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5116 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5117 BPF_LD_MAP_FD(BPF_REG_1, 0),
5118 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5119 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5120 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5121 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5122 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5123 BPF_MOV64_IMM(BPF_REG_0, 0),
5125 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5126 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5127 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5128 BPF_MOV64_IMM(BPF_REG_0, 0),
5131 .fixup_map2 = { 3 },
5133 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5136 "helper access to map: bounds check using s<=, good access 2",
5138 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5139 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5140 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5141 BPF_LD_MAP_FD(BPF_REG_1, 0),
5142 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5143 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5144 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5145 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5146 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5147 BPF_MOV64_IMM(BPF_REG_0, 0),
5149 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5150 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5151 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5152 BPF_MOV64_IMM(BPF_REG_0, 0),
5155 .fixup_map2 = { 3 },
5157 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5160 "helper access to map: bounds check using s<=, bad access",
5162 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5163 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5164 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5165 BPF_LD_MAP_FD(BPF_REG_1, 0),
5166 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5168 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5169 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5170 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5171 BPF_MOV64_IMM(BPF_REG_0, 0),
5173 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5174 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5175 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5176 BPF_MOV64_IMM(BPF_REG_0, 0),
5179 .fixup_map2 = { 3 },
5181 .errstr = "R1 min value is negative",
5182 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5185 "map element value is preserved across register spilling",
5187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5189 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5190 BPF_LD_MAP_FD(BPF_REG_1, 0),
5191 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5193 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5194 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5196 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5197 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5198 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5201 .fixup_map2 = { 3 },
5202 .errstr_unpriv = "R0 leaks addr",
5204 .result_unpriv = REJECT,
5207 "map element value or null is marked on register spilling",
5209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5211 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5212 BPF_LD_MAP_FD(BPF_REG_1, 0),
5213 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5214 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5216 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5217 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5218 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5219 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5222 .fixup_map2 = { 3 },
5223 .errstr_unpriv = "R0 leaks addr",
5225 .result_unpriv = REJECT,
5228 "map element value store of cleared call register",
5230 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5231 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5232 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5233 BPF_LD_MAP_FD(BPF_REG_1, 0),
5234 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5235 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5236 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5239 .fixup_map2 = { 3 },
5240 .errstr_unpriv = "R1 !read_ok",
5241 .errstr = "R1 !read_ok",
5243 .result_unpriv = REJECT,
5246 "map element value with unaligned store",
5248 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5249 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5250 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5251 BPF_LD_MAP_FD(BPF_REG_1, 0),
5252 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5253 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5254 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5255 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5256 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5257 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5258 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5259 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5260 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5261 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5262 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5263 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5264 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5265 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5266 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5268 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5269 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5270 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5273 .fixup_map2 = { 3 },
5274 .errstr_unpriv = "R0 leaks addr",
5276 .result_unpriv = REJECT,
5277 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5280 "map element value with unaligned load",
5282 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5283 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5284 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5285 BPF_LD_MAP_FD(BPF_REG_1, 0),
5286 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5287 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5288 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5289 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5291 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5292 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5293 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5294 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5295 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5297 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5298 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5301 .fixup_map2 = { 3 },
5302 .errstr_unpriv = "R0 leaks addr",
5304 .result_unpriv = REJECT,
5305 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5308 "map element value illegal alu op, 1",
5310 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5312 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5313 BPF_LD_MAP_FD(BPF_REG_1, 0),
5314 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5315 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5316 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5317 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5320 .fixup_map2 = { 3 },
5321 .errstr = "R0 bitwise operator &= on pointer",
5325 "map element value illegal alu op, 2",
5327 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5329 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5330 BPF_LD_MAP_FD(BPF_REG_1, 0),
5331 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5332 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5333 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5334 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5337 .fixup_map2 = { 3 },
5338 .errstr = "R0 32-bit pointer arithmetic prohibited",
5342 "map element value illegal alu op, 3",
5344 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5346 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5347 BPF_LD_MAP_FD(BPF_REG_1, 0),
5348 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5349 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5350 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5351 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5354 .fixup_map2 = { 3 },
5355 .errstr = "R0 pointer arithmetic with /= operator",
5359 "map element value illegal alu op, 4",
5361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5363 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5364 BPF_LD_MAP_FD(BPF_REG_1, 0),
5365 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5366 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5367 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5368 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5371 .fixup_map2 = { 3 },
5372 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5373 .errstr = "invalid mem access 'inv'",
5375 .result_unpriv = REJECT,
5378 "map element value illegal alu op, 5",
5380 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5382 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5383 BPF_LD_MAP_FD(BPF_REG_1, 0),
5384 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5385 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5386 BPF_MOV64_IMM(BPF_REG_3, 4096),
5387 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5389 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5390 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5391 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5392 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5395 .fixup_map2 = { 3 },
5396 .errstr = "R0 invalid mem access 'inv'",
5400 "map element value is preserved across register spilling",
5402 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5404 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5405 BPF_LD_MAP_FD(BPF_REG_1, 0),
5406 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5407 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5409 offsetof(struct test_val, foo)),
5410 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5413 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5414 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5415 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5418 .fixup_map2 = { 3 },
5419 .errstr_unpriv = "R0 leaks addr",
5421 .result_unpriv = REJECT,
5422 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5425 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5427 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5429 BPF_MOV64_IMM(BPF_REG_0, 0),
5430 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5431 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5432 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5433 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5434 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5435 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5436 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5437 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5438 BPF_MOV64_IMM(BPF_REG_2, 16),
5439 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5440 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5441 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5442 BPF_MOV64_IMM(BPF_REG_4, 0),
5443 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5444 BPF_MOV64_IMM(BPF_REG_3, 0),
5445 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5446 BPF_MOV64_IMM(BPF_REG_0, 0),
5450 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5453 "helper access to variable memory: stack, bitwise AND, zero included",
5455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5457 BPF_MOV64_IMM(BPF_REG_2, 16),
5458 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5459 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5460 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5461 BPF_MOV64_IMM(BPF_REG_3, 0),
5462 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5465 .errstr = "invalid stack type R1 off=-64 access_size=0",
5467 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5470 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5472 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5474 BPF_MOV64_IMM(BPF_REG_2, 16),
5475 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5476 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5477 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5478 BPF_MOV64_IMM(BPF_REG_4, 0),
5479 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5480 BPF_MOV64_IMM(BPF_REG_3, 0),
5481 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5482 BPF_MOV64_IMM(BPF_REG_0, 0),
5485 .errstr = "invalid stack type R1 off=-64 access_size=65",
5487 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5490 "helper access to variable memory: stack, JMP, correct bounds",
5492 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5494 BPF_MOV64_IMM(BPF_REG_0, 0),
5495 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5496 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5497 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5498 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5499 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5500 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5501 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5502 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5503 BPF_MOV64_IMM(BPF_REG_2, 16),
5504 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5505 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5506 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5507 BPF_MOV64_IMM(BPF_REG_4, 0),
5508 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5509 BPF_MOV64_IMM(BPF_REG_3, 0),
5510 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5511 BPF_MOV64_IMM(BPF_REG_0, 0),
5515 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5518 "helper access to variable memory: stack, JMP (signed), correct bounds",
5520 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5522 BPF_MOV64_IMM(BPF_REG_0, 0),
5523 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5524 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5525 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5526 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5527 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5528 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5529 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5530 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5531 BPF_MOV64_IMM(BPF_REG_2, 16),
5532 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5533 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5534 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5535 BPF_MOV64_IMM(BPF_REG_4, 0),
5536 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5537 BPF_MOV64_IMM(BPF_REG_3, 0),
5538 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5539 BPF_MOV64_IMM(BPF_REG_0, 0),
5543 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5546 "helper access to variable memory: stack, JMP, bounds + offset",
5548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5550 BPF_MOV64_IMM(BPF_REG_2, 16),
5551 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5552 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5553 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5554 BPF_MOV64_IMM(BPF_REG_4, 0),
5555 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5557 BPF_MOV64_IMM(BPF_REG_3, 0),
5558 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5559 BPF_MOV64_IMM(BPF_REG_0, 0),
5562 .errstr = "invalid stack type R1 off=-64 access_size=65",
5564 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5567 "helper access to variable memory: stack, JMP, wrong max",
5569 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5570 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5571 BPF_MOV64_IMM(BPF_REG_2, 16),
5572 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5573 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5574 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5575 BPF_MOV64_IMM(BPF_REG_4, 0),
5576 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5577 BPF_MOV64_IMM(BPF_REG_3, 0),
5578 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5579 BPF_MOV64_IMM(BPF_REG_0, 0),
5582 .errstr = "invalid stack type R1 off=-64 access_size=65",
5584 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5587 "helper access to variable memory: stack, JMP, no max check",
5589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5591 BPF_MOV64_IMM(BPF_REG_2, 16),
5592 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5593 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5594 BPF_MOV64_IMM(BPF_REG_4, 0),
5595 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5596 BPF_MOV64_IMM(BPF_REG_3, 0),
5597 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5598 BPF_MOV64_IMM(BPF_REG_0, 0),
5601 /* because max wasn't checked, signed min is negative */
5602 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5604 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5607 "helper access to variable memory: stack, JMP, no min check",
5609 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5610 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5611 BPF_MOV64_IMM(BPF_REG_2, 16),
5612 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5613 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5614 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5615 BPF_MOV64_IMM(BPF_REG_3, 0),
5616 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5617 BPF_MOV64_IMM(BPF_REG_0, 0),
5620 .errstr = "invalid stack type R1 off=-64 access_size=0",
5622 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5625 "helper access to variable memory: stack, JMP (signed), no min check",
5627 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5629 BPF_MOV64_IMM(BPF_REG_2, 16),
5630 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5631 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5632 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5633 BPF_MOV64_IMM(BPF_REG_3, 0),
5634 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5635 BPF_MOV64_IMM(BPF_REG_0, 0),
5638 .errstr = "R2 min value is negative",
5640 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5643 "helper access to variable memory: map, JMP, correct bounds",
5645 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5647 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5648 BPF_LD_MAP_FD(BPF_REG_1, 0),
5649 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5650 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5651 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5652 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5653 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5654 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5655 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5656 sizeof(struct test_val), 4),
5657 BPF_MOV64_IMM(BPF_REG_4, 0),
5658 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5659 BPF_MOV64_IMM(BPF_REG_3, 0),
5660 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5661 BPF_MOV64_IMM(BPF_REG_0, 0),
5664 .fixup_map2 = { 3 },
5666 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5669 "helper access to variable memory: map, JMP, wrong max",
5671 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5673 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5674 BPF_LD_MAP_FD(BPF_REG_1, 0),
5675 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5676 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5677 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5678 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5679 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5680 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5681 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5682 sizeof(struct test_val) + 1, 4),
5683 BPF_MOV64_IMM(BPF_REG_4, 0),
5684 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5685 BPF_MOV64_IMM(BPF_REG_3, 0),
5686 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5687 BPF_MOV64_IMM(BPF_REG_0, 0),
5690 .fixup_map2 = { 3 },
5691 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5693 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5696 "helper access to variable memory: map adjusted, JMP, correct bounds",
5698 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5700 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5701 BPF_LD_MAP_FD(BPF_REG_1, 0),
5702 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5703 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5704 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5705 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5706 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5707 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5708 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5709 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5710 sizeof(struct test_val) - 20, 4),
5711 BPF_MOV64_IMM(BPF_REG_4, 0),
5712 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5713 BPF_MOV64_IMM(BPF_REG_3, 0),
5714 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5715 BPF_MOV64_IMM(BPF_REG_0, 0),
5718 .fixup_map2 = { 3 },
5720 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5723 "helper access to variable memory: map adjusted, JMP, wrong max",
5725 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5727 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5728 BPF_LD_MAP_FD(BPF_REG_1, 0),
5729 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5730 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5731 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5733 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5734 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5735 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5736 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5737 sizeof(struct test_val) - 19, 4),
5738 BPF_MOV64_IMM(BPF_REG_4, 0),
5739 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5740 BPF_MOV64_IMM(BPF_REG_3, 0),
5741 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5742 BPF_MOV64_IMM(BPF_REG_0, 0),
5745 .fixup_map2 = { 3 },
5746 .errstr = "R1 min value is outside of the array range",
5748 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* csum_diff takes (ptr, size) pairs where the pointer may be NULL only
 * if the size is provably zero.  Accepted: NULL ptr with size 0.
 */
5751 "helper access to variable memory: size = 0 allowed on NULL",
5753 BPF_MOV64_IMM(BPF_REG_1, 0),
5754 BPF_MOV64_IMM(BPF_REG_2, 0),
5755 BPF_MOV64_IMM(BPF_REG_3, 0),
5756 BPF_MOV64_IMM(BPF_REG_4, 0),
5757 BPF_MOV64_IMM(BPF_REG_5, 0),
5758 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5762 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Rejected: NULL ptr while size (r2, laundered through a stack
 * spill/fill and an AND with 64) may be non-zero.
 */
5765 "helper access to variable memory: size > 0 not allowed on NULL",
5767 BPF_MOV64_IMM(BPF_REG_1, 0),
5768 BPF_MOV64_IMM(BPF_REG_2, 0),
5769 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5770 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5771 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5772 BPF_MOV64_IMM(BPF_REG_3, 0),
5773 BPF_MOV64_IMM(BPF_REG_4, 0),
5774 BPF_MOV64_IMM(BPF_REG_5, 0),
5775 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5778 .errstr = "R1 type=inv expected=fp",
5780 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* Rejected: a non-NULL (stack) pointer with a zero access size. */
5783 "helper access to variable memory: size = 0 not allowed on != NULL",
5785 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5786 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5787 BPF_MOV64_IMM(BPF_REG_2, 0),
5788 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5789 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5790 BPF_MOV64_IMM(BPF_REG_3, 0),
5791 BPF_MOV64_IMM(BPF_REG_4, 0),
5792 BPF_MOV64_IMM(BPF_REG_5, 0),
5793 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5796 .errstr = "invalid stack type R1 off=-8 access_size=0",
5798 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* The 64-byte stack window passed to probe_read leaves bytes [-32,-24)
 * uninitialized (note the missing store at -32), so a variable-size
 * read may leak uninitialized stack -- must be rejected.
 */
5801 "helper access to variable memory: 8 bytes leak",
5803 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5805 BPF_MOV64_IMM(BPF_REG_0, 0),
5806 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5807 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5808 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5809 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
/* gap: offset -32 is intentionally NOT initialized */
5810 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5811 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5812 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* r2 = unknown value in [1, 64] -> read may cover the hole */
5813 BPF_MOV64_IMM(BPF_REG_2, 0),
5814 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5815 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5816 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5818 BPF_MOV64_IMM(BPF_REG_3, 0),
5819 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5820 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5823 .errstr = "invalid indirect read from stack off -64+32 size 64",
5825 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* Companion positive test: the same pattern with the whole window
 * initialized (all eight stores present) must be accepted.
 */
5828 "helper access to variable memory: 8 bytes no leak (init memory)",
5830 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5831 BPF_MOV64_IMM(BPF_REG_0, 0),
5832 BPF_MOV64_IMM(BPF_REG_0, 0),
5833 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5834 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5835 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5836 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5837 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5838 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5839 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5840 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5842 BPF_MOV64_IMM(BPF_REG_2, 0),
5843 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5844 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5845 BPF_MOV64_IMM(BPF_REG_3, 0),
5846 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5847 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5851 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
/* AND with a negative immediate does not bound the value from below,
 * so the subsequent pointer arithmetic on the map value can go out of
 * range -- must be rejected.
 */
5854 "invalid and of negative number",
5856 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5857 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5858 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5859 BPF_LD_MAP_FD(BPF_REG_1, 0),
5860 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5861 BPF_FUNC_map_lookup_elem),
5862 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5863 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
5864 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5865 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5866 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5867 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5868 offsetof(struct test_val, foo)),
5871 .fixup_map2 = { 3 },
5872 .errstr = "R0 max value is outside of the array range",
5874 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* A chain of 32-bit ALU ops produces an offset the verifier cannot
 * prove in-bounds (large MUL by 0x10000000); the store through r0
 * must be rejected.
 */
5877 "invalid range check",
5879 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5880 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5881 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5882 BPF_LD_MAP_FD(BPF_REG_1, 0),
5883 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5884 BPF_FUNC_map_lookup_elem),
5885 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
5886 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5887 BPF_MOV64_IMM(BPF_REG_9, 1),
5888 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
5889 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
5890 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
5891 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
5892 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
5893 BPF_MOV32_IMM(BPF_REG_3, 1),
5894 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
5895 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
5896 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
5897 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
5898 BPF_MOV64_REG(BPF_REG_0, 0),
5901 .fixup_map2 = { 3 },
5902 .errstr = "R0 max value is outside of the array range",
5904 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
/* Positive test: look up an inner map from a map-in-map, NULL-check
 * the result, then look up inside the inner map -- must be accepted.
 */
5907 "map in map access",
5909 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5910 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5912 BPF_LD_MAP_FD(BPF_REG_1, 0),
5913 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5914 BPF_FUNC_map_lookup_elem),
5915 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5916 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5917 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5919 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5920 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5921 BPF_FUNC_map_lookup_elem),
5922 BPF_MOV64_IMM(BPF_REG_0, 0),
5925 .fixup_map_in_map = { 3 },
/* Negative test: arithmetic on the inner-map pointer (r1 += 8) before
 * using it as a map pointer is prohibited.
 */
5929 "invalid inner map pointer",
5931 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5932 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5933 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5934 BPF_LD_MAP_FD(BPF_REG_1, 0),
5935 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5936 BPF_FUNC_map_lookup_elem),
5937 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5938 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5939 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5940 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5941 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5944 BPF_FUNC_map_lookup_elem),
5945 BPF_MOV64_IMM(BPF_REG_0, 0),
5948 .fixup_map_in_map = { 3 },
5949 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
/* Negative test: using the inner-map lookup result without the
 * JEQ-NULL check -- r1 is still map_value_or_null at the second call.
 */
5953 "forgot null checking on the inner map pointer",
5955 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5956 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5957 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5958 BPF_LD_MAP_FD(BPF_REG_1, 0),
5959 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5960 BPF_FUNC_map_lookup_elem),
5961 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5962 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5964 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5965 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5966 BPF_FUNC_map_lookup_elem),
5967 BPF_MOV64_IMM(BPF_REG_0, 0),
5970 .fixup_map_in_map = { 3 },
5971 .errstr = "R1 type=map_value_or_null expected=map_ptr",
/* LD_ABS/LD_IND clobber the caller-saved registers R1-R5: reading one
 * of them afterwards must fail with "Rn !read_ok".  R7 is callee-saved
 * and must survive (those two entries have no .errstr).  The -0x200000
 * offset just guarantees the load itself doesn't succeed at runtime;
 * the verdict is decided at verification time.
 */
5975 "ld_abs: check calling conv, r1",
5977 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5978 BPF_MOV64_IMM(BPF_REG_1, 0),
5979 BPF_LD_ABS(BPF_W, -0x200000),
5980 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5983 .errstr = "R1 !read_ok",
5987 "ld_abs: check calling conv, r2",
5989 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5990 BPF_MOV64_IMM(BPF_REG_2, 0),
5991 BPF_LD_ABS(BPF_W, -0x200000),
5992 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5995 .errstr = "R2 !read_ok",
5999 "ld_abs: check calling conv, r3",
6001 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6002 BPF_MOV64_IMM(BPF_REG_3, 0),
6003 BPF_LD_ABS(BPF_W, -0x200000),
6004 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6007 .errstr = "R3 !read_ok",
6011 "ld_abs: check calling conv, r4",
6013 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6014 BPF_MOV64_IMM(BPF_REG_4, 0),
6015 BPF_LD_ABS(BPF_W, -0x200000),
6016 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6019 .errstr = "R4 !read_ok",
6023 "ld_abs: check calling conv, r5",
6025 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6026 BPF_MOV64_IMM(BPF_REG_5, 0),
6027 BPF_LD_ABS(BPF_W, -0x200000),
6028 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6031 .errstr = "R5 !read_ok",
/* r7 is callee-saved: no errstr, this one must be accepted */
6035 "ld_abs: check calling conv, r7",
6037 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6038 BPF_MOV64_IMM(BPF_REG_7, 0),
6039 BPF_LD_ABS(BPF_W, -0x200000),
6040 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
/* Same matrix for LD_IND (indexed variant) */
6046 "ld_ind: check calling conv, r1",
6048 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6049 BPF_MOV64_IMM(BPF_REG_1, 1),
6050 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
6051 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6054 .errstr = "R1 !read_ok",
6058 "ld_ind: check calling conv, r2",
6060 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6061 BPF_MOV64_IMM(BPF_REG_2, 1),
6062 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
6063 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6066 .errstr = "R2 !read_ok",
6070 "ld_ind: check calling conv, r3",
6072 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6073 BPF_MOV64_IMM(BPF_REG_3, 1),
6074 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
6075 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6078 .errstr = "R3 !read_ok",
6082 "ld_ind: check calling conv, r4",
6084 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6085 BPF_MOV64_IMM(BPF_REG_4, 1),
6086 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
6087 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6090 .errstr = "R4 !read_ok",
6094 "ld_ind: check calling conv, r5",
6096 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6097 BPF_MOV64_IMM(BPF_REG_5, 1),
6098 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
6099 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6102 .errstr = "R5 !read_ok",
/* r7 is callee-saved: no errstr, this one must be accepted */
6106 "ld_ind: check calling conv, r7",
6108 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6109 BPF_MOV64_IMM(BPF_REG_7, 1),
6110 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6111 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
/* Narrow (sub-8-byte) loads from bpf_perf_event_data->sample_period
 * are permitted at every width; the #if picks the offset that hits
 * the same bytes on big-endian hosts.
 */
6117 "check bpf_perf_event_data->sample_period byte load permitted",
6119 BPF_MOV64_IMM(BPF_REG_0, 0),
6120 #if __BYTE_ORDER == __LITTLE_ENDIAN
6121 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6122 offsetof(struct bpf_perf_event_data, sample_period)),
6124 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6125 offsetof(struct bpf_perf_event_data, sample_period) + 7),
6130 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6133 "check bpf_perf_event_data->sample_period half load permitted",
6135 BPF_MOV64_IMM(BPF_REG_0, 0),
6136 #if __BYTE_ORDER == __LITTLE_ENDIAN
6137 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6138 offsetof(struct bpf_perf_event_data, sample_period)),
6140 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6141 offsetof(struct bpf_perf_event_data, sample_period) + 6),
6146 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6149 "check bpf_perf_event_data->sample_period word load permitted",
6151 BPF_MOV64_IMM(BPF_REG_0, 0),
6152 #if __BYTE_ORDER == __LITTLE_ENDIAN
6153 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6154 offsetof(struct bpf_perf_event_data, sample_period)),
6156 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6157 offsetof(struct bpf_perf_event_data, sample_period) + 4),
6162 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6165 "check bpf_perf_event_data->sample_period dword load permitted",
6167 BPF_MOV64_IMM(BPF_REG_0, 0),
6168 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6169 offsetof(struct bpf_perf_event_data, sample_period)),
6173 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
/* By contrast, narrow loads of these __sk_buff fields are invalid
 * context accesses and must be rejected.
 */
6176 "check skb->data half load not permitted",
6178 BPF_MOV64_IMM(BPF_REG_0, 0),
6179 #if __BYTE_ORDER == __LITTLE_ENDIAN
6180 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6181 offsetof(struct __sk_buff, data)),
6183 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6184 offsetof(struct __sk_buff, data) + 2),
6189 .errstr = "invalid bpf_context access",
6192 "check skb->tc_classid half load not permitted for lwt prog",
6194 BPF_MOV64_IMM(BPF_REG_0, 0),
6195 #if __BYTE_ORDER == __LITTLE_ENDIAN
6196 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6197 offsetof(struct __sk_buff, tc_classid)),
6199 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6200 offsetof(struct __sk_buff, tc_classid) + 2),
6205 .errstr = "invalid bpf_context access",
6206 .prog_type = BPF_PROG_TYPE_LWT_IN,
/* Family of tests: combining an unsigned comparison (JGT/JGE) with a
 * signed one (JSGT/JSGE) on the same unknown value must NOT let the
 * verifier conclude the value is bounded below -- the unknown value is
 * loaded from stack slot -16 which was seeded with -8.  Most variants
 * must be rejected with "unbounded min value".
 */
6209 "bounds checks mixing signed and unsigned, positive bounds",
6211 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6212 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6214 BPF_LD_MAP_FD(BPF_REG_1, 0),
6215 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6216 BPF_FUNC_map_lookup_elem),
6217 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6218 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6219 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6220 BPF_MOV64_IMM(BPF_REG_2, 2),
6221 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6222 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6223 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6224 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6225 BPF_MOV64_IMM(BPF_REG_0, 0),
6228 .fixup_map1 = { 3 },
6229 .errstr = "unbounded min value",
6233 "bounds checks mixing signed and unsigned",
6235 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6236 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6238 BPF_LD_MAP_FD(BPF_REG_1, 0),
6239 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6240 BPF_FUNC_map_lookup_elem),
6241 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6242 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6243 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6244 BPF_MOV64_IMM(BPF_REG_2, -1),
6245 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6246 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6247 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6248 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6249 BPF_MOV64_IMM(BPF_REG_0, 0),
6252 .fixup_map1 = { 3 },
6253 .errstr = "unbounded min value",
/* variant 2: the unsigned-checked value is first copied via ADD into
 * a zeroed register before the signed check
 */
6257 "bounds checks mixing signed and unsigned, variant 2",
6259 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6260 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6262 BPF_LD_MAP_FD(BPF_REG_1, 0),
6263 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6264 BPF_FUNC_map_lookup_elem),
6265 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6266 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6267 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6268 BPF_MOV64_IMM(BPF_REG_2, -1),
6269 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6270 BPF_MOV64_IMM(BPF_REG_8, 0),
6271 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6272 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6273 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6274 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6275 BPF_MOV64_IMM(BPF_REG_0, 0),
6278 .fixup_map1 = { 3 },
6279 .errstr = "unbounded min value",
/* variant 3: like variant 2 but the copy is a plain MOV */
6283 "bounds checks mixing signed and unsigned, variant 3",
6285 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6286 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6288 BPF_LD_MAP_FD(BPF_REG_1, 0),
6289 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6290 BPF_FUNC_map_lookup_elem),
6291 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6292 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6293 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6294 BPF_MOV64_IMM(BPF_REG_2, -1),
6295 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6296 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6297 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6298 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6299 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6300 BPF_MOV64_IMM(BPF_REG_0, 0),
6303 .fixup_map1 = { 3 },
6304 .errstr = "unbounded min value",
/* variant 4: AND with 1 makes the range [0,1]; this one is safe and
 * carries no .errstr (must be accepted)
 */
6308 "bounds checks mixing signed and unsigned, variant 4",
6310 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6313 BPF_LD_MAP_FD(BPF_REG_1, 0),
6314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6315 BPF_FUNC_map_lookup_elem),
6316 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6317 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6318 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6319 BPF_MOV64_IMM(BPF_REG_2, 1),
6320 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6321 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6322 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6323 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6324 BPF_MOV64_IMM(BPF_REG_0, 0),
6327 .fixup_map1 = { 3 },
/* variant 5: pointer computed by SUB after mixed checks */
6331 "bounds checks mixing signed and unsigned, variant 5",
6333 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6334 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6335 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6336 BPF_LD_MAP_FD(BPF_REG_1, 0),
6337 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6338 BPF_FUNC_map_lookup_elem),
6339 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6340 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6341 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6342 BPF_MOV64_IMM(BPF_REG_2, -1),
6343 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6344 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6346 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6347 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6348 BPF_MOV64_IMM(BPF_REG_0, 0),
6351 .fixup_map1 = { 3 },
6352 .errstr = "unbounded min value",
/* variant 6: the tainted value ends up as helper size argument r4,
 * caught as a negative min value on the helper call
 */
6356 "bounds checks mixing signed and unsigned, variant 6",
6358 BPF_MOV64_IMM(BPF_REG_2, 0),
6359 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6361 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6362 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6363 BPF_MOV64_IMM(BPF_REG_6, -1),
6364 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6365 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6367 BPF_MOV64_IMM(BPF_REG_5, 0),
6368 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6369 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6370 BPF_FUNC_skb_load_bytes),
6371 BPF_MOV64_IMM(BPF_REG_0, 0),
6374 .errstr = "R4 min value is negative, either use unsigned",
/* variant 7: unsigned bound is a large positive constant (1G); this
 * is safe and carries no .errstr (must be accepted)
 */
6378 "bounds checks mixing signed and unsigned, variant 7",
6380 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6381 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6382 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6383 BPF_LD_MAP_FD(BPF_REG_1, 0),
6384 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6385 BPF_FUNC_map_lookup_elem),
6386 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6387 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6388 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6389 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6390 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6391 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6392 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6393 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6394 BPF_MOV64_IMM(BPF_REG_0, 0),
6397 .fixup_map1 = { 3 },
/* variant 8: unsigned check with operands swapped (JGT r2, r1) */
6401 "bounds checks mixing signed and unsigned, variant 8",
6403 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6404 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6406 BPF_LD_MAP_FD(BPF_REG_1, 0),
6407 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6408 BPF_FUNC_map_lookup_elem),
6409 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6410 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6411 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6412 BPF_MOV64_IMM(BPF_REG_2, -1),
6413 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6414 BPF_MOV64_IMM(BPF_REG_0, 0),
6416 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6417 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6418 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6419 BPF_MOV64_IMM(BPF_REG_0, 0),
6422 .fixup_map1 = { 3 },
6423 .errstr = "unbounded min value",
/* variant 9: unsigned bound is INT64_MIN as an unsigned constant;
 * this combination happens to be safe -- no .errstr (accepted)
 */
6427 "bounds checks mixing signed and unsigned, variant 9",
6429 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6430 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6431 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6432 BPF_LD_MAP_FD(BPF_REG_1, 0),
6433 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6434 BPF_FUNC_map_lookup_elem),
6435 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6436 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6437 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6438 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6439 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6440 BPF_MOV64_IMM(BPF_REG_0, 0),
6442 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6443 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6444 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6445 BPF_MOV64_IMM(BPF_REG_0, 0),
6448 .fixup_map1 = { 3 },
/* variant 10: unsigned bound is 0 -- the JGT can never bound the
 * value; rejected
 */
6452 "bounds checks mixing signed and unsigned, variant 10",
6454 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6457 BPF_LD_MAP_FD(BPF_REG_1, 0),
6458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6459 BPF_FUNC_map_lookup_elem),
6460 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6461 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6462 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6463 BPF_MOV64_IMM(BPF_REG_2, 0),
6464 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6465 BPF_MOV64_IMM(BPF_REG_0, 0),
6467 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6468 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6469 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6470 BPF_MOV64_IMM(BPF_REG_0, 0),
6473 .fixup_map1 = { 3 },
6474 .errstr = "unbounded min value",
/* variant 11: JGE instead of JGT with -1 bound; rejected */
6478 "bounds checks mixing signed and unsigned, variant 11",
6480 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6481 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6482 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6483 BPF_LD_MAP_FD(BPF_REG_1, 0),
6484 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6485 BPF_FUNC_map_lookup_elem),
6486 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6487 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6488 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6489 BPF_MOV64_IMM(BPF_REG_2, -1),
6490 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6492 BPF_MOV64_IMM(BPF_REG_0, 0),
6494 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6495 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6496 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6497 BPF_MOV64_IMM(BPF_REG_0, 0),
6500 .fixup_map1 = { 3 },
6501 .errstr = "unbounded min value",
/* variant 12: JGE with -6 bound; rejected */
6505 "bounds checks mixing signed and unsigned, variant 12",
6507 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6508 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6510 BPF_LD_MAP_FD(BPF_REG_1, 0),
6511 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6512 BPF_FUNC_map_lookup_elem),
6513 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6514 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6515 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6516 BPF_MOV64_IMM(BPF_REG_2, -6),
6517 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6518 BPF_MOV64_IMM(BPF_REG_0, 0),
6520 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6521 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6522 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6523 BPF_MOV64_IMM(BPF_REG_0, 0),
6526 .fixup_map1 = { 3 },
6527 .errstr = "unbounded min value",
/* variant 13: taint propagated through an ADD into r7 before the
 * second signed check; rejected
 */
6531 "bounds checks mixing signed and unsigned, variant 13",
6533 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6534 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6536 BPF_LD_MAP_FD(BPF_REG_1, 0),
6537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6538 BPF_FUNC_map_lookup_elem),
6539 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6540 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6541 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6542 BPF_MOV64_IMM(BPF_REG_2, 2),
6543 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6544 BPF_MOV64_IMM(BPF_REG_7, 1),
6545 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
6546 BPF_MOV64_IMM(BPF_REG_0, 0),
6548 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
6549 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
6550 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
6551 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6552 BPF_MOV64_IMM(BPF_REG_0, 0),
6555 .fixup_map1 = { 3 },
6556 .errstr = "unbounded min value",
/* variant 14: checks reached via backward jumps gated on skb->mark,
 * exercising state pruning across the loop-shaped control flow;
 * note .fixup_map1 patches instruction 4 here (extra ctx load first)
 */
6560 "bounds checks mixing signed and unsigned, variant 14",
6562 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
6563 offsetof(struct __sk_buff, mark)),
6564 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6565 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6567 BPF_LD_MAP_FD(BPF_REG_1, 0),
6568 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6569 BPF_FUNC_map_lookup_elem),
6570 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6571 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6572 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6573 BPF_MOV64_IMM(BPF_REG_2, -1),
6574 BPF_MOV64_IMM(BPF_REG_8, 2),
6575 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
6576 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
6577 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6578 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6579 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6580 BPF_MOV64_IMM(BPF_REG_0, 0),
6582 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
6583 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6585 .fixup_map1 = { 4 },
6586 .errstr = "R0 invalid mem access 'inv'",
/* variant 15: pointer bounds checked only after the tainted ADD;
 * rejected, and also rejected for unprivileged (.result_unpriv)
 */
6590 "bounds checks mixing signed and unsigned, variant 15",
6592 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6593 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6594 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6595 BPF_LD_MAP_FD(BPF_REG_1, 0),
6596 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6597 BPF_FUNC_map_lookup_elem),
6598 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6599 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6600 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6601 BPF_MOV64_IMM(BPF_REG_2, -6),
6602 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6603 BPF_MOV64_IMM(BPF_REG_0, 0),
6605 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6606 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
6607 BPF_MOV64_IMM(BPF_REG_0, 0),
6609 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6610 BPF_MOV64_IMM(BPF_REG_0, 0),
6613 .fixup_map1 = { 3 },
6614 .errstr = "unbounded min value",
6616 .result_unpriv = REJECT,
/* Subtracting two values each bounded to [0,0xff] yields a possibly
 * negative result; variant 1 shifts it right by 56 (so max stays too
 * large), variant 2 uses it directly.  Both must be rejected.
 */
6619 "subtraction bounds (map value) variant 1",
6621 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6622 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6623 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6624 BPF_LD_MAP_FD(BPF_REG_1, 0),
6625 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6626 BPF_FUNC_map_lookup_elem),
6627 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6628 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6629 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
6630 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6631 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
6632 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6633 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
6634 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6635 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6637 BPF_MOV64_IMM(BPF_REG_0, 0),
6640 .fixup_map1 = { 3 },
6641 .errstr = "R0 max value is outside of the array range",
6645 "subtraction bounds (map value) variant 2",
6647 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6648 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6649 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6650 BPF_LD_MAP_FD(BPF_REG_1, 0),
6651 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6652 BPF_FUNC_map_lookup_elem),
6653 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6654 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6655 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
6656 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6657 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
6658 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6659 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6660 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6662 BPF_MOV64_IMM(BPF_REG_0, 0),
6665 .fixup_map1 = { 3 },
6666 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6667 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
/* MOV32 zero-extends, so 0xffffffff >> 32 == 0 and the access is at
 * offset 0 -- accepted (no .errstr).
 */
6671 "bounds check based on zero-extended MOV",
6673 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6674 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6676 BPF_LD_MAP_FD(BPF_REG_1, 0),
6677 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6678 BPF_FUNC_map_lookup_elem),
6679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6680 /* r2 = 0x0000'0000'ffff'ffff */
6681 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
6683 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6685 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6686 /* access at offset 0 */
6687 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6689 BPF_MOV64_IMM(BPF_REG_0, 0),
6692 .fixup_map1 = { 3 },
/* MOV64 with a negative 32-bit immediate sign-extends, so the shift
 * leaves 0xffffffff and the pointer is far out of bounds -- rejected.
 */
6696 "bounds check based on sign-extended MOV. test1",
6698 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6699 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6701 BPF_LD_MAP_FD(BPF_REG_1, 0),
6702 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6703 BPF_FUNC_map_lookup_elem),
6704 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6705 /* r2 = 0xffff'ffff'ffff'ffff */
6706 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6707 /* r2 = 0xffff'ffff */
6708 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6709 /* r0 = <oob pointer> */
6710 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6711 /* access to OOB pointer */
6712 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6714 BPF_MOV64_IMM(BPF_REG_0, 0),
6717 .fixup_map1 = { 3 },
6718 .errstr = "map_value pointer and 4294967295",
/* Same sign-extension issue with a shift of 36 -- rejected. */
6722 "bounds check based on sign-extended MOV. test2",
6724 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6725 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6727 BPF_LD_MAP_FD(BPF_REG_1, 0),
6728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6729 BPF_FUNC_map_lookup_elem),
6730 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6731 /* r2 = 0xffff'ffff'ffff'ffff */
6732 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6733 /* r2 = 0xfff'ffff */
6734 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
6735 /* r0 = <oob pointer> */
6736 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6737 /* access to OOB pointer */
6738 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6740 BPF_MOV64_IMM(BPF_REG_0, 0),
6743 .fixup_map1 = { 3 },
6744 .errstr = "R0 min value is outside of the array range",
/* Accumulating reg_off + var_off + the LDX insn offset must be checked
 * as a whole; test1's combination overflows the value size, test2's
 * intermediate value already exceeds the allowed pointer range.
 */
6748 "bounds check based on reg_off + var_off + insn_off. test1",
6750 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6751 offsetof(struct __sk_buff, mark)),
6752 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6753 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6755 BPF_LD_MAP_FD(BPF_REG_1, 0),
6756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6757 BPF_FUNC_map_lookup_elem),
6758 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6759 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
6761 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6763 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6764 BPF_MOV64_IMM(BPF_REG_0, 0),
6767 .fixup_map1 = { 4 },
6768 .errstr = "value_size=8 off=1073741825",
6770 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6773 "bounds check based on reg_off + var_off + insn_off. test2",
6775 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6776 offsetof(struct __sk_buff, mark)),
6777 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6778 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6779 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6780 BPF_LD_MAP_FD(BPF_REG_1, 0),
6781 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6782 BPF_FUNC_map_lookup_elem),
6783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6784 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6785 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
6786 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6788 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6789 BPF_MOV64_IMM(BPF_REG_0, 0),
6792 .fixup_map1 = { 4 },
6793 .errstr = "value 1073741823",
6795 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
/* 32-bit truncation of a range that does NOT cross a 2^32 boundary
 * preserves tight bounds, so the final access is provably at offset 0
 * -- accepted (no .errstr).
 */
6798 "bounds check after truncation of non-boundary-crossing range",
6800 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6801 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6802 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6803 BPF_LD_MAP_FD(BPF_REG_1, 0),
6804 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6805 BPF_FUNC_map_lookup_elem),
6806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6807 /* r1 = [0x00, 0xff] */
6808 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6809 BPF_MOV64_IMM(BPF_REG_2, 1),
6810 /* r2 = 0x10'0000'0000 */
6811 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
6812 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
6813 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6814 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
6815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6816 /* r1 = [0x00, 0xff] */
6817 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
6819 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6821 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6822 /* access at offset 0 */
6823 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6825 BPF_MOV64_IMM(BPF_REG_0, 0),
6828 .fixup_map1 = { 3 },
/* Truncating a range that straddles 2^32 (here via 32-bit ADD of 0)
 * cannot keep tight bounds: the verifier must treat the result as
 * (nearly) unbounded and reject the access.
 */
6832 "bounds check after truncation of boundary-crossing range (1)",
6834 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6835 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6837 BPF_LD_MAP_FD(BPF_REG_1, 0),
6838 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6839 BPF_FUNC_map_lookup_elem),
6840 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6841 /* r1 = [0x00, 0xff] */
6842 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6844 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6846 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6847 * [0x0000'0000, 0x0000'007f]
6849 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
6850 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6851 /* r1 = [0x00, 0xff] or
6852 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6854 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6856 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6858 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6859 /* no-op or OOB pointer computation */
6860 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6861 /* potentially OOB access */
6862 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6864 BPF_MOV64_IMM(BPF_REG_0, 0),
6867 .fixup_map1 = { 3 },
6868 /* not actually fully unbounded, but the bound is very high */
6869 .errstr = "R0 unbounded memory access",
6873 "bounds check after truncation of boundary-crossing range (2)",
6875 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6876 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6878 BPF_LD_MAP_FD(BPF_REG_1, 0),
6879 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6880 BPF_FUNC_map_lookup_elem),
6881 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6882 /* r1 = [0x00, 0xff] */
6883 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6885 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6886 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6887 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6888 * [0x0000'0000, 0x0000'007f]
6889 * difference to previous test: truncation via MOV32
6892 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
6893 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6894 /* r1 = [0x00, 0xff] or
6895 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6897 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6899 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6901 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6902 /* no-op or OOB pointer computation */
6903 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6904 /* potentially OOB access */
6905 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6907 BPF_MOV64_IMM(BPF_REG_0, 0),
6910 .fixup_map1 = { 3 },
6911 /* not actually fully unbounded, but the bound is very high */
6912 .errstr = "R0 unbounded memory access",
6916 "bounds check after wrapping 32-bit addition",
6918 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6919 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6920 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6921 BPF_LD_MAP_FD(BPF_REG_1, 0),
6922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6923 BPF_FUNC_map_lookup_elem),
6924 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6925 /* r1 = 0x7fff'ffff */
6926 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
6927 /* r1 = 0xffff'fffe */
6928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6930 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
6932 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6933 /* access at offset 0 */
6934 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6936 BPF_MOV64_IMM(BPF_REG_0, 0),
6939 .fixup_map1 = { 3 },
6943 "bounds check after shift with oversized count operand",
6945 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6946 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6948 BPF_LD_MAP_FD(BPF_REG_1, 0),
6949 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6950 BPF_FUNC_map_lookup_elem),
6951 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6952 BPF_MOV64_IMM(BPF_REG_2, 32),
6953 BPF_MOV64_IMM(BPF_REG_1, 1),
6954 /* r1 = (u32)1 << (u32)32 = ? */
6955 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
6956 /* r1 = [0x0000, 0xffff] */
6957 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
6958 /* computes unknown pointer, potentially OOB */
6959 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6960 /* potentially OOB access */
6961 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6963 BPF_MOV64_IMM(BPF_REG_0, 0),
6966 .fixup_map1 = { 3 },
6967 .errstr = "R0 max value is outside of the array range",
6971 "bounds check after right shift of maybe-negative number",
6973 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6974 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6976 BPF_LD_MAP_FD(BPF_REG_1, 0),
6977 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6978 BPF_FUNC_map_lookup_elem),
6979 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6980 /* r1 = [0x00, 0xff] */
6981 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6982 /* r1 = [-0x01, 0xfe] */
6983 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
6984 /* r1 = 0 or 0xff'ffff'ffff'ffff */
6985 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6986 /* r1 = 0 or 0xffff'ffff'ffff */
6987 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6988 /* computes unknown pointer, potentially OOB */
6989 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6990 /* potentially OOB access */
6991 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6993 BPF_MOV64_IMM(BPF_REG_0, 0),
6996 .fixup_map1 = { 3 },
6997 .errstr = "R0 unbounded memory access",
7001 "bounds check map access with off+size signed 32bit overflow. test1",
7003 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7004 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7006 BPF_LD_MAP_FD(BPF_REG_1, 0),
7007 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7008 BPF_FUNC_map_lookup_elem),
7009 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7012 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7016 .fixup_map1 = { 3 },
7017 .errstr = "map_value pointer and 2147483646",
7021 "bounds check map access with off+size signed 32bit overflow. test2",
7023 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7024 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7025 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7026 BPF_LD_MAP_FD(BPF_REG_1, 0),
7027 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7028 BPF_FUNC_map_lookup_elem),
7029 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7034 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7038 .fixup_map1 = { 3 },
7039 .errstr = "pointer offset 1073741822",
7040 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7044 "bounds check map access with off+size signed 32bit overflow. test3",
7046 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7047 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7048 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7049 BPF_LD_MAP_FD(BPF_REG_1, 0),
7050 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7051 BPF_FUNC_map_lookup_elem),
7052 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7054 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7055 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7056 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7060 .fixup_map1 = { 3 },
7061 .errstr = "pointer offset -1073741822",
7062 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
7066 "bounds check map access with off+size signed 32bit overflow. test4",
7068 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7069 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7071 BPF_LD_MAP_FD(BPF_REG_1, 0),
7072 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7073 BPF_FUNC_map_lookup_elem),
7074 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7076 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7077 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7078 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7079 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7083 .fixup_map1 = { 3 },
7084 .errstr = "map_value pointer and 1000000000000",
7088 "pointer/scalar confusion in state equality check (way 1)",
7090 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7091 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7092 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7093 BPF_LD_MAP_FD(BPF_REG_1, 0),
7094 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7095 BPF_FUNC_map_lookup_elem),
7096 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7097 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7099 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7103 .fixup_map1 = { 3 },
7105 .result_unpriv = REJECT,
7106 .errstr_unpriv = "R0 leaks addr as return value"
7109 "pointer/scalar confusion in state equality check (way 2)",
7111 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7112 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7113 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7114 BPF_LD_MAP_FD(BPF_REG_1, 0),
7115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7116 BPF_FUNC_map_lookup_elem),
7117 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7118 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7120 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7123 .fixup_map1 = { 3 },
7125 .result_unpriv = REJECT,
7126 .errstr_unpriv = "R0 leaks addr as return value"
7129 "variable-offset ctx access",
7131 /* Get an unknown value */
7132 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7133 /* Make it small and 4-byte aligned */
7134 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7135 /* add it to skb. We now have either &skb->len or
7136 * &skb->pkt_type, but we don't know which
7138 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7139 /* dereference it */
7140 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7143 .errstr = "variable ctx access var_off=(0x0; 0x4)",
7145 .prog_type = BPF_PROG_TYPE_LWT_IN,
7148 "variable-offset stack access",
7150 /* Fill the top 8 bytes of the stack */
7151 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7152 /* Get an unknown value */
7153 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7154 /* Make it small and 4-byte aligned */
7155 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7156 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7157 /* add it to fp. We now have either fp-4 or fp-8, but
7158 * we don't know which
7160 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7161 /* dereference it */
7162 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
7165 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7167 .prog_type = BPF_PROG_TYPE_LWT_IN,
7170 "indirect variable-offset stack access",
7172 /* Fill the top 8 bytes of the stack */
7173 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7174 /* Get an unknown value */
7175 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7176 /* Make it small and 4-byte aligned */
7177 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7178 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7179 /* add it to fp. We now have either fp-4 or fp-8, but
7180 * we don't know which
7182 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7183 /* dereference it indirectly */
7184 BPF_LD_MAP_FD(BPF_REG_1, 0),
7185 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7186 BPF_FUNC_map_lookup_elem),
7187 BPF_MOV64_IMM(BPF_REG_0, 0),
7190 .fixup_map1 = { 5 },
7191 .errstr = "variable stack read R2",
7193 .prog_type = BPF_PROG_TYPE_LWT_IN,
7196 "direct stack access with 32-bit wraparound. test1",
7198 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7199 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7200 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7201 BPF_MOV32_IMM(BPF_REG_0, 0),
7202 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7205 .errstr = "fp pointer and 2147483647",
7209 "direct stack access with 32-bit wraparound. test2",
7211 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7214 BPF_MOV32_IMM(BPF_REG_0, 0),
7215 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7218 .errstr = "fp pointer and 1073741823",
7222 "direct stack access with 32-bit wraparound. test3",
7224 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7226 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7227 BPF_MOV32_IMM(BPF_REG_0, 0),
7228 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7231 .errstr = "fp pointer offset 1073741822",
7232 .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
7236 "liveness pruning and write screening",
7238 /* Get an unknown value */
7239 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7240 /* branch conditions teach us nothing about R2 */
7241 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7242 BPF_MOV64_IMM(BPF_REG_0, 0),
7243 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7244 BPF_MOV64_IMM(BPF_REG_0, 0),
7247 .errstr = "R0 !read_ok",
7249 .prog_type = BPF_PROG_TYPE_LWT_IN,
7252 "varlen_map_value_access pruning",
7254 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7255 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7257 BPF_LD_MAP_FD(BPF_REG_1, 0),
7258 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7259 BPF_FUNC_map_lookup_elem),
7260 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7261 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7262 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
7263 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
7264 BPF_MOV32_IMM(BPF_REG_1, 0),
7265 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
7266 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7267 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
7268 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7269 offsetof(struct test_val, foo)),
7272 .fixup_map2 = { 3 },
7273 .errstr_unpriv = "R0 leaks addr",
7274 .errstr = "R0 unbounded memory access",
7275 .result_unpriv = REJECT,
7277 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7280 "invalid 64-bit BPF_END",
7282 BPF_MOV32_IMM(BPF_REG_0, 0),
7284 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
7285 .dst_reg = BPF_REG_0,
7292 .errstr = "BPF_END uses reserved fields",
7296 "arithmetic ops make PTR_TO_CTX unusable",
7298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
7299 offsetof(struct __sk_buff, data) -
7300 offsetof(struct __sk_buff, mark)),
7301 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7302 offsetof(struct __sk_buff, mark)),
7305 .errstr = "dereference of modified ctx ptr",
7307 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7310 "pkt_end - pkt_start is allowed",
7312 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7313 offsetof(struct __sk_buff, data_end)),
7314 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7315 offsetof(struct __sk_buff, data)),
7316 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
7320 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7323 "XDP pkt read, pkt_end mangling, bad access 1",
7325 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7326 offsetof(struct xdp_md, data)),
7327 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7328 offsetof(struct xdp_md, data_end)),
7329 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
7332 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7333 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7334 BPF_MOV64_IMM(BPF_REG_0, 0),
7337 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
7339 .prog_type = BPF_PROG_TYPE_XDP,
7342 "XDP pkt read, pkt_end mangling, bad access 2",
7344 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7345 offsetof(struct xdp_md, data)),
7346 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7347 offsetof(struct xdp_md, data_end)),
7348 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7350 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
7351 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7352 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7353 BPF_MOV64_IMM(BPF_REG_0, 0),
7356 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
7358 .prog_type = BPF_PROG_TYPE_XDP,
7361 "XDP pkt read, pkt_data' > pkt_end, good access",
7363 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7364 offsetof(struct xdp_md, data)),
7365 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7366 offsetof(struct xdp_md, data_end)),
7367 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7369 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7370 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7371 BPF_MOV64_IMM(BPF_REG_0, 0),
7375 .prog_type = BPF_PROG_TYPE_XDP,
7378 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
7380 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7381 offsetof(struct xdp_md, data)),
7382 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7383 offsetof(struct xdp_md, data_end)),
7384 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7385 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7386 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7387 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7388 BPF_MOV64_IMM(BPF_REG_0, 0),
7391 .errstr = "R1 offset is outside of the packet",
7393 .prog_type = BPF_PROG_TYPE_XDP,
7394 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7397 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
7399 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7400 offsetof(struct xdp_md, data)),
7401 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7402 offsetof(struct xdp_md, data_end)),
7403 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7405 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
7406 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7407 BPF_MOV64_IMM(BPF_REG_0, 0),
7410 .errstr = "R1 offset is outside of the packet",
7412 .prog_type = BPF_PROG_TYPE_XDP,
7415 "XDP pkt read, pkt_end > pkt_data', good access",
7417 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7418 offsetof(struct xdp_md, data)),
7419 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7420 offsetof(struct xdp_md, data_end)),
7421 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7422 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7423 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7424 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7425 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7426 BPF_MOV64_IMM(BPF_REG_0, 0),
7430 .prog_type = BPF_PROG_TYPE_XDP,
7431 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7434 "XDP pkt read, pkt_end > pkt_data', bad access 1",
7436 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7437 offsetof(struct xdp_md, data)),
7438 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7439 offsetof(struct xdp_md, data_end)),
7440 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7441 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7442 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7443 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7444 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7445 BPF_MOV64_IMM(BPF_REG_0, 0),
7448 .errstr = "R1 offset is outside of the packet",
7450 .prog_type = BPF_PROG_TYPE_XDP,
7453 "XDP pkt read, pkt_end > pkt_data', bad access 2",
7455 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7456 offsetof(struct xdp_md, data)),
7457 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7458 offsetof(struct xdp_md, data_end)),
7459 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7461 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7462 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7463 BPF_MOV64_IMM(BPF_REG_0, 0),
7466 .errstr = "R1 offset is outside of the packet",
7468 .prog_type = BPF_PROG_TYPE_XDP,
7471 "XDP pkt read, pkt_data' < pkt_end, good access",
7473 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7474 offsetof(struct xdp_md, data)),
7475 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7476 offsetof(struct xdp_md, data_end)),
7477 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7479 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7480 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7481 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7482 BPF_MOV64_IMM(BPF_REG_0, 0),
7486 .prog_type = BPF_PROG_TYPE_XDP,
7487 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7490 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
7492 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7493 offsetof(struct xdp_md, data)),
7494 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7495 offsetof(struct xdp_md, data_end)),
7496 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7497 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7498 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7499 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7500 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7501 BPF_MOV64_IMM(BPF_REG_0, 0),
7504 .errstr = "R1 offset is outside of the packet",
7506 .prog_type = BPF_PROG_TYPE_XDP,
7509 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
7511 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7512 offsetof(struct xdp_md, data)),
7513 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7514 offsetof(struct xdp_md, data_end)),
7515 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7517 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7518 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7519 BPF_MOV64_IMM(BPF_REG_0, 0),
7522 .errstr = "R1 offset is outside of the packet",
7524 .prog_type = BPF_PROG_TYPE_XDP,
7527 "XDP pkt read, pkt_end < pkt_data', good access",
7529 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7530 offsetof(struct xdp_md, data)),
7531 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7532 offsetof(struct xdp_md, data_end)),
7533 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7535 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7536 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7537 BPF_MOV64_IMM(BPF_REG_0, 0),
7541 .prog_type = BPF_PROG_TYPE_XDP,
7544 "XDP pkt read, pkt_end < pkt_data', bad access 1",
7546 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7547 offsetof(struct xdp_md, data)),
7548 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7549 offsetof(struct xdp_md, data_end)),
7550 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7552 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7553 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7554 BPF_MOV64_IMM(BPF_REG_0, 0),
7557 .errstr = "R1 offset is outside of the packet",
7559 .prog_type = BPF_PROG_TYPE_XDP,
7560 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7563 "XDP pkt read, pkt_end < pkt_data', bad access 2",
7565 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7566 offsetof(struct xdp_md, data)),
7567 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7568 offsetof(struct xdp_md, data_end)),
7569 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7570 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7571 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
7572 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7573 BPF_MOV64_IMM(BPF_REG_0, 0),
7576 .errstr = "R1 offset is outside of the packet",
7578 .prog_type = BPF_PROG_TYPE_XDP,
7581 "XDP pkt read, pkt_data' >= pkt_end, good access",
7583 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7584 offsetof(struct xdp_md, data)),
7585 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7586 offsetof(struct xdp_md, data_end)),
7587 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7589 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7590 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7591 BPF_MOV64_IMM(BPF_REG_0, 0),
7595 .prog_type = BPF_PROG_TYPE_XDP,
7596 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7599 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
7601 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7602 offsetof(struct xdp_md, data)),
7603 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7604 offsetof(struct xdp_md, data_end)),
7605 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7607 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7608 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7609 BPF_MOV64_IMM(BPF_REG_0, 0),
7612 .errstr = "R1 offset is outside of the packet",
7614 .prog_type = BPF_PROG_TYPE_XDP,
7617 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
7619 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7620 offsetof(struct xdp_md, data)),
7621 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7622 offsetof(struct xdp_md, data_end)),
7623 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7625 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
7626 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7627 BPF_MOV64_IMM(BPF_REG_0, 0),
7630 .errstr = "R1 offset is outside of the packet",
7632 .prog_type = BPF_PROG_TYPE_XDP,
7633 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7636 "XDP pkt read, pkt_end >= pkt_data', good access",
7638 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7639 offsetof(struct xdp_md, data)),
7640 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7641 offsetof(struct xdp_md, data_end)),
7642 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7644 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7645 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7646 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7647 BPF_MOV64_IMM(BPF_REG_0, 0),
7651 .prog_type = BPF_PROG_TYPE_XDP,
7654 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
7656 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7657 offsetof(struct xdp_md, data)),
7658 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7659 offsetof(struct xdp_md, data_end)),
7660 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7662 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7663 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7664 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7665 BPF_MOV64_IMM(BPF_REG_0, 0),
7668 .errstr = "R1 offset is outside of the packet",
7670 .prog_type = BPF_PROG_TYPE_XDP,
7671 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7674 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
7676 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7677 offsetof(struct xdp_md, data)),
7678 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7679 offsetof(struct xdp_md, data_end)),
7680 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7681 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7682 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7683 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7684 BPF_MOV64_IMM(BPF_REG_0, 0),
7687 .errstr = "R1 offset is outside of the packet",
7689 .prog_type = BPF_PROG_TYPE_XDP,
7692 "XDP pkt read, pkt_data' <= pkt_end, good access",
7694 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7695 offsetof(struct xdp_md, data)),
7696 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7697 offsetof(struct xdp_md, data_end)),
7698 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7700 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7701 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7702 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7703 BPF_MOV64_IMM(BPF_REG_0, 0),
7707 .prog_type = BPF_PROG_TYPE_XDP,
7710 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
7712 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7713 offsetof(struct xdp_md, data)),
7714 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7715 offsetof(struct xdp_md, data_end)),
7716 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7717 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7718 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7719 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7720 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7721 BPF_MOV64_IMM(BPF_REG_0, 0),
7724 .errstr = "R1 offset is outside of the packet",
7726 .prog_type = BPF_PROG_TYPE_XDP,
7727 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7730 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
7732 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7733 offsetof(struct xdp_md, data)),
7734 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7735 offsetof(struct xdp_md, data_end)),
7736 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7738 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7739 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7740 BPF_MOV64_IMM(BPF_REG_0, 0),
7743 .errstr = "R1 offset is outside of the packet",
7745 .prog_type = BPF_PROG_TYPE_XDP,
7748 "XDP pkt read, pkt_end <= pkt_data', good access",
7750 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7751 offsetof(struct xdp_md, data)),
7752 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7753 offsetof(struct xdp_md, data_end)),
7754 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7756 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7757 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7758 BPF_MOV64_IMM(BPF_REG_0, 0),
7762 .prog_type = BPF_PROG_TYPE_XDP,
7763 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7766 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
7768 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7769 offsetof(struct xdp_md, data)),
7770 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7771 offsetof(struct xdp_md, data_end)),
7772 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7773 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7774 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7775 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7776 BPF_MOV64_IMM(BPF_REG_0, 0),
7779 .errstr = "R1 offset is outside of the packet",
7781 .prog_type = BPF_PROG_TYPE_XDP,
7784 "check deducing bounds from const, 1",
7786 BPF_MOV64_IMM(BPF_REG_0, 1),
7787 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
7788 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7791 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
7792 .errstr = "R0 tried to subtract pointer from scalar",
7796 "check deducing bounds from const, 2",
7798 BPF_MOV64_IMM(BPF_REG_0, 1),
7799 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
7801 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
7803 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
7806 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
7807 .result_unpriv = REJECT,
7811 "check deducing bounds from const, 3",
7813 BPF_MOV64_IMM(BPF_REG_0, 0),
7814 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
7815 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7818 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
7819 .errstr = "R0 tried to subtract pointer from scalar",
7823 "check deducing bounds from const, 4",
7825 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7826 BPF_MOV64_IMM(BPF_REG_0, 0),
7827 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
7829 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
7831 BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0),
7834 .errstr_unpriv = "R6 has pointer with unsupported alu operation",
7835 .result_unpriv = REJECT,
7839 "check deducing bounds from const, 5",
7841 BPF_MOV64_IMM(BPF_REG_0, 0),
7842 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
7843 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7846 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
7847 .errstr = "R0 tried to subtract pointer from scalar",
7851 "check deducing bounds from const, 6",
7853 BPF_MOV64_IMM(BPF_REG_0, 0),
7854 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
7856 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7859 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
7860 .errstr = "R0 tried to subtract pointer from scalar",
7864 "check deducing bounds from const, 7",
7866 BPF_MOV64_IMM(BPF_REG_0, ~0),
7867 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
7868 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
7869 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7870 offsetof(struct __sk_buff, mark)),
7873 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
7874 .errstr = "dereference of modified ctx ptr",
7878 "check deducing bounds from const, 8",
7880 BPF_MOV64_IMM(BPF_REG_0, ~0),
7881 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
7882 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
7883 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7884 offsetof(struct __sk_buff, mark)),
7887 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
7888 .errstr = "dereference of modified ctx ptr",
7892 "check deducing bounds from const, 9",
7894 BPF_MOV64_IMM(BPF_REG_0, 0),
7895 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
7896 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7899 .errstr_unpriv = "R1 has pointer with unsupported alu operation",
7900 .errstr = "R0 tried to subtract pointer from scalar",
7904 "check deducing bounds from const, 10",
7906 BPF_MOV64_IMM(BPF_REG_0, 0),
7907 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
7908 /* Marks reg as unknown. */
7909 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
7910 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7913 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
7917 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
7919 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7920 offsetof(struct xdp_md, data)),
7921 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7922 offsetof(struct xdp_md, data_end)),
7923 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7925 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
7926 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7927 BPF_MOV64_IMM(BPF_REG_0, 0),
7930 .errstr = "R1 offset is outside of the packet",
7932 .prog_type = BPF_PROG_TYPE_XDP,
7933 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7936 "xadd/w check unaligned stack",
7938 BPF_MOV64_IMM(BPF_REG_0, 1),
7939 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7940 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
7941 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
7945 .errstr = "misaligned stack access off",
7946 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7949 "xadd/w check unaligned map",
7951 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7952 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7954 BPF_LD_MAP_FD(BPF_REG_1, 0),
7955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7956 BPF_FUNC_map_lookup_elem),
7957 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7959 BPF_MOV64_IMM(BPF_REG_1, 1),
7960 BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
7961 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
7964 .fixup_map1 = { 3 },
7966 .errstr = "misaligned value access off",
7967 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7970 "xadd/w check unaligned pkt",
7972 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7973 offsetof(struct xdp_md, data)),
7974 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7975 offsetof(struct xdp_md, data_end)),
7976 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7978 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
7979 BPF_MOV64_IMM(BPF_REG_0, 99),
7980 BPF_JMP_IMM(BPF_JA, 0, 0, 6),
7981 BPF_MOV64_IMM(BPF_REG_0, 1),
7982 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
7983 BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
7984 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
7985 BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
7986 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
7990 .errstr = "BPF_XADD stores into R2 packet",
7991 .prog_type = BPF_PROG_TYPE_XDP,
7994 "pass unmodified ctx pointer to helper",
7996 BPF_MOV64_IMM(BPF_REG_2, 0),
7997 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7998 BPF_FUNC_csum_update),
7999 BPF_MOV64_IMM(BPF_REG_0, 0),
8002 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8006 "pass modified ctx pointer to helper, 1",
8008 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
8009 BPF_MOV64_IMM(BPF_REG_2, 0),
8010 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8011 BPF_FUNC_csum_update),
8012 BPF_MOV64_IMM(BPF_REG_0, 0),
8015 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8017 .errstr = "dereference of modified ctx ptr",
8020 "pass modified ctx pointer to helper, 2",
8022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
8023 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8024 BPF_FUNC_get_socket_cookie),
8025 BPF_MOV64_IMM(BPF_REG_0, 0),
8028 .result_unpriv = REJECT,
8030 .errstr_unpriv = "dereference of modified ctx ptr",
8031 .errstr = "dereference of modified ctx ptr",
8034 "pass modified ctx pointer to helper, 3",
8036 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
8037 BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
8038 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
8039 BPF_MOV64_IMM(BPF_REG_2, 0),
8040 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8041 BPF_FUNC_csum_update),
8042 BPF_MOV64_IMM(BPF_REG_0, 0),
8045 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8047 .errstr = "variable ctx access var_off=(0x0; 0x4)",
/* Determine the effective length of a test program by scanning backwards
 * from MAX_INSNS for the last instruction whose opcode or immediate is
 * non-zero (the tests[] insns arrays are zero-padded to MAX_INSNS).
 * NOTE(review): the loop body's terminating statement and the final
 * return (presumably "return len + 1;") are not visible in this extract
 * -- lines appear elided; confirm against the full source.
 */
8051 static int probe_filter_length(const struct bpf_insn *fp)
8055 for (len = MAX_INSNS - 1; len > 0; --len)
8056 if (fp[len].code != 0 || fp[len].imm != 0)
/* Create a BPF_MAP_TYPE_HASH map with 8-byte (long long) keys,
 * @size_value-byte values and @max_elem entries, no preallocation.
 * On failure the libbpf error (negative fd) is reported via errno.
 * Returns the map fd (negative on error); caller closes it.
 */
8061 static int create_map(uint32_t size_value, uint32_t max_elem)
8065 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
8066 size_value, max_elem, BPF_F_NO_PREALLOC);
/* Failure is only logged here; the caller still receives the bad fd. */
8068 printf("Failed to create hash map '%s'!\n", strerror(errno));
/* Create a BPF_MAP_TYPE_PROG_ARRAY (tail-call map) with int keys; used
 * by tests whose fixup_prog offsets need a prog-array fd patched in.
 * Returns the map fd (negative on error); caller closes it.
 * NOTE(review): the remaining bpf_create_map() arguments and the return
 * statement are elided in this extract -- confirm against full source.
 */
8073 static int create_prog_array(void)
8077 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
8080 printf("Failed to create prog array '%s'!\n", strerror(errno));
/* Build an ARRAY_OF_MAPS: create an inner ARRAY map, use its fd as the
 * template for a 1-element outer map, then close the inner fd (the
 * outer map keeps its own reference once created).
 * Returns the outer map fd, or a negative error.
 */
8085 static int create_map_in_map(void)
8087 int inner_map_fd, outer_map_fd;
8089 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
8091 if (inner_map_fd < 0) {
8092 printf("Failed to create array '%s'!\n", strerror(errno))
8093 return inner_map_fd;
/* inner_map_fd serves only as the inner-map template for the verifier. */
8096 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
8097 sizeof(int), inner_map_fd, 1, 0);
8098 if (outer_map_fd < 0)
8099 printf("Failed to create array of maps '%s'!\n",
/* Safe to drop the inner fd whether or not outer creation succeeded. */
8102 close(inner_map_fd);
8104 return outer_map_fd;
/* Buffer receiving the kernel verifier's log for each program load;
 * do_test_single() passes it to bpf_verify_program() and greps it for
 * the expected error string and for "misaligned".
 */
8107 static char bpf_vlog[32768];
/* Patch the test program in place: for each fixup offset list that is
 * populated, create the matching map type once and write its fd into
 * the .imm field of the instruction(s) at the listed offsets (the
 * BPF_LD_MAP_FD placeholders).  Created fds are stored in map_fds[]
 * so do_test_single() can close them afterwards.
 * NOTE(review): the "if (*fixup_mapX) {" guards and "do {" loop openers
 * are elided in this extract; only the loop tails are visible.
 */
8109 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
8112 int *fixup_map1 = test->fixup_map1;
8113 int *fixup_map2 = test->fixup_map2;
8114 int *fixup_prog = test->fixup_prog;
8115 int *fixup_map_in_map = test->fixup_map_in_map;
8117 /* Allocating HTs with 1 elem is fine here, since we only test
8118 * for verifier and not do a runtime lookup, so the only thing
8119 * that really matters is value size in this case.
/* map1: hash map with scalar (long long) values. */
8122 map_fds[0] = create_map(sizeof(long long), 1);
8124 prog[*fixup_map1].imm = map_fds[0];
8126 } while (*fixup_map1);
/* map2: hash map sized for a struct test_val value. */
8130 map_fds[1] = create_map(sizeof(struct test_val), 1);
8132 prog[*fixup_map2].imm = map_fds[1];
8134 } while (*fixup_map2);
/* prog array for tail-call tests. */
8138 map_fds[2] = create_prog_array();
8140 prog[*fixup_prog].imm = map_fds[2];
8142 } while (*fixup_prog);
/* map-in-map (ARRAY_OF_MAPS) for the map_in_map tests. */
8145 if (*fixup_map_in_map) {
8146 map_fds[3] = create_map_in_map();
8148 prog[*fixup_map_in_map].imm = map_fds[3];
8150 } while (*fixup_map_in_map);
/* Run one tests[] entry: apply map-fd fixups, load the program via
 * bpf_verify_program(), and compare the outcome (accept/reject and,
 * on reject, the verifier log contents) against the test's expected
 * result -- using the *_unpriv variants when running unprivileged.
 * Increments *passes or *errors accordingly.
 */
8154 static void do_test_single(struct bpf_test *test, bool unpriv,
8155 int *passes, int *errors)
8157 int fd_prog, expected_ret, reject_from_alignment;
8158 struct bpf_insn *prog = test->insns;
8159 int prog_len = probe_filter_length(prog);
8160 int prog_type = test->prog_type;
8161 int map_fds[MAX_NR_MAPS];
8162 const char *expected_err;
/* Pre-initialize fds (elided body presumably sets them to -1). */
8165 for (i = 0; i < MAX_NR_MAPS; i++)
8168 do_test_fixup(test, prog, map_fds);
/* prog_type 0 (unset in the test) defaults to SOCKET_FILTER, the only
 * type loadable without CAP_SYS_ADMIN.
 */
8170 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
8171 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
8172 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
/* Unprivileged expectations override only when explicitly set. */
8174 expected_ret = unpriv && test->result_unpriv != UNDEF ?
8175 test->result_unpriv : test->result;
8176 expected_err = unpriv && test->errstr_unpriv ?
8177 test->errstr_unpriv : test->errstr;
/* On arches without efficient unaligned access, tests flagged
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS may be rejected for alignment
 * alone; detect that from the log so it can be tolerated below.
 */
8179 reject_from_alignment = fd_prog < 0 &&
8180 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
8181 strstr(bpf_vlog, "misaligned");
8182 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* With efficient unaligned access an alignment reject is a real bug. */
8183 if (reject_from_alignment) {
8184 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
8189 if (expected_ret == ACCEPT) {
8190 if (fd_prog < 0 && !reject_from_alignment) {
8191 printf("FAIL\nFailed to load prog '%s'!\n",
8197 printf("FAIL\nUnexpected success to load!\n");
/* Rejected as expected -- also require the expected message in the log. */
8200 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
8201 printf("FAIL\nUnexpected error message!\n");
8207 printf("OK%s\n", reject_from_alignment ?
8208 " (NOTE: reject due to unknown alignment)" : "");
/* Close any map fds created by do_test_fixup (loop body elided). */
8211 for (i = 0; i < MAX_NR_MAPS; i++)
/* Failure path (elided label, presumably): dump the verifier log. */
8217 printf("%s", bpf_vlog);
/* Return true iff the process currently has CAP_SYS_ADMIN in its
 * effective capability set; decides whether tests run as privileged.
 */
8221 static bool is_admin(void)
8224 cap_flag_value_t sysadmin = CAP_CLEAR;
8225 const cap_value_t cap_val = CAP_SYS_ADMIN;
8227 #ifdef CAP_IS_SUPPORTED
/* NOTE(review): probing CAP_SETFCAP here looks like a generic "does the
 * kernel support capability queries" check, not a typo for CAP_SYS_ADMIN
 * -- but the perror tag "cap_get_flag" is misleading; verify intent.
 */
8228 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
8229 perror("cap_get_flag");
8233 caps = cap_get_proc();
8235 perror("cap_get_proc");
8238 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
8239 perror("cap_get_flag");
/* NOTE(review): no cap_free(caps) is visible on any path in this
 * extract -- possible cap_t leak unless it sits in an elided line.
 */
8242 return (sysadmin == CAP_SET);
/* Raise (admin=true) or drop (admin=false) CAP_SYS_ADMIN in the
 * effective set, letting do_test() run the same test both privileged
 * and unprivileged.  Returns 0 on success, non-zero on failure
 * (return statements elided in this extract).
 */
8245 static int set_admin(bool admin)
8248 const cap_value_t cap_val = CAP_SYS_ADMIN;
8251 caps = cap_get_proc();
8253 perror("cap_get_proc");
/* Toggle only the EFFECTIVE flag; permitted set is left intact so the
 * capability can be re-raised later.
 */
8256 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
8257 admin ? CAP_SET : CAP_CLEAR)) {
8258 perror("cap_set_flag");
8261 if (cap_set_proc(caps)) {
8262 perror("cap_set_proc");
/* NOTE(review): no cap_free(caps) is visible on any path -- apparent
 * cap_t leak on both success and error paths; upstream later added a
 * goto-cleanup with cap_free. Confirm against the full source.
 */
/* Run tests[from..to) and print a pass/fail summary.  Tests with no
 * explicit prog_type (socket-filter default) are additionally run
 * unprivileged ("#N/u"); every test runs privileged ("#N/p").
 * The set_admin() calls that drop/restore CAP_SYS_ADMIN around the
 * unprivileged run are elided in this extract.
 * Returns EXIT_SUCCESS iff no test failed.
 */
8272 static int do_test(bool unpriv, unsigned int from, unsigned int to)
8274 int i, passes = 0, errors = 0;
8276 for (i = 0; i < to; i++) {
8277 struct bpf_test *test = &tests[i];
8279 /* Program types that are not supported by non-root we
8282 if (!test->prog_type) {
8285 printf("#%d/u %s ", i, test->descr);
8286 do_test_single(test, true, &passes, &errors);
8292 printf("#%d/p %s ", i, test->descr);
8293 do_test_single(test, false, &passes, &errors);
8297 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
8298 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
/* Entry point.  Optional argv: "<from> <to>" selects a test range,
 * a single "<n>" selects one test (assignment lines elided here).
 * Unprivileged runs get a small RLIMIT_MEMLOCK (1 MiB) so map/prog
 * accounting is exercised; privileged runs get an unlimited one.
 */
8301 int main(int argc, char **argv)
8303 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
8304 struct rlimit rlim = { 1 << 20, 1 << 20 };
8305 unsigned int from = 0, to = ARRAY_SIZE(tests);
8306 bool unpriv = !is_admin();
/* NOTE(review): atoi() gives 0 on garbage with no error report --
 * strtol with endptr checking would reject bad input; also the guard
 * below uses "l < to && u < to", so u == ARRAY_SIZE(tests) (a natural
 * "run to the end" request) is silently ignored.  Confirm intent.
 */
8309 unsigned int l = atoi(argv[argc - 2]);
8310 unsigned int u = atoi(argv[argc - 1]);
8312 if (l < to && u < to) {
8316 } else if (argc == 2) {
8317 unsigned int t = atoi(argv[argc - 1]);
8325 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
8326 return do_test(unpriv, from, to);