1 /* bpf_jit_comp.c : BPF JIT compiler
3 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
4 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; version 2
11 #include <linux/netdevice.h>
12 #include <linux/filter.h>
13 #include <linux/if_vlan.h>
14 #include <asm/cacheflush.h>
15 #include <asm/nospec-branch.h>
16 #include <linux/bpf.h>
19 * assembly code in arch/x86/net/bpf_jit.S
21 extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
22 extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
23 extern u8 sk_load_byte_positive_offset[];
24 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
25 extern u8 sk_load_byte_negative_offset[];
27 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
40 #define EMIT(bytes, len) \
41 do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
43 #define EMIT1(b1) EMIT(b1, 1)
44 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
45 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
46 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
47 #define EMIT1_off32(b1, off) \
48 do {EMIT1(b1); EMIT(off, 4); } while (0)
49 #define EMIT2_off32(b1, b2, off) \
50 do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
51 #define EMIT3_off32(b1, b2, b3, off) \
52 do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
53 #define EMIT4_off32(b1, b2, b3, b4, off) \
54 do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
56 static bool is_imm8(int value)
58 return value <= 127 && value >= -128;
61 static bool is_simm32(s64 value)
63 return value == (s64) (s32) value;
67 #define EMIT_mov(DST, SRC) \
69 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
72 static int bpf_size_to_x86_bytes(int bpf_size)
74 if (bpf_size == BPF_W)
76 else if (bpf_size == BPF_H)
78 else if (bpf_size == BPF_B)
80 else if (bpf_size == BPF_DW)
86 /* list of x86 cond jump opcodes (. + s8)
87 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
98 static void bpf_flush_icache(void *start, void *end)
100 mm_segment_t old_fs = get_fs();
104 flush_icache_range((unsigned long)start, (unsigned long)end);
108 #define CHOOSE_LOAD_FUNC(K, func) \
109 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
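/* LD_ABS/LD_IND with a negative constant offset addresses the packet
 * relative to the link-layer/network headers (SKF_LL_OFF et al.). This
 * macro picks the matching bpf_jit.S helper at compile time: K >= 0 uses
 * the *_positive_offset variant, SKF_LL_OFF <= K < 0 the *_negative_offset
 * variant, and anything below SKF_LL_OFF falls back to the generic helper.
 */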
111 /* pick a register outside of BPF range for JIT internal work */
112 #define AUX_REG (MAX_BPF_JIT_REG + 1)
114 /* The following table maps BPF registers to x64 registers.
116 * x64 register r12 is unused, since if used as base address
117 * register in load/store instructions, it always needs an
118 * extra byte of encoding and is callee saved.
120 * r9 caches skb->len - skb->data_len
121 * r10 caches skb->data, and used for blinding (if enabled)
123 static const int reg2hex[] = {
124 [BPF_REG_0] = 0, /* rax */
125 [BPF_REG_1] = 7, /* rdi */
126 [BPF_REG_2] = 6, /* rsi */
127 [BPF_REG_3] = 2, /* rdx */
128 [BPF_REG_4] = 1, /* rcx */
129 [BPF_REG_5] = 0, /* r8 */
130 [BPF_REG_6] = 3, /* rbx callee saved */
131 [BPF_REG_7] = 5, /* r13 callee saved */
132 [BPF_REG_8] = 6, /* r14 callee saved */
133 [BPF_REG_9] = 7, /* r15 callee saved */
134 [BPF_REG_FP] = 5, /* rbp readonly */
135 [BPF_REG_AX] = 2, /* r10 temp register */
136 [AUX_REG] = 3, /* r11 temp register */
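/* The values above are the low three bits of the x86-64 register number;
 * rax and r8 therefore share encoding 0, rdi and r15 share 7, and so on.
 * The fourth bit needed for r8..r15 is supplied separately via a REX
 * prefix, which is what is_ereg()/add_1mod()/add_2mod() below take care of.
 */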
139 /* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
140 * which need extra byte of encoding.
141 * rax,rcx,...,rbp have simpler encoding
143 static bool is_ereg(u32 reg)
145 return (1 << reg) & (BIT(BPF_REG_5) |
154 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
155 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
156 * of encoding. al,cl,dl,bl have simpler encoding.
158 static bool is_ereg_8l(u32 reg)
160 return is_ereg(reg) ||
161 (1 << reg) & (BIT(BPF_REG_1) |
166 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
167 static u8 add_1mod(u8 byte, u32 reg)
174 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
183 /* encode 'dst_reg' register into x64 opcode 'byte' */
184 static u8 add_1reg(u8 byte, u32 dst_reg)
186 return byte + reg2hex[dst_reg];
189 /* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
190 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
192 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
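/* 'byte' carries the ModRM mod bits (0xC0 == register-direct), dst_reg
 * fills the r/m field and src_reg the reg field. For illustration,
 * EMIT_mov(BPF_REG_1, BPF_REG_2) expands to the bytes 48 89 f7, i.e.
 * 'mov rdi, rsi': REX.W, opcode 0x89, ModRM 0xC0 + 7 + (6 << 3) = 0xf7.
 */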
195 static void jit_fill_hole(void *area, unsigned int size)
197 /* fill whole space with int3 instructions */
198 memset(area, 0xcc, size);
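/* 0xcc is the x86 'int3' breakpoint opcode, so a stray jump into unused
 * space of the JIT image traps instead of executing whatever bytes happen
 * to be there.
 */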
202 int cleanup_addr; /* epilogue code offset */
207 /* maximum number of bytes emitted while JITing one eBPF insn */
208 #define BPF_MAX_INSN_SIZE 128
209 #define BPF_INSN_SAFETY 64
213 32 /* space for rbx, r13, r14, r15 */ + \
214 8 /* space for skb_copy_bits() buffer */)
216 #define PROLOGUE_SIZE 48
218 /* emit x64 prologue code for BPF program and check its size.
219 * bpf_tail_call helper will skip it while jumping into another program
221 static void emit_prologue(u8 **pprog)
226 EMIT1(0x55); /* push rbp */
227 EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
229 /* sub rsp, STACKSIZE */
230 EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);
232 /* all classic BPF filters use R6(rbx), so save it */
234 /* mov qword ptr [rbp-X],rbx */
235 EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);
237 /* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
238 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
239 * R8(r14). R9(r15) spill could be made conditional, but there is only
240 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
241 * The overhead of the extra spill is negligible for any filter other
242 * than synthetic ones, so it is not worth adding complexity.
245 /* mov qword ptr [rbp-X],r13 */
246 EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
247 /* mov qword ptr [rbp-X],r14 */
248 EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
249 /* mov qword ptr [rbp-X],r15 */
250 EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);
252 /* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
253 * we need to reset the counter to 0. It's done in two instructions,
254 * resetting rax register to 0 (xor on eax gets 0 extended), and
255 * moving it to the counter location.
260 /* mov qword ptr [rbp-X], rax */
261 EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);
263 BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
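/* Byte accounting for PROLOGUE_SIZE: push rbp (1) + mov rbp,rsp (3) +
 * sub rsp,imm32 (7) + four register spills with disp32 (4 * 7) +
 * xor eax,eax (2) + the tail_call_cnt store (7) = 48. Any change to the
 * instructions above must keep this in sync, since tail calls land at
 * prog->bpf_func + PROLOGUE_SIZE.
 */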
267 /* generate the following code:
268 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
269 * if (index >= array->map.max_entries)
271 * if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
273 * prog = array->ptrs[index];
276 * goto *(prog->bpf_func + prologue_size);
279 static void emit_bpf_tail_call(u8 **pprog)
282 int label1, label2, label3;
285 /* rdi - pointer to ctx
286 * rsi - pointer to bpf_array
287 * rdx - index in bpf_array
290 /* if (index >= array->map.max_entries)
293 EMIT2(0x89, 0xD2); /* mov edx, edx */
294 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
295 offsetof(struct bpf_array, map.max_entries));
296 #define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
297 EMIT2(X86_JBE, OFFSET1); /* jbe out */
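/* 'mov edx, edx' writes a 32-bit register and thereby zero-extends it,
 * truncating the 64-bit index to 32 bits before the unsigned compare
 * against max_entries; 'jbe' fires when max_entries <= index.
 */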
300 /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
303 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
304 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
305 #define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
306 EMIT2(X86_JA, OFFSET2); /* ja out */
308 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
309 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
311 /* prog = array->ptrs[index]; */
312 EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
313 offsetof(struct bpf_array, ptrs));
318 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
319 #define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
320 EMIT2(X86_JE, OFFSET3); /* je out */
323 /* goto *(prog->bpf_func + prologue_size); */
324 EMIT4(0x48, 0x8B, 0x40, /* mov rax, qword ptr [rax + 32] */
325 offsetof(struct bpf_prog, bpf_func));
326 EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE); /* add rax, prologue_size */
328 /* now we're ready to jump into next BPF program
329 * rdi == ctx (1st arg)
330 * rax == prog->bpf_func + prologue_size
332 RETPOLINE_RAX_BPF_JIT();
335 BUILD_BUG_ON(cnt - label1 != OFFSET1);
336 BUILD_BUG_ON(cnt - label2 != OFFSET2);
337 BUILD_BUG_ON(cnt - label3 != OFFSET3);
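/* OFFSET1/2/3 are hand-counted byte distances from each conditional jump
 * to the common 'out' label; every EMIT*() adds a constant to 'cnt', so
 * the compiler can fold these BUILD_BUG_ON()s and break the build if the
 * code above changes size. RETPOLINE_RAX_BPF_JIT_SIZE is included because
 * the indirect 'jmp *%rax' is emitted as a retpoline sequence (Spectre v2
 * mitigation) whose length differs from a plain 2-byte indirect jump.
 */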
342 static void emit_load_skb_data_hlen(u8 **pprog)
347 /* r9d = skb->len - skb->data_len (headlen)
350 /* mov %r9d, off32(%rdi) */
351 EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));
353 /* sub %r9d, off32(%rdi) */
354 EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));
356 /* mov %r10, off32(%rdi) */
357 EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
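/* After this, %r9d holds the length of the linear skb area (headlen) and
 * %r10 the skb->data pointer, matching the reg2hex comment above. The
 * sk_load_* helpers consume both, which is why the BPF_CALL case below
 * re-emits this sequence after helpers that may change the skb data.
 */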
361 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
362 int oldproglen, struct jit_context *ctx)
364 struct bpf_insn *insn = bpf_prog->insnsi;
365 int insn_cnt = bpf_prog->len;
366 bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
367 bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
368 bool seen_exit = false;
369 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
374 emit_prologue(&prog);
377 emit_load_skb_data_hlen(&prog);
379 for (i = 0; i < insn_cnt; i++, insn++) {
380 const s32 imm32 = insn->imm;
381 u32 dst_reg = insn->dst_reg;
382 u32 src_reg = insn->src_reg;
383 u8 b1 = 0, b2 = 0, b3 = 0;
386 bool reload_skb_data;
390 if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
391 ctx->seen_ax_reg = seen_ax_reg = true;
393 switch (insn->code) {
395 case BPF_ALU | BPF_ADD | BPF_X:
396 case BPF_ALU | BPF_SUB | BPF_X:
397 case BPF_ALU | BPF_AND | BPF_X:
398 case BPF_ALU | BPF_OR | BPF_X:
399 case BPF_ALU | BPF_XOR | BPF_X:
400 case BPF_ALU64 | BPF_ADD | BPF_X:
401 case BPF_ALU64 | BPF_SUB | BPF_X:
402 case BPF_ALU64 | BPF_AND | BPF_X:
403 case BPF_ALU64 | BPF_OR | BPF_X:
404 case BPF_ALU64 | BPF_XOR | BPF_X:
405 switch (BPF_OP(insn->code)) {
406 case BPF_ADD: b2 = 0x01; break;
407 case BPF_SUB: b2 = 0x29; break;
408 case BPF_AND: b2 = 0x21; break;
409 case BPF_OR: b2 = 0x09; break;
410 case BPF_XOR: b2 = 0x31; break;
412 if (BPF_CLASS(insn->code) == BPF_ALU64)
413 EMIT1(add_2mod(0x48, dst_reg, src_reg));
414 else if (is_ereg(dst_reg) || is_ereg(src_reg))
415 EMIT1(add_2mod(0x40, dst_reg, src_reg));
416 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
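/* Example encoding: BPF_ALU64 | BPF_ADD | BPF_X with dst_reg == BPF_REG_1
 * and src_reg == BPF_REG_2 emits 48 01 f7, i.e. 'add rdi, rsi'. The 32-bit
 * BPF_ALU form omits REX.W, keeping only a 0x40 prefix when extended
 * registers are involved.
 */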
420 case BPF_ALU64 | BPF_MOV | BPF_X:
421 EMIT_mov(dst_reg, src_reg);
425 case BPF_ALU | BPF_MOV | BPF_X:
426 if (is_ereg(dst_reg) || is_ereg(src_reg))
427 EMIT1(add_2mod(0x40, dst_reg, src_reg));
428 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
432 case BPF_ALU | BPF_NEG:
433 case BPF_ALU64 | BPF_NEG:
434 if (BPF_CLASS(insn->code) == BPF_ALU64)
435 EMIT1(add_1mod(0x48, dst_reg));
436 else if (is_ereg(dst_reg))
437 EMIT1(add_1mod(0x40, dst_reg));
438 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
441 case BPF_ALU | BPF_ADD | BPF_K:
442 case BPF_ALU | BPF_SUB | BPF_K:
443 case BPF_ALU | BPF_AND | BPF_K:
444 case BPF_ALU | BPF_OR | BPF_K:
445 case BPF_ALU | BPF_XOR | BPF_K:
446 case BPF_ALU64 | BPF_ADD | BPF_K:
447 case BPF_ALU64 | BPF_SUB | BPF_K:
448 case BPF_ALU64 | BPF_AND | BPF_K:
449 case BPF_ALU64 | BPF_OR | BPF_K:
450 case BPF_ALU64 | BPF_XOR | BPF_K:
451 if (BPF_CLASS(insn->code) == BPF_ALU64)
452 EMIT1(add_1mod(0x48, dst_reg));
453 else if (is_ereg(dst_reg))
454 EMIT1(add_1mod(0x40, dst_reg));
456 switch (BPF_OP(insn->code)) {
457 case BPF_ADD: b3 = 0xC0; break;
458 case BPF_SUB: b3 = 0xE8; break;
459 case BPF_AND: b3 = 0xE0; break;
460 case BPF_OR: b3 = 0xC8; break;
461 case BPF_XOR: b3 = 0xF0; break;
465 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
467 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
470 case BPF_ALU64 | BPF_MOV | BPF_K:
471 /* optimization: if imm32 is positive,
472 * use 'mov eax, imm32' (which zero-extends imm32)
476 /* 'mov rax, imm32' sign extends imm32 */
477 b1 = add_1mod(0x48, dst_reg);
480 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
484 case BPF_ALU | BPF_MOV | BPF_K:
485 /* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
489 if (is_ereg(dst_reg))
490 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
493 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
497 /* mov %eax, imm32 */
498 if (is_ereg(dst_reg))
499 EMIT1(add_1mod(0x40, dst_reg));
500 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
503 case BPF_LD | BPF_IMM | BPF_DW:
504 if (insn[1].code != 0 || insn[1].src_reg != 0 ||
505 insn[1].dst_reg != 0 || insn[1].off != 0) {
506 /* verifier must catch invalid insns */
507 pr_err("invalid BPF_LD_IMM64 insn\n");
511 /* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
514 if (insn[0].imm == 0 && insn[1].imm == 0) {
515 b1 = add_2mod(0x48, dst_reg, dst_reg);
518 EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));
525 /* movabsq %rax, imm64 */
526 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
527 EMIT(insn[0].imm, 4);
528 EMIT(insn[1].imm, 4);
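/* BPF_LD_IMM64 is a 16-byte pseudo instruction: the low 32 bits of the
 * constant live in insn[0].imm and the high 32 bits in insn[1].imm. The
 * two 4-byte EMIT()s above reassemble them behind a REX.W + 0xB8+rd
 * 'movabs' opcode, giving the usual 10-byte mov reg, imm64.
 */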
534 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
535 case BPF_ALU | BPF_MOD | BPF_X:
536 case BPF_ALU | BPF_DIV | BPF_X:
537 case BPF_ALU | BPF_MOD | BPF_K:
538 case BPF_ALU | BPF_DIV | BPF_K:
539 case BPF_ALU64 | BPF_MOD | BPF_X:
540 case BPF_ALU64 | BPF_DIV | BPF_X:
541 case BPF_ALU64 | BPF_MOD | BPF_K:
542 case BPF_ALU64 | BPF_DIV | BPF_K:
543 EMIT1(0x50); /* push rax */
544 EMIT1(0x52); /* push rdx */
546 if (BPF_SRC(insn->code) == BPF_X)
547 /* mov r11, src_reg */
548 EMIT_mov(AUX_REG, src_reg);
551 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
553 /* mov rax, dst_reg */
554 EMIT_mov(BPF_REG_0, dst_reg);
557 * equivalent to 'xor rdx, rdx', but one byte less
561 if (BPF_SRC(insn->code) == BPF_X) {
562 /* if (src_reg == 0) return 0 */
565 EMIT4(0x49, 0x83, 0xFB, 0x00);
567 /* jne .+9 (skip over pop, pop, xor and jmp) */
568 EMIT2(X86_JNE, 1 + 1 + 2 + 5);
569 EMIT1(0x5A); /* pop rdx */
570 EMIT1(0x58); /* pop rax */
571 EMIT2(0x31, 0xc0); /* xor eax, eax */
574 * addrs[i] - 11, because there are 11 bytes
575 * after this insn: div, mov, pop, pop, mov
577 jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
578 EMIT1_off32(0xE9, jmp_offset);
581 if (BPF_CLASS(insn->code) == BPF_ALU64)
583 EMIT3(0x49, 0xF7, 0xF3);
586 EMIT3(0x41, 0xF7, 0xF3);
588 if (BPF_OP(insn->code) == BPF_MOD)
590 EMIT3(0x49, 0x89, 0xD3);
593 EMIT3(0x49, 0x89, 0xC3);
595 EMIT1(0x5A); /* pop rdx */
596 EMIT1(0x58); /* pop rax */
598 /* mov dst_reg, r11 */
599 EMIT_mov(dst_reg, AUX_REG);
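/* The unsigned 'div' instruction implicitly uses rdx:rax, so both are
 * saved around the operation: the divisor goes into r11, rdx is zeroed
 * (the 'xor edx, edx' mentioned above), and after the divide either the
 * quotient (rax) or the remainder (rdx) is parked in r11 before rdx/rax
 * are restored and the result is moved into dst_reg.
 */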
602 case BPF_ALU | BPF_MUL | BPF_K:
603 case BPF_ALU | BPF_MUL | BPF_X:
604 case BPF_ALU64 | BPF_MUL | BPF_K:
605 case BPF_ALU64 | BPF_MUL | BPF_X:
606 EMIT1(0x50); /* push rax */
607 EMIT1(0x52); /* push rdx */
609 /* mov r11, dst_reg */
610 EMIT_mov(AUX_REG, dst_reg);
612 if (BPF_SRC(insn->code) == BPF_X)
613 /* mov rax, src_reg */
614 EMIT_mov(BPF_REG_0, src_reg);
617 EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
619 if (BPF_CLASS(insn->code) == BPF_ALU64)
620 EMIT1(add_1mod(0x48, AUX_REG));
621 else if (is_ereg(AUX_REG))
622 EMIT1(add_1mod(0x40, AUX_REG));
624 EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
627 EMIT_mov(AUX_REG, BPF_REG_0);
629 EMIT1(0x5A); /* pop rdx */
630 EMIT1(0x58); /* pop rax */
632 /* mov dst_reg, r11 */
633 EMIT_mov(dst_reg, AUX_REG);
637 case BPF_ALU | BPF_LSH | BPF_K:
638 case BPF_ALU | BPF_RSH | BPF_K:
639 case BPF_ALU | BPF_ARSH | BPF_K:
640 case BPF_ALU64 | BPF_LSH | BPF_K:
641 case BPF_ALU64 | BPF_RSH | BPF_K:
642 case BPF_ALU64 | BPF_ARSH | BPF_K:
643 if (BPF_CLASS(insn->code) == BPF_ALU64)
644 EMIT1(add_1mod(0x48, dst_reg));
645 else if (is_ereg(dst_reg))
646 EMIT1(add_1mod(0x40, dst_reg));
648 switch (BPF_OP(insn->code)) {
649 case BPF_LSH: b3 = 0xE0; break;
650 case BPF_RSH: b3 = 0xE8; break;
651 case BPF_ARSH: b3 = 0xF8; break;
653 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
656 case BPF_ALU | BPF_LSH | BPF_X:
657 case BPF_ALU | BPF_RSH | BPF_X:
658 case BPF_ALU | BPF_ARSH | BPF_X:
659 case BPF_ALU64 | BPF_LSH | BPF_X:
660 case BPF_ALU64 | BPF_RSH | BPF_X:
661 case BPF_ALU64 | BPF_ARSH | BPF_X:
663 /* check for bad case when dst_reg == rcx */
664 if (dst_reg == BPF_REG_4) {
665 /* mov r11, dst_reg */
666 EMIT_mov(AUX_REG, dst_reg);
670 if (src_reg != BPF_REG_4) { /* common case */
671 EMIT1(0x51); /* push rcx */
673 /* mov rcx, src_reg */
674 EMIT_mov(BPF_REG_4, src_reg);
677 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
678 if (BPF_CLASS(insn->code) == BPF_ALU64)
679 EMIT1(add_1mod(0x48, dst_reg));
680 else if (is_ereg(dst_reg))
681 EMIT1(add_1mod(0x40, dst_reg));
683 switch (BPF_OP(insn->code)) {
684 case BPF_LSH: b3 = 0xE0; break;
685 case BPF_RSH: b3 = 0xE8; break;
686 case BPF_ARSH: b3 = 0xF8; break;
688 EMIT2(0xD3, add_1reg(b3, dst_reg));
690 if (src_reg != BPF_REG_4)
691 EMIT1(0x59); /* pop rcx */
693 if (insn->dst_reg == BPF_REG_4)
694 /* mov dst_reg, r11 */
695 EMIT_mov(insn->dst_reg, AUX_REG);
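/* Variable shifts on x86 only take the count in %cl, hence the shuffling:
 * src_reg is moved into rcx (saving the old rcx unless src already is
 * rcx), and when dst_reg itself is rcx the value is shifted in r11 and
 * copied back afterwards.
 */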
698 case BPF_ALU | BPF_END | BPF_FROM_BE:
701 /* emit 'ror %ax, 8' to swap lower 2 bytes */
703 if (is_ereg(dst_reg))
705 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
707 /* emit 'movzwl eax, ax' */
708 if (is_ereg(dst_reg))
709 EMIT3(0x45, 0x0F, 0xB7);
712 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
715 /* emit 'bswap eax' to swap lower 4 bytes */
716 if (is_ereg(dst_reg))
720 EMIT1(add_1reg(0xC8, dst_reg));
723 /* emit 'bswap rax' to swap 8 bytes */
724 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
725 add_1reg(0xC8, dst_reg));
730 case BPF_ALU | BPF_END | BPF_FROM_LE:
733 /* emit 'movzwl eax, ax' to zero extend 16-bit
736 if (is_ereg(dst_reg))
737 EMIT3(0x45, 0x0F, 0xB7);
740 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
743 /* emit 'mov eax, eax' to clear upper 32-bits */
744 if (is_ereg(dst_reg))
746 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
754 /* ST: *(u8*)(dst_reg + off) = imm */
755 case BPF_ST | BPF_MEM | BPF_B:
756 if (is_ereg(dst_reg))
761 case BPF_ST | BPF_MEM | BPF_H:
762 if (is_ereg(dst_reg))
763 EMIT3(0x66, 0x41, 0xC7);
767 case BPF_ST | BPF_MEM | BPF_W:
768 if (is_ereg(dst_reg))
773 case BPF_ST | BPF_MEM | BPF_DW:
774 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
776 st: if (is_imm8(insn->off))
777 EMIT2(add_1reg(0x40, dst_reg), insn->off);
779 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
781 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
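/* The 0x40 vs 0x80 base chooses the ModRM mod field: mod=01 with an 8-bit
 * displacement when insn->off fits in s8, mod=10 with a 32-bit
 * displacement otherwise. The stx:, ldx: and xadd: labels below use the
 * same pattern.
 */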
784 /* STX: *(u8*)(dst_reg + off) = src_reg */
785 case BPF_STX | BPF_MEM | BPF_B:
786 /* emit 'mov byte ptr [rax + off], al' */
787 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
788 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
789 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
793 case BPF_STX | BPF_MEM | BPF_H:
794 if (is_ereg(dst_reg) || is_ereg(src_reg))
795 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
799 case BPF_STX | BPF_MEM | BPF_W:
800 if (is_ereg(dst_reg) || is_ereg(src_reg))
801 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
805 case BPF_STX | BPF_MEM | BPF_DW:
806 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
807 stx: if (is_imm8(insn->off))
808 EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
810 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
814 /* LDX: dst_reg = *(u8*)(src_reg + off) */
815 case BPF_LDX | BPF_MEM | BPF_B:
816 /* emit 'movzx rax, byte ptr [rax + off]' */
817 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
819 case BPF_LDX | BPF_MEM | BPF_H:
820 /* emit 'movzx rax, word ptr [rax + off]' */
821 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
823 case BPF_LDX | BPF_MEM | BPF_W:
824 /* emit 'mov eax, dword ptr [rax+0x14]' */
825 if (is_ereg(dst_reg) || is_ereg(src_reg))
826 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
830 case BPF_LDX | BPF_MEM | BPF_DW:
831 /* emit 'mov rax, qword ptr [rax+0x14]' */
832 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
833 ldx: /* if insn->off == 0 we can save one extra byte, but
834 * special case of x86 r13 which always needs an offset
835 * is not worth the hassle
837 if (is_imm8(insn->off))
838 EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
840 EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
844 /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
845 case BPF_STX | BPF_XADD | BPF_W:
846 /* emit 'lock add dword ptr [rax + off], eax' */
847 if (is_ereg(dst_reg) || is_ereg(src_reg))
848 EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
852 case BPF_STX | BPF_XADD | BPF_DW:
853 EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
854 xadd: if (is_imm8(insn->off))
855 EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
857 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
862 case BPF_JMP | BPF_CALL:
863 func = (u8 *) __bpf_call_base + imm32;
864 jmp_offset = func - (image + addrs[i]);
866 reload_skb_data = bpf_helper_changes_skb_data(func);
867 if (reload_skb_data) {
868 EMIT1(0x57); /* push %rdi */
869 jmp_offset += 22; /* pop, mov, sub, mov */
871 EMIT2(0x41, 0x52); /* push %r10 */
872 EMIT2(0x41, 0x51); /* push %r9 */
873 /* need to adjust jmp offset, since
874 * pop %r9, pop %r10 take 4 bytes after call insn
879 if (!imm32 || !is_simm32(jmp_offset)) {
880 pr_err("unsupported bpf func %d addr %p image %p\n",
884 EMIT1_off32(0xE8, jmp_offset);
886 if (reload_skb_data) {
887 EMIT1(0x5F); /* pop %rdi */
888 emit_load_skb_data_hlen(&prog);
890 EMIT2(0x41, 0x59); /* pop %r9 */
891 EMIT2(0x41, 0x5A); /* pop %r10 */
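/* The rel32 in the 0xE8 call is relative to the end of the call
 * instruction, while addrs[i] marks the end of the whole JITed insn, so
 * the bytes emitted after the call (the pops and, when skb data is
 * reloaded, the pop/mov/sub/mov sequence) are added back into jmp_offset
 * above. Helpers themselves are addressed as __bpf_call_base + imm32.
 */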
896 case BPF_JMP | BPF_CALL | BPF_X:
897 emit_bpf_tail_call(&prog);
901 case BPF_JMP | BPF_JEQ | BPF_X:
902 case BPF_JMP | BPF_JNE | BPF_X:
903 case BPF_JMP | BPF_JGT | BPF_X:
904 case BPF_JMP | BPF_JGE | BPF_X:
905 case BPF_JMP | BPF_JSGT | BPF_X:
906 case BPF_JMP | BPF_JSGE | BPF_X:
907 /* cmp dst_reg, src_reg */
908 EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
909 add_2reg(0xC0, dst_reg, src_reg));
912 case BPF_JMP | BPF_JSET | BPF_X:
913 /* test dst_reg, src_reg */
914 EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
915 add_2reg(0xC0, dst_reg, src_reg));
918 case BPF_JMP | BPF_JSET | BPF_K:
919 /* test dst_reg, imm32 */
920 EMIT1(add_1mod(0x48, dst_reg));
921 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
924 case BPF_JMP | BPF_JEQ | BPF_K:
925 case BPF_JMP | BPF_JNE | BPF_K:
926 case BPF_JMP | BPF_JGT | BPF_K:
927 case BPF_JMP | BPF_JGE | BPF_K:
928 case BPF_JMP | BPF_JSGT | BPF_K:
929 case BPF_JMP | BPF_JSGE | BPF_K:
930 /* cmp dst_reg, imm8/32 */
931 EMIT1(add_1mod(0x48, dst_reg));
934 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
936 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
938 emit_cond_jmp: /* convert BPF opcode to x86 */
939 switch (BPF_OP(insn->code)) {
948 /* GT is unsigned '>', JA in x86 */
952 /* GE is unsigned '>=', JAE in x86 */
956 /* signed '>', GT in x86 */
960 /* signed '>=', GE in x86 */
963 default: /* to silence gcc warning */
966 jmp_offset = addrs[i + insn->off] - addrs[i];
967 if (is_imm8(jmp_offset)) {
968 EMIT2(jmp_cond, jmp_offset);
969 } else if (is_simm32(jmp_offset)) {
970 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
972 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
978 case BPF_JMP | BPF_JA:
979 jmp_offset = addrs[i + insn->off] - addrs[i];
981 /* optimize out nop jumps */
984 if (is_imm8(jmp_offset)) {
985 EMIT2(0xEB, jmp_offset);
986 } else if (is_simm32(jmp_offset)) {
987 EMIT1_off32(0xE9, jmp_offset);
989 pr_err("jmp gen bug %llx\n", jmp_offset);
994 case BPF_LD | BPF_IND | BPF_W:
997 case BPF_LD | BPF_ABS | BPF_W:
998 func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
1000 ctx->seen_ld_abs = seen_ld_abs = true;
1001 jmp_offset = func - (image + addrs[i]);
1002 if (!func || !is_simm32(jmp_offset)) {
1003 pr_err("unsupported bpf func %d addr %p image %p\n",
1004 imm32, func, image);
1007 if (BPF_MODE(insn->code) == BPF_ABS) {
1008 /* mov %esi, imm32 */
1009 EMIT1_off32(0xBE, imm32);
1011 /* mov %rsi, src_reg */
1012 EMIT_mov(BPF_REG_2, src_reg);
1015 /* add %esi, imm8 */
1016 EMIT3(0x83, 0xC6, imm32);
1018 /* add %esi, imm32 */
1019 EMIT2_off32(0x81, 0xC6, imm32);
1022 /* skb pointer is in R6 (%rbx), it will be copied into
1023 * %rdi if skb_copy_bits() call is necessary.
1024 * sk_load_* helpers also use %r10 and %r9d.
1028 /* r10 = skb->data, mov %r10, off32(%rbx) */
1029 EMIT3_off32(0x4c, 0x8b, 0x93,
1030 offsetof(struct sk_buff, data));
1031 EMIT1_off32(0xE8, jmp_offset); /* call */
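/* Calling convention into the bpf_jit.S helpers: the packet offset is
 * passed in %esi, the skb stays in %rbx (copied to %rdi only if
 * skb_copy_bits() is needed) and %r9d/%r10 carry headlen/skb->data as set
 * up by emit_load_skb_data_hlen(). The helpers return the loaded value in
 * %eax or take the shared error exit on an out-of-bounds access (see
 * bpf_jit.S for the details).
 */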
1034 case BPF_LD | BPF_IND | BPF_H:
1035 func = sk_load_half;
1037 case BPF_LD | BPF_ABS | BPF_H:
1038 func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
1040 case BPF_LD | BPF_IND | BPF_B:
1041 func = sk_load_byte;
1043 case BPF_LD | BPF_ABS | BPF_B:
1044 func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
1047 case BPF_JMP | BPF_EXIT:
1049 jmp_offset = ctx->cleanup_addr - addrs[i];
1053 /* update cleanup_addr */
1054 ctx->cleanup_addr = proglen;
1055 /* mov rbx, qword ptr [rbp-X] */
1056 EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
1057 /* mov r13, qword ptr [rbp-X] */
1058 EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
1059 /* mov r14, qword ptr [rbp-X] */
1060 EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
1061 /* mov r15, qword ptr [rbp-X] */
1062 EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);
1064 EMIT1(0xC9); /* leave */
1065 EMIT1(0xC3); /* ret */
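/* Only the first BPF_EXIT emits this epilogue; its offset is remembered
 * in cleanup_addr so that later exits (and the divide-by-zero bail-out
 * above) can simply jump here instead of duplicating the register
 * restores.
 */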
1069 /* By design the x64 JIT should support all BPF instructions.
1070 * This error will be seen if a new instruction was added
1071 * to the interpreter but not to the JIT,
1072 * or if there is junk in bpf_prog
1074 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1079 if (ilen > BPF_MAX_INSN_SIZE) {
1080 pr_err("bpf_jit_compile fatal insn size error\n");
1086 * When populating the image, assert that:
1088 * i) We do not write beyond the allocated space, and
1089 * ii) addrs[i] did not change from the prior run, in order
1090 * to validate assumptions made for computing branch
1093 if (unlikely(proglen + ilen > oldproglen ||
1094 proglen + ilen != addrs[i])) {
1095 pr_err("bpf_jit_compile fatal error\n");
1098 memcpy(image + proglen, temp, ilen);
1107 void bpf_jit_compile(struct bpf_prog *prog)
1111 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1113 struct bpf_binary_header *header = NULL;
1114 struct bpf_prog *tmp, *orig_prog = prog;
1115 int proglen, oldproglen = 0;
1116 struct jit_context ctx = {};
1117 bool tmp_blinded = false;
1123 if (!bpf_jit_enable)
1126 tmp = bpf_jit_blind_constants(prog);
1127 /* If blinding was requested and we failed during blinding,
1128 * we must fall back to the interpreter.
1137 addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
1143 /* Before the first pass, make a rough estimate of addrs[]:
1144 * each BPF instruction is translated to less than 64 bytes
1146 for (proglen = 0, i = 0; i < prog->len; i++) {
1150 ctx.cleanup_addr = proglen;
1152 /* JITed image shrinks with every pass and the loop iterates
1153 * until the image stops shrinking. Very large bpf programs
1154 * may converge on the last pass. In such a case, do one more
1155 * pass to emit the final image
1157 for (pass = 0; pass < 20 || image; pass++) {
1158 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
1162 bpf_jit_binary_free(header);
1167 if (proglen != oldproglen) {
1168 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
1169 proglen, oldproglen);
1175 if (proglen == oldproglen) {
1176 header = bpf_jit_binary_alloc(proglen, &image,
1183 oldproglen = proglen;
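/* Passes run with image == NULL merely recompute addrs[]: starting from
 * the generous 64-bytes-per-insn estimate, each pass can only shrink the
 * program as shorter jump encodings become possible. Once proglen stops
 * changing, the binary image is allocated and one more pass actually
 * writes the machine code, which is cross-checked against addrs[] by the
 * populate-time assert in do_jit().
 */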
1187 if (bpf_jit_enable > 1)
1188 bpf_jit_dump(prog->len, proglen, pass + 1, image);
1191 bpf_flush_icache(header, image + proglen);
1192 set_memory_ro((unsigned long)header, header->pages);
1193 prog->bpf_func = (void *)image;
1203 bpf_jit_prog_release_other(prog, prog == orig_prog ?
1208 void bpf_jit_free(struct bpf_prog *fp)
1210 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1211 struct bpf_binary_header *header = (void *)addr;
1216 set_memory_rw(addr, header->pages);
1217 bpf_jit_binary_free(header);
1220 bpf_prog_unlock_free(fp);