arch/x86/net/bpf_jit_comp.c (GNU Linux-libre 6.8.7-gnu)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * BPF JIT compiler
4  *
5  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6  * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7  */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/ftrace.h>
16 #include <asm/set_memory.h>
17 #include <asm/nospec-branch.h>
18 #include <asm/text-patching.h>
19 #include <asm/unwind.h>
20 #include <asm/cfi.h>
21
22 static bool all_callee_regs_used[4] = {true, true, true, true};
23
24 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
25 {
26         if (len == 1)
27                 *ptr = bytes;
28         else if (len == 2)
29                 *(u16 *)ptr = bytes;
30         else {
31                 *(u32 *)ptr = bytes;
32                 barrier();
33         }
34         return ptr + len;
35 }
36
37 #define EMIT(bytes, len) \
38         do { prog = emit_code(prog, bytes, len); } while (0)
39
40 #define EMIT1(b1)               EMIT(b1, 1)
41 #define EMIT2(b1, b2)           EMIT((b1) + ((b2) << 8), 2)
42 #define EMIT3(b1, b2, b3)       EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
43 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
44
45 #define EMIT1_off32(b1, off) \
46         do { EMIT1(b1); EMIT(off, 4); } while (0)
47 #define EMIT2_off32(b1, b2, off) \
48         do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
49 #define EMIT3_off32(b1, b2, b3, off) \
50         do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
51 #define EMIT4_off32(b1, b2, b3, b4, off) \
52         do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
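/*
 * A quick illustration of the packing above (register/byte choice is just an
 * example): the byte arguments are packed little-endian into a u32, so
 * emit_code() writes them to memory in argument order. EMIT3(0x48, 0x89, 0xE5)
 * packs to 0x00E58948 and lands in the image as the byte sequence 48 89 E5,
 * i.e. 'mov rbp, rsp'. For len == 3 the store is still a 4-byte write, but the
 * cursor only advances 3 bytes and the spare byte is overwritten by the next
 * EMIT().
 */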
53
54 #ifdef CONFIG_X86_KERNEL_IBT
55 #define EMIT_ENDBR()            EMIT(gen_endbr(), 4)
56 #define EMIT_ENDBR_POISON()     EMIT(gen_endbr_poison(), 4)
57 #else
58 #define EMIT_ENDBR()
59 #define EMIT_ENDBR_POISON()
60 #endif
61
62 static bool is_imm8(int value)
63 {
64         return value <= 127 && value >= -128;
65 }
66
67 static bool is_simm32(s64 value)
68 {
69         return value == (s64)(s32)value;
70 }
71
72 static bool is_uimm32(u64 value)
73 {
74         return value == (u64)(u32)value;
75 }
76
77 /* mov dst, src */
78 #define EMIT_mov(DST, SRC)                                                               \
79         do {                                                                             \
80                 if (DST != SRC)                                                          \
81                         EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
82         } while (0)
83
84 static int bpf_size_to_x86_bytes(int bpf_size)
85 {
86         if (bpf_size == BPF_W)
87                 return 4;
88         else if (bpf_size == BPF_H)
89                 return 2;
90         else if (bpf_size == BPF_B)
91                 return 1;
92         else if (bpf_size == BPF_DW)
93                 return 4; /* imm32 */
94         else
95                 return 0;
96 }
97
98 /*
99  * List of x86 cond jumps opcodes (. + s8)
100  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
101  */
102 #define X86_JB  0x72
103 #define X86_JAE 0x73
104 #define X86_JE  0x74
105 #define X86_JNE 0x75
106 #define X86_JBE 0x76
107 #define X86_JA  0x77
108 #define X86_JL  0x7C
109 #define X86_JGE 0x7D
110 #define X86_JLE 0x7E
111 #define X86_JG  0x7F
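/*
 * Example of the transformation described above: the short form X86_JE
 * (0x74, rel8) becomes the near form 0F 84 (rel32), i.e. 0x74 + 0x10 = 0x84
 * prefixed with 0x0f; likewise X86_JAE (0x73) becomes 0F 83. The displacement
 * grows from 1 byte to 4 bytes.
 */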
112
113 /* Pick a register outside of BPF range for JIT internal work */
114 #define AUX_REG (MAX_BPF_JIT_REG + 1)
115 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
116
117 /*
118  * The following table maps BPF registers to x86-64 registers.
119  *
120  * x86-64 register R12 is unused, since if used as base address
121  * register in load/store instructions, it always needs an
122  * extra byte of encoding and is callee saved.
123  *
124  * x86-64 register R9 is not used by BPF programs, but can be used by BPF
125  * trampoline. x86-64 register R10 is used for blinding (if enabled).
126  */
127 static const int reg2hex[] = {
128         [BPF_REG_0] = 0,  /* RAX */
129         [BPF_REG_1] = 7,  /* RDI */
130         [BPF_REG_2] = 6,  /* RSI */
131         [BPF_REG_3] = 2,  /* RDX */
132         [BPF_REG_4] = 1,  /* RCX */
133         [BPF_REG_5] = 0,  /* R8  */
134         [BPF_REG_6] = 3,  /* RBX callee saved */
135         [BPF_REG_7] = 5,  /* R13 callee saved */
136         [BPF_REG_8] = 6,  /* R14 callee saved */
137         [BPF_REG_9] = 7,  /* R15 callee saved */
138         [BPF_REG_FP] = 5, /* RBP readonly */
139         [BPF_REG_AX] = 2, /* R10 temp register */
140         [AUX_REG] = 3,    /* R11 temp register */
141         [X86_REG_R9] = 1, /* R9 register, 6th function argument */
142 };
143
144 static const int reg2pt_regs[] = {
145         [BPF_REG_0] = offsetof(struct pt_regs, ax),
146         [BPF_REG_1] = offsetof(struct pt_regs, di),
147         [BPF_REG_2] = offsetof(struct pt_regs, si),
148         [BPF_REG_3] = offsetof(struct pt_regs, dx),
149         [BPF_REG_4] = offsetof(struct pt_regs, cx),
150         [BPF_REG_5] = offsetof(struct pt_regs, r8),
151         [BPF_REG_6] = offsetof(struct pt_regs, bx),
152         [BPF_REG_7] = offsetof(struct pt_regs, r13),
153         [BPF_REG_8] = offsetof(struct pt_regs, r14),
154         [BPF_REG_9] = offsetof(struct pt_regs, r15),
155 };
156
157 /*
158  * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
159  * which need an extra byte of encoding.
160  * rax, rcx, ..., rbp have a simpler encoding.
161  */
162 static bool is_ereg(u32 reg)
163 {
164         return (1 << reg) & (BIT(BPF_REG_5) |
165                              BIT(AUX_REG) |
166                              BIT(BPF_REG_7) |
167                              BIT(BPF_REG_8) |
168                              BIT(BPF_REG_9) |
169                              BIT(X86_REG_R9) |
170                              BIT(BPF_REG_AX));
171 }
172
173 /*
174  * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
175  * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need an extra
176  * byte of encoding. al, cl, dl, bl have a simpler encoding.
177  */
178 static bool is_ereg_8l(u32 reg)
179 {
180         return is_ereg(reg) ||
181             (1 << reg) & (BIT(BPF_REG_1) |
182                           BIT(BPF_REG_2) |
183                           BIT(BPF_REG_FP));
184 }
185
186 static bool is_axreg(u32 reg)
187 {
188         return reg == BPF_REG_0;
189 }
190
191 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
192 static u8 add_1mod(u8 byte, u32 reg)
193 {
194         if (is_ereg(reg))
195                 byte |= 1;
196         return byte;
197 }
198
199 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
200 {
201         if (is_ereg(r1))
202                 byte |= 1;
203         if (is_ereg(r2))
204                 byte |= 4;
205         return byte;
206 }
207
208 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
209 static u8 add_1reg(u8 byte, u32 dst_reg)
210 {
211         return byte + reg2hex[dst_reg];
212 }
213
214 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
215 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
216 {
217         return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
218 }
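/*
 * Worked example of how the helpers above compose (register choice is
 * arbitrary): EMIT_mov(BPF_REG_6, BPF_REG_7) copies R7 (r13) into R6 (rbx).
 * add_2mod(0x48, dst, src) yields REX 0x4C (W=1, R=1 because r13 is an
 * extended register), the opcode is 0x89 (mov r/m64, r64), and
 * add_2reg(0xC0, dst, src) yields ModRM 0xEB (mod=11, reg=101/r13,
 * rm=011/rbx), giving the bytes 4C 89 EB, i.e. 'mov rbx, r13'.
 */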
219
220 /* Some 1-byte opcodes for binary ALU operations */
221 static u8 simple_alu_opcodes[] = {
222         [BPF_ADD] = 0x01,
223         [BPF_SUB] = 0x29,
224         [BPF_AND] = 0x21,
225         [BPF_OR] = 0x09,
226         [BPF_XOR] = 0x31,
227         [BPF_LSH] = 0xE0,
228         [BPF_RSH] = 0xE8,
229         [BPF_ARSH] = 0xF8,
230 };
231
232 static void jit_fill_hole(void *area, unsigned int size)
233 {
234         /* Fill whole space with INT3 instructions */
235         memset(area, 0xcc, size);
236 }
237
238 int bpf_arch_text_invalidate(void *dst, size_t len)
239 {
240         return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
241 }
242
243 struct jit_context {
244         int cleanup_addr; /* Epilogue code offset */
245
246         /*
247          * Program specific offsets of labels in the code; these rely on the
248          * JIT doing at least 2 passes, recording the position on the first
249          * pass, only to generate the correct offset on the second pass.
250          */
251         int tail_call_direct_label;
252         int tail_call_indirect_label;
253 };
254
255 /* Maximum number of bytes emitted while JITing one eBPF insn */
256 #define BPF_MAX_INSN_SIZE       128
257 #define BPF_INSN_SAFETY         64
258
259 /* Number of bytes emit_patch() needs to generate instructions */
260 #define X86_PATCH_SIZE          5
261 /* Number of bytes that will be skipped on tailcall */
262 #define X86_TAIL_CALL_OFFSET    (11 + ENDBR_INSN_SIZE)
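/*
 * Rough byte accounting for the offset above, assuming a regular eBPF
 * program (not converted from cBPF, not an exception callback): ENDBR on
 * IBT builds (ENDBR_INSN_SIZE), the 5-byte patchable nop area for the
 * trampoline hook, 2 bytes of 'xor eax, eax' (tail_call_cnt init) or nop2,
 * 1 byte of 'push rbp' and 3 bytes of 'mov rbp, rsp' = 11 + ENDBR_INSN_SIZE.
 * A tail call thus lands at the point emit_prologue() marks with
 * "X86_TAIL_CALL_OFFSET is here", keeping the current rbp and the
 * tail_call_cnt carried in rax instead of re-initializing them.
 */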
263
264 static void push_r12(u8 **pprog)
265 {
266         u8 *prog = *pprog;
267
268         EMIT2(0x41, 0x54);   /* push r12 */
269         *pprog = prog;
270 }
271
272 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
273 {
274         u8 *prog = *pprog;
275
276         if (callee_regs_used[0])
277                 EMIT1(0x53);         /* push rbx */
278         if (callee_regs_used[1])
279                 EMIT2(0x41, 0x55);   /* push r13 */
280         if (callee_regs_used[2])
281                 EMIT2(0x41, 0x56);   /* push r14 */
282         if (callee_regs_used[3])
283                 EMIT2(0x41, 0x57);   /* push r15 */
284         *pprog = prog;
285 }
286
287 static void pop_r12(u8 **pprog)
288 {
289         u8 *prog = *pprog;
290
291         EMIT2(0x41, 0x5C);   /* pop r12 */
292         *pprog = prog;
293 }
294
295 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
296 {
297         u8 *prog = *pprog;
298
299         if (callee_regs_used[3])
300                 EMIT2(0x41, 0x5F);   /* pop r15 */
301         if (callee_regs_used[2])
302                 EMIT2(0x41, 0x5E);   /* pop r14 */
303         if (callee_regs_used[1])
304                 EMIT2(0x41, 0x5D);   /* pop r13 */
305         if (callee_regs_used[0])
306                 EMIT1(0x5B);         /* pop rbx */
307         *pprog = prog;
308 }
309
310 static void emit_nops(u8 **pprog, int len)
311 {
312         u8 *prog = *pprog;
313         int i, noplen;
314
315         while (len > 0) {
316                 noplen = len;
317
318                 if (noplen > ASM_NOP_MAX)
319                         noplen = ASM_NOP_MAX;
320
321                 for (i = 0; i < noplen; i++)
322                         EMIT1(x86_nops[noplen][i]);
323                 len -= noplen;
324         }
325
326         *pprog = prog;
327 }
328
329 /*
330  * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
331  * in arch/x86/kernel/alternative.c
332  */
333
334 static void emit_fineibt(u8 **pprog, u32 hash)
335 {
336         u8 *prog = *pprog;
337
338         EMIT_ENDBR();
339         EMIT3_off32(0x41, 0x81, 0xea, hash);            /* subl $hash, %r10d    */
340         EMIT2(0x74, 0x07);                              /* jz.d8 +7             */
341         EMIT2(0x0f, 0x0b);                              /* ud2                  */
342         EMIT1(0x90);                                    /* nop                  */
343         EMIT_ENDBR_POISON();
344
345         *pprog = prog;
346 }
347
348 static void emit_kcfi(u8 **pprog, u32 hash)
349 {
350         u8 *prog = *pprog;
351
352         EMIT1_off32(0xb8, hash);                        /* movl $hash, %eax     */
353 #ifdef CONFIG_CALL_PADDING
354         EMIT1(0x90);
355         EMIT1(0x90);
356         EMIT1(0x90);
357         EMIT1(0x90);
358         EMIT1(0x90);
359         EMIT1(0x90);
360         EMIT1(0x90);
361         EMIT1(0x90);
362         EMIT1(0x90);
363         EMIT1(0x90);
364         EMIT1(0x90);
365 #endif
366         EMIT_ENDBR();
367
368         *pprog = prog;
369 }
370
371 static void emit_cfi(u8 **pprog, u32 hash)
372 {
373         u8 *prog = *pprog;
374
375         switch (cfi_mode) {
376         case CFI_FINEIBT:
377                 emit_fineibt(&prog, hash);
378                 break;
379
380         case CFI_KCFI:
381                 emit_kcfi(&prog, hash);
382                 break;
383
384         default:
385                 EMIT_ENDBR();
386                 break;
387         }
388
389         *pprog = prog;
390 }
391
392 /*
393  * Emit x86-64 prologue code for BPF program.
394  * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
395  * while jumping to another program
396  */
397 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
398                           bool tail_call_reachable, bool is_subprog,
399                           bool is_exception_cb)
400 {
401         u8 *prog = *pprog;
402
403         emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
404         /* BPF trampoline can be made to work without these nops,
405          * but let's waste 5 bytes for now and optimize later
406          */
407         emit_nops(&prog, X86_PATCH_SIZE);
408         if (!ebpf_from_cbpf) {
409                 if (tail_call_reachable && !is_subprog)
410                         /* When it's the entry of the whole tailcall context,
411                          * zeroing rax means initialising tail_call_cnt.
412                          */
413                         EMIT2(0x31, 0xC0); /* xor eax, eax */
414                 else
415                         /* Keep the same instruction layout. */
416                         EMIT2(0x66, 0x90); /* nop2 */
417         }
418         /* Exception callback receives FP as third parameter */
419         if (is_exception_cb) {
420                 EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
421                 EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
422                 /* The main frame must have exception_boundary as true, so we
423                  * first restore those callee-saved regs from stack, before
424                  * reusing the stack frame.
425                  */
426                 pop_callee_regs(&prog, all_callee_regs_used);
427                 pop_r12(&prog);
428                 /* Reset the stack frame. */
429                 EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
430         } else {
431                 EMIT1(0x55);             /* push rbp */
432                 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
433         }
434
435         /* X86_TAIL_CALL_OFFSET is here */
436         EMIT_ENDBR();
437
438         /* sub rsp, rounded_stack_depth */
439         if (stack_depth)
440                 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
441         if (tail_call_reachable)
442                 EMIT1(0x50);         /* push rax */
443         *pprog = prog;
444 }
445
446 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
447 {
448         u8 *prog = *pprog;
449         s64 offset;
450
451         offset = func - (ip + X86_PATCH_SIZE);
452         if (!is_simm32(offset)) {
453                 pr_err("Target call %p is out of range\n", func);
454                 return -ERANGE;
455         }
456         EMIT1_off32(opcode, offset);
457         *pprog = prog;
458         return 0;
459 }
460
461 static int emit_call(u8 **pprog, void *func, void *ip)
462 {
463         return emit_patch(pprog, func, ip, 0xE8);
464 }
465
466 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
467 {
468         OPTIMIZER_HIDE_VAR(func);
469         ip += x86_call_depth_emit_accounting(pprog, func);
470         return emit_patch(pprog, func, ip, 0xE8);
471 }
472
473 static int emit_jump(u8 **pprog, void *func, void *ip)
474 {
475         return emit_patch(pprog, func, ip, 0xE9);
476 }
477
478 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
479                                 void *old_addr, void *new_addr)
480 {
481         const u8 *nop_insn = x86_nops[5];
482         u8 old_insn[X86_PATCH_SIZE];
483         u8 new_insn[X86_PATCH_SIZE];
484         u8 *prog;
485         int ret;
486
487         memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
488         if (old_addr) {
489                 prog = old_insn;
490                 ret = t == BPF_MOD_CALL ?
491                       emit_call(&prog, old_addr, ip) :
492                       emit_jump(&prog, old_addr, ip);
493                 if (ret)
494                         return ret;
495         }
496
497         memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
498         if (new_addr) {
499                 prog = new_insn;
500                 ret = t == BPF_MOD_CALL ?
501                       emit_call(&prog, new_addr, ip) :
502                       emit_jump(&prog, new_addr, ip);
503                 if (ret)
504                         return ret;
505         }
506
507         ret = -EBUSY;
508         mutex_lock(&text_mutex);
509         if (memcmp(ip, old_insn, X86_PATCH_SIZE))
510                 goto out;
511         ret = 1;
512         if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
513                 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
514                 ret = 0;
515         }
516 out:
517         mutex_unlock(&text_mutex);
518         return ret;
519 }
520
521 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
522                        void *old_addr, void *new_addr)
523 {
524         if (!is_kernel_text((long)ip) &&
525             !is_bpf_text_address((long)ip))
526                 /* BPF poking in modules is not supported */
527                 return -EINVAL;
528
529         /*
530          * See emit_prologue(), for IBT builds the trampoline hook is preceded
531          * with an ENDBR instruction.
532          */
533         if (is_endbr(*(u32 *)ip))
534                 ip += ENDBR_INSN_SIZE;
535
536         return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
537 }
538
539 #define EMIT_LFENCE()   EMIT3(0x0F, 0xAE, 0xE8)
540
541 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
542 {
543         u8 *prog = *pprog;
544
545         if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
546                 EMIT_LFENCE();
547                 EMIT2(0xFF, 0xE0 + reg);
548         } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
549                 OPTIMIZER_HIDE_VAR(reg);
550                 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
551                         emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
552                 else
553                         emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
554         } else {
555                 EMIT2(0xFF, 0xE0 + reg);        /* jmp *%\reg */
556                 if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
557                         EMIT1(0xCC);            /* int3 */
558         }
559
560         *pprog = prog;
561 }
562
563 static void emit_return(u8 **pprog, u8 *ip)
564 {
565         u8 *prog = *pprog;
566
567         if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
568                 emit_jump(&prog, x86_return_thunk, ip);
569         } else {
570                 EMIT1(0xC3);            /* ret */
571                 if (IS_ENABLED(CONFIG_SLS))
572                         EMIT1(0xCC);    /* int3 */
573         }
574
575         *pprog = prog;
576 }
577
578 /*
579  * Generate the following code:
580  *
581  * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
582  *   if (index >= array->map.max_entries)
583  *     goto out;
584  *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
585  *     goto out;
586  *   prog = array->ptrs[index];
587  *   if (prog == NULL)
588  *     goto out;
589  *   goto *(prog->bpf_func + prologue_size);
590  * out:
591  */
592 static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
593                                         u8 **pprog, bool *callee_regs_used,
594                                         u32 stack_depth, u8 *ip,
595                                         struct jit_context *ctx)
596 {
597         int tcc_off = -4 - round_up(stack_depth, 8);
598         u8 *prog = *pprog, *start = *pprog;
599         int offset;
600
601         /*
602          * rdi - pointer to ctx
603          * rsi - pointer to bpf_array
604          * rdx - index in bpf_array
605          */
606
607         /*
608          * if (index >= array->map.max_entries)
609          *      goto out;
610          */
611         EMIT2(0x89, 0xD2);                        /* mov edx, edx */
612         EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
613               offsetof(struct bpf_array, map.max_entries));
614
615         offset = ctx->tail_call_indirect_label - (prog + 2 - start);
616         EMIT2(X86_JBE, offset);                   /* jbe out */
617
618         /*
619          * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
620          *      goto out;
621          */
622         EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
623         EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
624
625         offset = ctx->tail_call_indirect_label - (prog + 2 - start);
626         EMIT2(X86_JAE, offset);                   /* jae out */
627         EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
628         EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */
629
630         /* prog = array->ptrs[index]; */
631         EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
632                     offsetof(struct bpf_array, ptrs));
633
634         /*
635          * if (prog == NULL)
636          *      goto out;
637          */
638         EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
639
640         offset = ctx->tail_call_indirect_label - (prog + 2 - start);
641         EMIT2(X86_JE, offset);                    /* je out */
642
643         if (bpf_prog->aux->exception_boundary) {
644                 pop_callee_regs(&prog, all_callee_regs_used);
645                 pop_r12(&prog);
646         } else {
647                 pop_callee_regs(&prog, callee_regs_used);
648         }
649
650         EMIT1(0x58);                              /* pop rax */
651         if (stack_depth)
652                 EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
653                             round_up(stack_depth, 8));
654
655         /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
656         EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
657               offsetof(struct bpf_prog, bpf_func));
658         EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
659               X86_TAIL_CALL_OFFSET);
660         /*
661          * Now we're ready to jump into next BPF program
662          * rdi == ctx (1st arg)
663          * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
664          */
665         emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
666
667         /* out: */
668         ctx->tail_call_indirect_label = prog - start;
669         *pprog = prog;
670 }
671
672 static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
673                                       struct bpf_jit_poke_descriptor *poke,
674                                       u8 **pprog, u8 *ip,
675                                       bool *callee_regs_used, u32 stack_depth,
676                                       struct jit_context *ctx)
677 {
678         int tcc_off = -4 - round_up(stack_depth, 8);
679         u8 *prog = *pprog, *start = *pprog;
680         int offset;
681
682         /*
683          * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
684          *      goto out;
685          */
686         EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
687         EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
688
689         offset = ctx->tail_call_direct_label - (prog + 2 - start);
690         EMIT2(X86_JAE, offset);                       /* jae out */
691         EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
692         EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */
693
694         poke->tailcall_bypass = ip + (prog - start);
695         poke->adj_off = X86_TAIL_CALL_OFFSET;
696         poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
697         poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
698
699         emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
700                   poke->tailcall_bypass);
701
702         if (bpf_prog->aux->exception_boundary) {
703                 pop_callee_regs(&prog, all_callee_regs_used);
704                 pop_r12(&prog);
705         } else {
706                 pop_callee_regs(&prog, callee_regs_used);
707         }
708
709         EMIT1(0x58);                                  /* pop rax */
710         if (stack_depth)
711                 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
712
713         emit_nops(&prog, X86_PATCH_SIZE);
714
715         /* out: */
716         ctx->tail_call_direct_label = prog - start;
717
718         *pprog = prog;
719 }
720
721 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
722 {
723         struct bpf_jit_poke_descriptor *poke;
724         struct bpf_array *array;
725         struct bpf_prog *target;
726         int i, ret;
727
728         for (i = 0; i < prog->aux->size_poke_tab; i++) {
729                 poke = &prog->aux->poke_tab[i];
730                 if (poke->aux && poke->aux != prog->aux)
731                         continue;
732
733                 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
734
735                 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
736                         continue;
737
738                 array = container_of(poke->tail_call.map, struct bpf_array, map);
739                 mutex_lock(&array->aux->poke_mutex);
740                 target = array->ptrs[poke->tail_call.key];
741                 if (target) {
742                         ret = __bpf_arch_text_poke(poke->tailcall_target,
743                                                    BPF_MOD_JUMP, NULL,
744                                                    (u8 *)target->bpf_func +
745                                                    poke->adj_off);
746                         BUG_ON(ret < 0);
747                         ret = __bpf_arch_text_poke(poke->tailcall_bypass,
748                                                    BPF_MOD_JUMP,
749                                                    (u8 *)poke->tailcall_target +
750                                                    X86_PATCH_SIZE, NULL);
751                         BUG_ON(ret < 0);
752                 }
753                 WRITE_ONCE(poke->tailcall_target_stable, true);
754                 mutex_unlock(&array->aux->poke_mutex);
755         }
756 }
757
758 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
759                            u32 dst_reg, const u32 imm32)
760 {
761         u8 *prog = *pprog;
762         u8 b1, b2, b3;
763
764         /*
765          * Optimization: if imm32 is positive, use 'mov %eax, imm32'
766          * (which zero-extends imm32) to save 2 bytes.
767          */
768         if (sign_propagate && (s32)imm32 < 0) {
769                 /* 'mov %rax, imm32' sign extends imm32 */
770                 b1 = add_1mod(0x48, dst_reg);
771                 b2 = 0xC7;
772                 b3 = 0xC0;
773                 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
774                 goto done;
775         }
776
777         /*
778          * Optimization: if imm32 is zero, use 'xor %eax, %eax'
779          * to save 3 bytes.
780          */
781         if (imm32 == 0) {
782                 if (is_ereg(dst_reg))
783                         EMIT1(add_2mod(0x40, dst_reg, dst_reg));
784                 b2 = 0x31; /* xor */
785                 b3 = 0xC0;
786                 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
787                 goto done;
788         }
789
790         /* mov %eax, imm32 */
791         if (is_ereg(dst_reg))
792                 EMIT1(add_1mod(0x40, dst_reg));
793         EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
794 done:
795         *pprog = prog;
796 }
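/*
 * Illustrative encodings for dst_reg == BPF_REG_1 (rdi), covering the three
 * paths above: imm32 == 0 emits 31 FF ('xor edi, edi', 2 bytes); a positive
 * imm32 such as 5 emits BF 05 00 00 00 ('mov edi, 5', 5 bytes, upper 32 bits
 * cleared by the 32-bit write); with sign_propagate set and imm32 == -1 the
 * sign-extending form 48 C7 C7 FF FF FF FF ('mov rdi, -1', 7 bytes) is used.
 */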
797
798 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
799                            const u32 imm32_hi, const u32 imm32_lo)
800 {
801         u8 *prog = *pprog;
802
803         if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
804                 /*
805                  * For emitting a plain u32 where the sign bit must not be
806                  * propagated, LLVM tends to load an imm64 rather than use
807                  * mov32 directly, so save a couple of bytes by just doing
808                  * 'mov %eax, imm32' instead.
809                  */
810                 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
811         } else {
812                 /* movabsq rax, imm64 */
813                 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
814                 EMIT(imm32_lo, 4);
815                 EMIT(imm32_hi, 4);
816         }
817
818         *pprog = prog;
819 }
820
821 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
822 {
823         u8 *prog = *pprog;
824
825         if (is64) {
826                 /* mov dst, src */
827                 EMIT_mov(dst_reg, src_reg);
828         } else {
829                 /* mov32 dst, src */
830                 if (is_ereg(dst_reg) || is_ereg(src_reg))
831                         EMIT1(add_2mod(0x40, dst_reg, src_reg));
832                 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
833         }
834
835         *pprog = prog;
836 }
837
838 static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
839                            u32 src_reg)
840 {
841         u8 *prog = *pprog;
842
843         if (is64) {
844                 /* movs[b,w,l]q dst, src */
845                 if (num_bits == 8)
846                         EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
847                               add_2reg(0xC0, src_reg, dst_reg));
848                 else if (num_bits == 16)
849                         EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
850                               add_2reg(0xC0, src_reg, dst_reg));
851                 else if (num_bits == 32)
852                         EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
853                               add_2reg(0xC0, src_reg, dst_reg));
854         } else {
855                 /* movs[b,w]l dst, src */
856                 if (num_bits == 8) {
857                         EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
858                               add_2reg(0xC0, src_reg, dst_reg));
859                 } else if (num_bits == 16) {
860                         if (is_ereg(dst_reg) || is_ereg(src_reg))
861                                 EMIT1(add_2mod(0x40, src_reg, dst_reg));
862                         EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
863                               add_2reg(0xC0, src_reg, dst_reg));
864                 }
865         }
866
867         *pprog = prog;
868 }
869
870 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
871 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
872 {
873         u8 *prog = *pprog;
874
875         if (is_imm8(off)) {
876                 /* 1-byte signed displacement.
877                  *
878                  * If off == 0 we could skip this and save one extra byte, but
879                  * special case of x86 R13 which always needs an offset is not
880                  * worth the hassle
881                  */
882                 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
883         } else {
884                 /* 4-byte signed displacement */
885                 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
886         }
887         *pprog = prog;
888 }
889
890 /*
891  * Emit a REX byte if it will be necessary to address these registers
892  */
893 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
894 {
895         u8 *prog = *pprog;
896
897         if (is64)
898                 EMIT1(add_2mod(0x48, dst_reg, src_reg));
899         else if (is_ereg(dst_reg) || is_ereg(src_reg))
900                 EMIT1(add_2mod(0x40, dst_reg, src_reg));
901         *pprog = prog;
902 }
903
904 /*
905  * Similar version of maybe_emit_mod() for a single register
906  */
907 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
908 {
909         u8 *prog = *pprog;
910
911         if (is64)
912                 EMIT1(add_1mod(0x48, reg));
913         else if (is_ereg(reg))
914                 EMIT1(add_1mod(0x40, reg));
915         *pprog = prog;
916 }
917
918 /* LDX: dst_reg = *(u8*)(src_reg + off) */
919 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
920 {
921         u8 *prog = *pprog;
922
923         switch (size) {
924         case BPF_B:
925                 /* Emit 'movzx rax, byte ptr [rax + off]' */
926                 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
927                 break;
928         case BPF_H:
929                 /* Emit 'movzx rax, word ptr [rax + off]' */
930                 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
931                 break;
932         case BPF_W:
933                 /* Emit 'mov eax, dword ptr [rax+0x14]' */
934                 if (is_ereg(dst_reg) || is_ereg(src_reg))
935                         EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
936                 else
937                         EMIT1(0x8B);
938                 break;
939         case BPF_DW:
940                 /* Emit 'mov rax, qword ptr [rax+0x14]' */
941                 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
942                 break;
943         }
944         emit_insn_suffix(&prog, src_reg, dst_reg, off);
945         *pprog = prog;
946 }
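/*
 * Example (arbitrary operands): a BPF_LDX | BPF_MEM | BPF_DW load with dst R0,
 * src R1 and off 8 goes through the BPF_DW case and emit_insn_suffix(),
 * producing 48 8B 47 08, i.e. 'mov rax, qword ptr [rdi + 8]' (REX.W, opcode
 * 8B, ModRM 0x47 = mod 01 / reg rax / rm rdi, disp8 0x08).
 */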
947
948 /* LDSX: dst_reg = *(s8*)(src_reg + off) */
949 static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
950 {
951         u8 *prog = *pprog;
952
953         switch (size) {
954         case BPF_B:
955                 /* Emit 'movsx rax, byte ptr [rax + off]' */
956                 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
957                 break;
958         case BPF_H:
959                 /* Emit 'movsx rax, word ptr [rax + off]' */
960                 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
961                 break;
962         case BPF_W:
963                 /* Emit 'movsx rax, dword ptr [rax+0x14]' */
964                 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
965                 break;
966         }
967         emit_insn_suffix(&prog, src_reg, dst_reg, off);
968         *pprog = prog;
969 }
970
971 /* STX: *(u8*)(dst_reg + off) = src_reg */
972 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
973 {
974         u8 *prog = *pprog;
975
976         switch (size) {
977         case BPF_B:
978                 /* Emit 'mov byte ptr [rax + off], al' */
979                 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
980                         /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
981                         EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
982                 else
983                         EMIT1(0x88);
984                 break;
985         case BPF_H:
986                 if (is_ereg(dst_reg) || is_ereg(src_reg))
987                         EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
988                 else
989                         EMIT2(0x66, 0x89);
990                 break;
991         case BPF_W:
992                 if (is_ereg(dst_reg) || is_ereg(src_reg))
993                         EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
994                 else
995                         EMIT1(0x89);
996                 break;
997         case BPF_DW:
998                 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
999                 break;
1000         }
1001         emit_insn_suffix(&prog, dst_reg, src_reg, off);
1002         *pprog = prog;
1003 }
1004
1005 static int emit_atomic(u8 **pprog, u8 atomic_op,
1006                        u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
1007 {
1008         u8 *prog = *pprog;
1009
1010         EMIT1(0xF0); /* lock prefix */
1011
1012         maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
1013
1014         /* emit opcode */
1015         switch (atomic_op) {
1016         case BPF_ADD:
1017         case BPF_AND:
1018         case BPF_OR:
1019         case BPF_XOR:
1020                 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
1021                 EMIT1(simple_alu_opcodes[atomic_op]);
1022                 break;
1023         case BPF_ADD | BPF_FETCH:
1024                 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
1025                 EMIT2(0x0F, 0xC1);
1026                 break;
1027         case BPF_XCHG:
1028                 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
1029                 EMIT1(0x87);
1030                 break;
1031         case BPF_CMPXCHG:
1032                 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
1033                 EMIT2(0x0F, 0xB1);
1034                 break;
1035         default:
1036                 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1037                 return -EFAULT;
1038         }
1039
1040         emit_insn_suffix(&prog, dst_reg, src_reg, off);
1041
1042         *pprog = prog;
1043         return 0;
1044 }
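/*
 * Example (arbitrary operands): BPF_ADD | BPF_FETCH on a BPF_DW with dst_reg
 * R1 (memory base, rdi), src_reg R0 (rax) and off 0 emits
 * F0 48 0F C1 47 00, i.e. 'lock xadd qword ptr [rdi + 0], rax', which
 * atomically adds rax to the memory word and leaves the old value in rax.
 */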
1045
1046 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
1047 {
1048         u32 reg = x->fixup >> 8;
1049
1050         /* jump over faulting load and clear dest register */
1051         *(unsigned long *)((void *)regs + reg) = 0;
1052         regs->ip += x->fixup & 0xff;
1053         return true;
1054 }
1055
1056 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
1057                              bool *regs_used, bool *tail_call_seen)
1058 {
1059         int i;
1060
1061         for (i = 1; i <= insn_cnt; i++, insn++) {
1062                 if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
1063                         *tail_call_seen = true;
1064                 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
1065                         regs_used[0] = true;
1066                 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
1067                         regs_used[1] = true;
1068                 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
1069                         regs_used[2] = true;
1070                 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
1071                         regs_used[3] = true;
1072         }
1073 }
1074
1075 /* emit the 3-byte VEX prefix
1076  *
1077  * r: same as rex.r, extra bit for ModRM reg field
1078  * x: same as rex.x, extra bit for SIB index field
1079  * b: same as rex.b, extra bit for ModRM r/m, or SIB base
1080  * m: opcode map select, encoding escape bytes e.g. 0x0f38
1081  * w: same as rex.w (32 bit or 64 bit) or opcode specific
1082  * src_reg2: additional source reg (encoded as BPF reg)
1083  * l: vector length (128 bit or 256 bit) or reserved
1084  * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
1085  */
1086 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
1087                       bool w, u8 src_reg2, bool l, u8 pp)
1088 {
1089         u8 *prog = *pprog;
1090         const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
1091         u8 b1, b2;
1092         u8 vvvv = reg2hex[src_reg2];
1093
1094         /* reg2hex gives only the lower 3 bit of vvvv */
1095         if (is_ereg(src_reg2))
1096                 vvvv |= 1 << 3;
1097
1098         /*
1099          * 2nd byte of 3-byte VEX prefix
1100          * ~ means bit inverted encoding
1101          *
1102          *    7                           0
1103          *  +---+---+---+---+---+---+---+---+
1104          *  |~R |~X |~B |         m         |
1105          *  +---+---+---+---+---+---+---+---+
1106          */
1107         b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
1108         /*
1109          * 3rd byte of 3-byte VEX prefix
1110          *
1111          *    7                           0
1112          *  +---+---+---+---+---+---+---+---+
1113          *  | W |     ~vvvv     | L |   pp  |
1114          *  +---+---+---+---+---+---+---+---+
1115          */
1116         b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
1117
1118         EMIT3(b0, b1, b2);
1119         *pprog = prog;
1120 }
1121
1122 /* emit BMI2 shift instruction */
1123 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
1124 {
1125         u8 *prog = *pprog;
1126         bool r = is_ereg(dst_reg);
1127         u8 m = 2; /* escape code 0f38 */
1128
1129         emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
1130         EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
1131         *pprog = prog;
1132 }
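/*
 * Worked example (arbitrary operands): emit_shiftx() for a 64-bit BPF_LSH
 * with dst_reg R0 (rax) and src_reg R2 (rsi) calls emit_3vex() with m=2
 * (0f38 map), w=1 and pp=1 (0x66 prefix), giving the VEX prefix C4 E2 C9,
 * followed by F7 C0: 'shlx rax, rax, rsi'. The shift-count register rsi is
 * carried bit-inverted in the vvvv field (~6 & 0xf = 9).
 */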
1133
1134 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
1135
1136 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
1137 #define RESTORE_TAIL_CALL_CNT(stack)                            \
1138         EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
1139
1140 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
1141                   int oldproglen, struct jit_context *ctx, bool jmp_padding)
1142 {
1143         bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
1144         struct bpf_insn *insn = bpf_prog->insnsi;
1145         bool callee_regs_used[4] = {};
1146         int insn_cnt = bpf_prog->len;
1147         bool tail_call_seen = false;
1148         bool seen_exit = false;
1149         u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1150         int i, excnt = 0;
1151         int ilen, proglen = 0;
1152         u8 *prog = temp;
1153         int err;
1154
1155         detect_reg_usage(insn, insn_cnt, callee_regs_used,
1156                          &tail_call_seen);
1157
1158         /* tail call's presence in current prog implies it is reachable */
1159         tail_call_reachable |= tail_call_seen;
1160
1161         emit_prologue(&prog, bpf_prog->aux->stack_depth,
1162                       bpf_prog_was_classic(bpf_prog), tail_call_reachable,
1163                       bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
1164         /* Exception callback will clobber callee regs for its own use, and
1165          * restore the original callee regs from main prog's stack frame.
1166          */
1167         if (bpf_prog->aux->exception_boundary) {
1168                 /* We also need to save r12, which is not mapped to any BPF
1169                  * register, as we throw after entry into the kernel, which may
1170                  * overwrite r12.
1171                  */
1172                 push_r12(&prog);
1173                 push_callee_regs(&prog, all_callee_regs_used);
1174         } else {
1175                 push_callee_regs(&prog, callee_regs_used);
1176         }
1177
1178         ilen = prog - temp;
1179         if (rw_image)
1180                 memcpy(rw_image + proglen, temp, ilen);
1181         proglen += ilen;
1182         addrs[0] = proglen;
1183         prog = temp;
1184
1185         for (i = 1; i <= insn_cnt; i++, insn++) {
1186                 const s32 imm32 = insn->imm;
1187                 u32 dst_reg = insn->dst_reg;
1188                 u32 src_reg = insn->src_reg;
1189                 u8 b2 = 0, b3 = 0;
1190                 u8 *start_of_ldx;
1191                 s64 jmp_offset;
1192                 s16 insn_off;
1193                 u8 jmp_cond;
1194                 u8 *func;
1195                 int nops;
1196
1197                 switch (insn->code) {
1198                         /* ALU */
1199                 case BPF_ALU | BPF_ADD | BPF_X:
1200                 case BPF_ALU | BPF_SUB | BPF_X:
1201                 case BPF_ALU | BPF_AND | BPF_X:
1202                 case BPF_ALU | BPF_OR | BPF_X:
1203                 case BPF_ALU | BPF_XOR | BPF_X:
1204                 case BPF_ALU64 | BPF_ADD | BPF_X:
1205                 case BPF_ALU64 | BPF_SUB | BPF_X:
1206                 case BPF_ALU64 | BPF_AND | BPF_X:
1207                 case BPF_ALU64 | BPF_OR | BPF_X:
1208                 case BPF_ALU64 | BPF_XOR | BPF_X:
1209                         maybe_emit_mod(&prog, dst_reg, src_reg,
1210                                        BPF_CLASS(insn->code) == BPF_ALU64);
1211                         b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1212                         EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
1213                         break;
1214
1215                 case BPF_ALU64 | BPF_MOV | BPF_X:
1216                 case BPF_ALU | BPF_MOV | BPF_X:
1217                         if (insn->off == 0)
1218                                 emit_mov_reg(&prog,
1219                                              BPF_CLASS(insn->code) == BPF_ALU64,
1220                                              dst_reg, src_reg);
1221                         else
1222                                 emit_movsx_reg(&prog, insn->off,
1223                                                BPF_CLASS(insn->code) == BPF_ALU64,
1224                                                dst_reg, src_reg);
1225                         break;
1226
1227                         /* neg dst */
1228                 case BPF_ALU | BPF_NEG:
1229                 case BPF_ALU64 | BPF_NEG:
1230                         maybe_emit_1mod(&prog, dst_reg,
1231                                         BPF_CLASS(insn->code) == BPF_ALU64);
1232                         EMIT2(0xF7, add_1reg(0xD8, dst_reg));
1233                         break;
1234
1235                 case BPF_ALU | BPF_ADD | BPF_K:
1236                 case BPF_ALU | BPF_SUB | BPF_K:
1237                 case BPF_ALU | BPF_AND | BPF_K:
1238                 case BPF_ALU | BPF_OR | BPF_K:
1239                 case BPF_ALU | BPF_XOR | BPF_K:
1240                 case BPF_ALU64 | BPF_ADD | BPF_K:
1241                 case BPF_ALU64 | BPF_SUB | BPF_K:
1242                 case BPF_ALU64 | BPF_AND | BPF_K:
1243                 case BPF_ALU64 | BPF_OR | BPF_K:
1244                 case BPF_ALU64 | BPF_XOR | BPF_K:
1245                         maybe_emit_1mod(&prog, dst_reg,
1246                                         BPF_CLASS(insn->code) == BPF_ALU64);
1247
1248                         /*
1249                          * b3 holds 'normal' opcode, b2 short form only valid
1250                          * in case dst is eax/rax.
1251                          */
1252                         switch (BPF_OP(insn->code)) {
1253                         case BPF_ADD:
1254                                 b3 = 0xC0;
1255                                 b2 = 0x05;
1256                                 break;
1257                         case BPF_SUB:
1258                                 b3 = 0xE8;
1259                                 b2 = 0x2D;
1260                                 break;
1261                         case BPF_AND:
1262                                 b3 = 0xE0;
1263                                 b2 = 0x25;
1264                                 break;
1265                         case BPF_OR:
1266                                 b3 = 0xC8;
1267                                 b2 = 0x0D;
1268                                 break;
1269                         case BPF_XOR:
1270                                 b3 = 0xF0;
1271                                 b2 = 0x35;
1272                                 break;
1273                         }
1274
1275                         if (is_imm8(imm32))
1276                                 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1277                         else if (is_axreg(dst_reg))
1278                                 EMIT1_off32(b2, imm32);
1279                         else
1280                                 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1281                         break;
1282
1283                 case BPF_ALU64 | BPF_MOV | BPF_K:
1284                 case BPF_ALU | BPF_MOV | BPF_K:
1285                         emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1286                                        dst_reg, imm32);
1287                         break;
1288
1289                 case BPF_LD | BPF_IMM | BPF_DW:
1290                         emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1291                         insn++;
1292                         i++;
1293                         break;
1294
1295                         /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1296                 case BPF_ALU | BPF_MOD | BPF_X:
1297                 case BPF_ALU | BPF_DIV | BPF_X:
1298                 case BPF_ALU | BPF_MOD | BPF_K:
1299                 case BPF_ALU | BPF_DIV | BPF_K:
1300                 case BPF_ALU64 | BPF_MOD | BPF_X:
1301                 case BPF_ALU64 | BPF_DIV | BPF_X:
1302                 case BPF_ALU64 | BPF_MOD | BPF_K:
1303                 case BPF_ALU64 | BPF_DIV | BPF_K: {
1304                         bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1305
1306                         if (dst_reg != BPF_REG_0)
1307                                 EMIT1(0x50); /* push rax */
1308                         if (dst_reg != BPF_REG_3)
1309                                 EMIT1(0x52); /* push rdx */
1310
1311                         if (BPF_SRC(insn->code) == BPF_X) {
1312                                 if (src_reg == BPF_REG_0 ||
1313                                     src_reg == BPF_REG_3) {
1314                                         /* mov r11, src_reg */
1315                                         EMIT_mov(AUX_REG, src_reg);
1316                                         src_reg = AUX_REG;
1317                                 }
1318                         } else {
1319                                 /* mov r11, imm32 */
1320                                 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1321                                 src_reg = AUX_REG;
1322                         }
1323
1324                         if (dst_reg != BPF_REG_0)
1325                                 /* mov rax, dst_reg */
1326                                 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1327
1328                         if (insn->off == 0) {
1329                                 /*
1330                                  * xor edx, edx
1331                                  * equivalent to 'xor rdx, rdx', but one byte less
1332                                  */
1333                                 EMIT2(0x31, 0xd2);
1334
1335                                 /* div src_reg */
1336                                 maybe_emit_1mod(&prog, src_reg, is64);
1337                                 EMIT2(0xF7, add_1reg(0xF0, src_reg));
1338                         } else {
1339                                 if (BPF_CLASS(insn->code) == BPF_ALU)
1340                                         EMIT1(0x99); /* cdq */
1341                                 else
1342                                         EMIT2(0x48, 0x99); /* cqo */
1343
1344                                 /* idiv src_reg */
1345                                 maybe_emit_1mod(&prog, src_reg, is64);
1346                                 EMIT2(0xF7, add_1reg(0xF8, src_reg));
1347                         }
1348
1349                         if (BPF_OP(insn->code) == BPF_MOD &&
1350                             dst_reg != BPF_REG_3)
1351                                 /* mov dst_reg, rdx */
1352                                 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1353                         else if (BPF_OP(insn->code) == BPF_DIV &&
1354                                  dst_reg != BPF_REG_0)
1355                                 /* mov dst_reg, rax */
1356                                 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1357
1358                         if (dst_reg != BPF_REG_3)
1359                                 EMIT1(0x5A); /* pop rdx */
1360                         if (dst_reg != BPF_REG_0)
1361                                 EMIT1(0x58); /* pop rax */
1362                         break;
1363                 }
1364
1365                 case BPF_ALU | BPF_MUL | BPF_K:
1366                 case BPF_ALU64 | BPF_MUL | BPF_K:
1367                         maybe_emit_mod(&prog, dst_reg, dst_reg,
1368                                        BPF_CLASS(insn->code) == BPF_ALU64);
1369
1370                         if (is_imm8(imm32))
1371                                 /* imul dst_reg, dst_reg, imm8 */
1372                                 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1373                                       imm32);
1374                         else
1375                                 /* imul dst_reg, dst_reg, imm32 */
1376                                 EMIT2_off32(0x69,
1377                                             add_2reg(0xC0, dst_reg, dst_reg),
1378                                             imm32);
1379                         break;
1380
1381                 case BPF_ALU | BPF_MUL | BPF_X:
1382                 case BPF_ALU64 | BPF_MUL | BPF_X:
1383                         maybe_emit_mod(&prog, src_reg, dst_reg,
1384                                        BPF_CLASS(insn->code) == BPF_ALU64);
1385
1386                         /* imul dst_reg, src_reg */
1387                         EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1388                         break;
1389
1390                         /* Shifts */
1391                 case BPF_ALU | BPF_LSH | BPF_K:
1392                 case BPF_ALU | BPF_RSH | BPF_K:
1393                 case BPF_ALU | BPF_ARSH | BPF_K:
1394                 case BPF_ALU64 | BPF_LSH | BPF_K:
1395                 case BPF_ALU64 | BPF_RSH | BPF_K:
1396                 case BPF_ALU64 | BPF_ARSH | BPF_K:
1397                         maybe_emit_1mod(&prog, dst_reg,
1398                                         BPF_CLASS(insn->code) == BPF_ALU64);
1399
1400                         b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1401                         if (imm32 == 1)
1402                                 EMIT2(0xD1, add_1reg(b3, dst_reg));
1403                         else
1404                                 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1405                         break;
1406
1407                 case BPF_ALU | BPF_LSH | BPF_X:
1408                 case BPF_ALU | BPF_RSH | BPF_X:
1409                 case BPF_ALU | BPF_ARSH | BPF_X:
1410                 case BPF_ALU64 | BPF_LSH | BPF_X:
1411                 case BPF_ALU64 | BPF_RSH | BPF_X:
1412                 case BPF_ALU64 | BPF_ARSH | BPF_X:
1413                         /* BMI2 shifts aren't better when shift count is already in rcx */
1414                         if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
1415                                 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1416                                 bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
1417                                 u8 op;
1418
1419                                 switch (BPF_OP(insn->code)) {
1420                                 case BPF_LSH:
1421                                         op = 1; /* prefix 0x66 */
1422                                         break;
1423                                 case BPF_RSH:
1424                                         op = 3; /* prefix 0xf2 */
1425                                         break;
1426                                 case BPF_ARSH:
1427                                         op = 2; /* prefix 0xf3 */
1428                                         break;
1429                                 }
1430
1431                                 emit_shiftx(&prog, dst_reg, src_reg, w, op);
1432
1433                                 break;
1434                         }
1435
1436                         if (src_reg != BPF_REG_4) { /* common case */
1437                                 /* Check for bad case when dst_reg == rcx */
1438                                 if (dst_reg == BPF_REG_4) {
1439                                         /* mov r11, dst_reg */
1440                                         EMIT_mov(AUX_REG, dst_reg);
1441                                         dst_reg = AUX_REG;
1442                                 } else {
1443                                         EMIT1(0x51); /* push rcx */
1444                                 }
1445                                 /* mov rcx, src_reg */
1446                                 EMIT_mov(BPF_REG_4, src_reg);
1447                         }
1448
1449                         /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1450                         maybe_emit_1mod(&prog, dst_reg,
1451                                         BPF_CLASS(insn->code) == BPF_ALU64);
1452
1453                         b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1454                         EMIT2(0xD3, add_1reg(b3, dst_reg));
1455
1456                         if (src_reg != BPF_REG_4) {
1457                                 if (insn->dst_reg == BPF_REG_4)
1458                                         /* mov dst_reg, r11 */
1459                                         EMIT_mov(insn->dst_reg, AUX_REG);
1460                                 else
1461                                         EMIT1(0x59); /* pop rcx */
1462                         }
1463
1464                         break;
1465
1466                 case BPF_ALU | BPF_END | BPF_FROM_BE:
1467                 case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1468                         switch (imm32) {
1469                         case 16:
1470                                 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1471                                 EMIT1(0x66);
1472                                 if (is_ereg(dst_reg))
1473                                         EMIT1(0x41);
1474                                 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1475
1476                                 /* Emit 'movzwl eax, ax' */
1477                                 if (is_ereg(dst_reg))
1478                                         EMIT3(0x45, 0x0F, 0xB7);
1479                                 else
1480                                         EMIT2(0x0F, 0xB7);
1481                                 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1482                                 break;
1483                         case 32:
1484                                 /* Emit 'bswap eax' to swap lower 4 bytes */
1485                                 if (is_ereg(dst_reg))
1486                                         EMIT2(0x41, 0x0F);
1487                                 else
1488                                         EMIT1(0x0F);
1489                                 EMIT1(add_1reg(0xC8, dst_reg));
1490                                 break;
1491                         case 64:
1492                                 /* Emit 'bswap rax' to swap 8 bytes */
1493                                 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1494                                       add_1reg(0xC8, dst_reg));
1495                                 break;
1496                         }
1497                         break;
1498
1499                 case BPF_ALU | BPF_END | BPF_FROM_LE:
1500                         switch (imm32) {
1501                         case 16:
1502                                 /*
1503                                  * Emit 'movzwl eax, ax' to zero extend 16-bit
1504                                  * into 64 bit
1505                                  */
1506                                 if (is_ereg(dst_reg))
1507                                         EMIT3(0x45, 0x0F, 0xB7);
1508                                 else
1509                                         EMIT2(0x0F, 0xB7);
1510                                 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1511                                 break;
1512                         case 32:
1513                                 /* Emit 'mov eax, eax' to clear upper 32-bits */
1514                                 if (is_ereg(dst_reg))
1515                                         EMIT1(0x45);
1516                                 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1517                                 break;
1518                         case 64:
1519                                 /* nop: x86-64 is little-endian, so 64-bit LE needs no conversion */
1520                                 break;
1521                         }
1522                         break;
1523
1524                         /* speculation barrier */
1525                 case BPF_ST | BPF_NOSPEC:
1526                         EMIT_LFENCE();
1527                         break;
1528
1529                         /* ST: *(u8*)(dst_reg + off) = imm */
1530                 case BPF_ST | BPF_MEM | BPF_B:
1531                         if (is_ereg(dst_reg))
1532                                 EMIT2(0x41, 0xC6);
1533                         else
1534                                 EMIT1(0xC6);
1535                         goto st;
1536                 case BPF_ST | BPF_MEM | BPF_H:
1537                         if (is_ereg(dst_reg))
1538                                 EMIT3(0x66, 0x41, 0xC7);
1539                         else
1540                                 EMIT2(0x66, 0xC7);
1541                         goto st;
1542                 case BPF_ST | BPF_MEM | BPF_W:
1543                         if (is_ereg(dst_reg))
1544                                 EMIT2(0x41, 0xC7);
1545                         else
1546                                 EMIT1(0xC7);
1547                         goto st;
1548                 case BPF_ST | BPF_MEM | BPF_DW:
1549                         EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1550
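                        /* Common tail for the BPF_ST cases above: pick the ModRM
                         * form based on the displacement size, 0x40|reg (mod=01,
                         * disp8) when insn->off fits in a signed byte, otherwise
                         * 0x80|reg (mod=10, disp32), then emit the immediate
                         * (at most 4 bytes; the DW form uses a sign-extended imm32).
                         */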
1551 st:                     if (is_imm8(insn->off))
1552                                 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1553                         else
1554                                 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1555
1556                         EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1557                         break;
1558
1559                         /* STX: *(u8*)(dst_reg + off) = src_reg */
1560                 case BPF_STX | BPF_MEM | BPF_B:
1561                 case BPF_STX | BPF_MEM | BPF_H:
1562                 case BPF_STX | BPF_MEM | BPF_W:
1563                 case BPF_STX | BPF_MEM | BPF_DW:
1564                         emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1565                         break;
1566
1567                         /* LDX: dst_reg = *(u8*)(src_reg + off) */
1568                 case BPF_LDX | BPF_MEM | BPF_B:
1569                 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1570                 case BPF_LDX | BPF_MEM | BPF_H:
1571                 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1572                 case BPF_LDX | BPF_MEM | BPF_W:
1573                 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1574                 case BPF_LDX | BPF_MEM | BPF_DW:
1575                 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1576                         /* LDXS: dst_reg = *(s8*)(src_reg + off) */
1577                 case BPF_LDX | BPF_MEMSX | BPF_B:
1578                 case BPF_LDX | BPF_MEMSX | BPF_H:
1579                 case BPF_LDX | BPF_MEMSX | BPF_W:
1580                 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1581                 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1582                 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1583                         insn_off = insn->off;
1584
1585                         if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
1586                             BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
1587                                 /* Conservatively check that src_reg + insn->off is a kernel address:
1588                                  *   src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
1589                                  * src_reg is used as scratch for src_reg += insn->off and restored
1590                                  * after emit_ldx if necessary
1591                                  */
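                                /* Sketch of the emitted check (AUX_REG is r11):
                                 *
                                 *   movabs r11, TASK_SIZE_MAX + PAGE_SIZE
                                 *   add    src_reg, insn->off     ; only if off != 0
                                 *   cmp    src_reg, r11
                                 *   jae    start_of_ldx           ; kernel address: do the load
                                 *   dst_reg := 0                  ; user address: the load reads as 0
                                 *   jmp    byte_after_ldx         ; skip the load
                                 * start_of_ldx:
                                 *   <the ldx emitted below>
                                 */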
1592
1593                                 u64 limit = TASK_SIZE_MAX + PAGE_SIZE;
1594                                 u8 *end_of_jmp;
1595
1596                                 /* At end of these emitted checks, insn->off will have been added
1597                                  * to src_reg, so no need to do relative load with insn->off offset
1598                                  */
1599                                 insn_off = 0;
1600
1601                                 /* movabsq r11, limit */
1602                                 EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
1603                                 EMIT((u32)limit, 4);
1604                                 EMIT(limit >> 32, 4);
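                                /* The three EMITs above form the 10-byte
                                 * "mov r11, imm64": REX.W|REX.B + (0xB8|reg)
                                 * followed by the 64-bit immediate in two
                                 * 32-bit halves.
                                 */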
1605
1606                                 if (insn->off) {
1607                                         /* add src_reg, insn->off */
1608                                         maybe_emit_1mod(&prog, src_reg, true);
1609                                         EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off);
1610                                 }
1611
1612                                 /* cmp src_reg, r11 */
1613                                 maybe_emit_mod(&prog, src_reg, AUX_REG, true);
1614                                 EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
1615
1616                                 /* if unsigned '>=', goto load */
1617                                 EMIT2(X86_JAE, 0);
1618                                 end_of_jmp = prog;
1619
1620                                 /* xor dst_reg, dst_reg */
1621                                 emit_mov_imm32(&prog, false, dst_reg, 0);
1622                                 /* jmp byte_after_ldx */
1623                                 EMIT2(0xEB, 0);
1624
1625                                 /* populate jmp_offset for JAE above to jump to start_of_ldx */
1626                                 start_of_ldx = prog;
1627                                 end_of_jmp[-1] = start_of_ldx - end_of_jmp;
1628                         }
1629                         if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
1630                             BPF_MODE(insn->code) == BPF_MEMSX)
1631                                 emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1632                         else
1633                                 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1634                         if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
1635                             BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
1636                                 struct exception_table_entry *ex;
1637                                 u8 *_insn = image + proglen + (start_of_ldx - temp);
1638                                 s64 delta;
1639
1640                                 /* populate jmp_offset for JMP above */
1641                                 start_of_ldx[-1] = prog - start_of_ldx;
1642
1643                                 if (insn->off && src_reg != dst_reg) {
1644                                         /* sub src_reg, insn->off
1645                                          * Restore src_reg after "add src_reg, insn->off" in prev
1646                                          * if statement. But if src_reg == dst_reg, emit_ldx
1647                                          * above already clobbered src_reg, so no need to restore.
1648                                          * If add src_reg, insn->off was unnecessary, no need to
1649                                          * restore either.
1650                                          */
1651                                         maybe_emit_1mod(&prog, src_reg, true);
1652                                         EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off);
1653                                 }
1654
1655                                 if (!bpf_prog->aux->extable)
1656                                         break;
1657
1658                                 if (excnt >= bpf_prog->aux->num_exentries) {
1659                                         pr_err("ex gen bug\n");
1660                                         return -EFAULT;
1661                                 }
1662                                 ex = &bpf_prog->aux->extable[excnt++];
1663
1664                                 delta = _insn - (u8 *)&ex->insn;
1665                                 if (!is_simm32(delta)) {
1666                                         pr_err("extable->insn doesn't fit into 32-bit\n");
1667                                         return -EFAULT;
1668                                 }
1669                                 /* switch ex to rw buffer for writes */
1670                                 ex = (void *)rw_image + ((void *)ex - (void *)image);
1671
1672                                 ex->insn = delta;
1673
1674                                 ex->data = EX_TYPE_BPF;
1675
1676                                 if (dst_reg > BPF_REG_9) {
1677                                         pr_err("verifier error\n");
1678                                         return -EFAULT;
1679                                 }
1680                                 /*
1681                                  * Compute size of x86 insn and its target dest x86 register.
1682                                  * ex_handler_bpf() will use lower 8 bits to adjust
1683                                  * pt_regs->ip to jump over this x86 instruction
1684                                  * and upper bits to figure out which pt_regs to zero out.
1685                                  * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1686                                  * of 4 bytes will be ignored and rbx will be zero-initialized.
1687                                  */
1688                                 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1689                         }
1690                         break;
1691
1692                 case BPF_STX | BPF_ATOMIC | BPF_W:
1693                 case BPF_STX | BPF_ATOMIC | BPF_DW:
1694                         if (insn->imm == (BPF_AND | BPF_FETCH) ||
1695                             insn->imm == (BPF_OR | BPF_FETCH) ||
1696                             insn->imm == (BPF_XOR | BPF_FETCH)) {
1697                                 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1698                                 u32 real_src_reg = src_reg;
1699                                 u32 real_dst_reg = dst_reg;
1700                                 u8 *branch_target;
1701
1702                                 /*
1703                                  * Can't be implemented with a single x86 insn.
1704                                  * Need to do a CMPXCHG loop.
1705                                  */
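                                /* Sketch of the loop emitted below, e.g. for an
                                 * atomic or-and-fetch (BPF_OR | BPF_FETCH):
                                 *
                                 * retry:
                                 *   mov  rax, [real_dst_reg + off]   ; load old value
                                 *   mov  r11, rax
                                 *   or   r11, real_src_reg           ; compute new value
                                 *   lock cmpxchg [real_dst_reg + off], r11
                                 *   jne  retry                       ; lost the race, retry
                                 *   mov  real_src_reg, rax           ; return the old value
                                 */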
1706
1707                                 /* Will need RAX as a CMPXCHG operand so save R0 */
1708                                 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1709                                 if (src_reg == BPF_REG_0)
1710                                         real_src_reg = BPF_REG_AX;
1711                                 if (dst_reg == BPF_REG_0)
1712                                         real_dst_reg = BPF_REG_AX;
1713
1714                                 branch_target = prog;
1715                                 /* Load old value */
1716                                 emit_ldx(&prog, BPF_SIZE(insn->code),
1717                                          BPF_REG_0, real_dst_reg, insn->off);
1718                                 /*
1719                                  * Perform the (commutative) operation locally,
1720                                  * put the result in the AUX_REG.
1721                                  */
1722                                 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1723                                 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1724                                 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1725                                       add_2reg(0xC0, AUX_REG, real_src_reg));
1726                                 /* Attempt to swap in new value */
1727                                 err = emit_atomic(&prog, BPF_CMPXCHG,
1728                                                   real_dst_reg, AUX_REG,
1729                                                   insn->off,
1730                                                   BPF_SIZE(insn->code));
1731                                 if (WARN_ON(err))
1732                                         return err;
1733                                 /*
1734                                  * ZF tells us whether we won the race. If it's
1735                                  * cleared we need to try again.
1736                                  */
1737                                 EMIT2(X86_JNE, -(prog - branch_target) - 2);
1738                                 /* Return the pre-modification value */
1739                                 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1740                                 /* Restore R0 after clobbering RAX */
1741                                 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1742                                 break;
1743                         }
1744
1745                         err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1746                                           insn->off, BPF_SIZE(insn->code));
1747                         if (err)
1748                                 return err;
1749                         break;
1750
1751                         /* call */
1752                 case BPF_JMP | BPF_CALL: {
1753                         int offs;
1754
1755                         func = (u8 *) __bpf_call_base + imm32;
1756                         if (tail_call_reachable) {
1757                                 RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
1758                                 if (!imm32)
1759                                         return -EINVAL;
1760                                 offs = 7 + x86_call_depth_emit_accounting(&prog, func);
1761                         } else {
1762                                 if (!imm32)
1763                                         return -EINVAL;
1764                                 offs = x86_call_depth_emit_accounting(&prog, func);
1765                         }
1766                         if (emit_call(&prog, func, image + addrs[i - 1] + offs))
1767                                 return -EINVAL;
1768                         break;
1769                 }
1770
1771                 case BPF_JMP | BPF_TAIL_CALL:
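                        /* imm32, when non-zero, is the poke table index + 1,
                         * set up by the verifier for tail calls whose target
                         * slot is known at load time, so the jump can be
                         * patched in directly. Otherwise fall back to the
                         * indirect, run-time checked tail call.
                         */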
1772                         if (imm32)
1773                                 emit_bpf_tail_call_direct(bpf_prog,
1774                                                           &bpf_prog->aux->poke_tab[imm32 - 1],
1775                                                           &prog, image + addrs[i - 1],
1776                                                           callee_regs_used,
1777                                                           bpf_prog->aux->stack_depth,
1778                                                           ctx);
1779                         else
1780                                 emit_bpf_tail_call_indirect(bpf_prog,
1781                                                             &prog,
1782                                                             callee_regs_used,
1783                                                             bpf_prog->aux->stack_depth,
1784                                                             image + addrs[i - 1],
1785                                                             ctx);
1786                         break;
1787
1788                         /* cond jump */
1789                 case BPF_JMP | BPF_JEQ | BPF_X:
1790                 case BPF_JMP | BPF_JNE | BPF_X:
1791                 case BPF_JMP | BPF_JGT | BPF_X:
1792                 case BPF_JMP | BPF_JLT | BPF_X:
1793                 case BPF_JMP | BPF_JGE | BPF_X:
1794                 case BPF_JMP | BPF_JLE | BPF_X:
1795                 case BPF_JMP | BPF_JSGT | BPF_X:
1796                 case BPF_JMP | BPF_JSLT | BPF_X:
1797                 case BPF_JMP | BPF_JSGE | BPF_X:
1798                 case BPF_JMP | BPF_JSLE | BPF_X:
1799                 case BPF_JMP32 | BPF_JEQ | BPF_X:
1800                 case BPF_JMP32 | BPF_JNE | BPF_X:
1801                 case BPF_JMP32 | BPF_JGT | BPF_X:
1802                 case BPF_JMP32 | BPF_JLT | BPF_X:
1803                 case BPF_JMP32 | BPF_JGE | BPF_X:
1804                 case BPF_JMP32 | BPF_JLE | BPF_X:
1805                 case BPF_JMP32 | BPF_JSGT | BPF_X:
1806                 case BPF_JMP32 | BPF_JSLT | BPF_X:
1807                 case BPF_JMP32 | BPF_JSGE | BPF_X:
1808                 case BPF_JMP32 | BPF_JSLE | BPF_X:
1809                         /* cmp dst_reg, src_reg */
1810                         maybe_emit_mod(&prog, dst_reg, src_reg,
1811                                        BPF_CLASS(insn->code) == BPF_JMP);
1812                         EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1813                         goto emit_cond_jmp;
1814
1815                 case BPF_JMP | BPF_JSET | BPF_X:
1816                 case BPF_JMP32 | BPF_JSET | BPF_X:
1817                         /* test dst_reg, src_reg */
1818                         maybe_emit_mod(&prog, dst_reg, src_reg,
1819                                        BPF_CLASS(insn->code) == BPF_JMP);
1820                         EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1821                         goto emit_cond_jmp;
1822
1823                 case BPF_JMP | BPF_JSET | BPF_K:
1824                 case BPF_JMP32 | BPF_JSET | BPF_K:
1825                         /* test dst_reg, imm32 */
1826                         maybe_emit_1mod(&prog, dst_reg,
1827                                         BPF_CLASS(insn->code) == BPF_JMP);
1828                         EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1829                         goto emit_cond_jmp;
1830
1831                 case BPF_JMP | BPF_JEQ | BPF_K:
1832                 case BPF_JMP | BPF_JNE | BPF_K:
1833                 case BPF_JMP | BPF_JGT | BPF_K:
1834                 case BPF_JMP | BPF_JLT | BPF_K:
1835                 case BPF_JMP | BPF_JGE | BPF_K:
1836                 case BPF_JMP | BPF_JLE | BPF_K:
1837                 case BPF_JMP | BPF_JSGT | BPF_K:
1838                 case BPF_JMP | BPF_JSLT | BPF_K:
1839                 case BPF_JMP | BPF_JSGE | BPF_K:
1840                 case BPF_JMP | BPF_JSLE | BPF_K:
1841                 case BPF_JMP32 | BPF_JEQ | BPF_K:
1842                 case BPF_JMP32 | BPF_JNE | BPF_K:
1843                 case BPF_JMP32 | BPF_JGT | BPF_K:
1844                 case BPF_JMP32 | BPF_JLT | BPF_K:
1845                 case BPF_JMP32 | BPF_JGE | BPF_K:
1846                 case BPF_JMP32 | BPF_JLE | BPF_K:
1847                 case BPF_JMP32 | BPF_JSGT | BPF_K:
1848                 case BPF_JMP32 | BPF_JSLT | BPF_K:
1849                 case BPF_JMP32 | BPF_JSGE | BPF_K:
1850                 case BPF_JMP32 | BPF_JSLE | BPF_K:
1851                         /* test dst_reg, dst_reg to save one extra byte */
1852                         if (imm32 == 0) {
1853                                 maybe_emit_mod(&prog, dst_reg, dst_reg,
1854                                                BPF_CLASS(insn->code) == BPF_JMP);
1855                                 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1856                                 goto emit_cond_jmp;
1857                         }
1858
1859                         /* cmp dst_reg, imm8/32 */
1860                         maybe_emit_1mod(&prog, dst_reg,
1861                                         BPF_CLASS(insn->code) == BPF_JMP);
1862
1863                         if (is_imm8(imm32))
1864                                 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1865                         else
1866                                 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1867
1868 emit_cond_jmp:          /* Convert BPF opcode to x86 */
1869                         switch (BPF_OP(insn->code)) {
1870                         case BPF_JEQ:
1871                                 jmp_cond = X86_JE;
1872                                 break;
1873                         case BPF_JSET:
1874                         case BPF_JNE:
1875                                 jmp_cond = X86_JNE;
1876                                 break;
1877                         case BPF_JGT:
1878                                 /* GT is unsigned '>', JA in x86 */
1879                                 jmp_cond = X86_JA;
1880                                 break;
1881                         case BPF_JLT:
1882                                 /* LT is unsigned '<', JB in x86 */
1883                                 jmp_cond = X86_JB;
1884                                 break;
1885                         case BPF_JGE:
1886                                 /* GE is unsigned '>=', JAE in x86 */
1887                                 jmp_cond = X86_JAE;
1888                                 break;
1889                         case BPF_JLE:
1890                                 /* LE is unsigned '<=', JBE in x86 */
1891                                 jmp_cond = X86_JBE;
1892                                 break;
1893                         case BPF_JSGT:
1894                                 /* Signed '>', GT in x86 */
1895                                 jmp_cond = X86_JG;
1896                                 break;
1897                         case BPF_JSLT:
1898                                 /* Signed '<', LT in x86 */
1899                                 jmp_cond = X86_JL;
1900                                 break;
1901                         case BPF_JSGE:
1902                                 /* Signed '>=', GE in x86 */
1903                                 jmp_cond = X86_JGE;
1904                                 break;
1905                         case BPF_JSLE:
1906                                 /* Signed '<=', LE in x86 */
1907                                 jmp_cond = X86_JLE;
1908                                 break;
1909                         default: /* to silence GCC warning */
1910                                 return -EFAULT;
1911                         }
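                        /* addrs[k] holds the x86 offset just past BPF insn k, so
                         * addrs[i + insn->off] is the start of the jump target and
                         * addrs[i] the end of the jcc emitted below; the difference
                         * is exactly the rel8/rel32 displacement we need.
                         */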
1912                         jmp_offset = addrs[i + insn->off] - addrs[i];
1913                         if (is_imm8(jmp_offset)) {
1914                                 if (jmp_padding) {
1915                                         /* To keep the jmp_offset valid, the extra bytes are
1916                                          * padded before the jump insn, so we subtract the
1917                                          * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1918                                          *
1919                                          * If the previous pass already emits an imm8
1920                                          * jmp_cond, then this BPF insn won't shrink, so
1921                                          * "nops" is 0.
1922                                          *
1923                                          * On the other hand, if the previous pass emits an
1924                                          * imm32 jmp_cond, the extra 4 bytes(*) is padded to
1925                                          * keep the image from shrinking further.
1926                                          *
1927                                          * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1928                                          *     is 2 bytes, so the size difference is 4 bytes.
1929                                          */
1930                                         nops = INSN_SZ_DIFF - 2;
1931                                         if (nops != 0 && nops != 4) {
1932                                                 pr_err("unexpected jmp_cond padding: %d bytes\n",
1933                                                        nops);
1934                                                 return -EFAULT;
1935                                         }
1936                                         emit_nops(&prog, nops);
1937                                 }
1938                                 EMIT2(jmp_cond, jmp_offset);
1939                         } else if (is_simm32(jmp_offset)) {
1940                                 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1941                         } else {
1942                                 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1943                                 return -EFAULT;
1944                         }
1945
1946                         break;
1947
1948                 case BPF_JMP | BPF_JA:
1949                 case BPF_JMP32 | BPF_JA:
1950                         if (BPF_CLASS(insn->code) == BPF_JMP) {
1951                                 if (insn->off == -1)
1952                                         /* -1 jmp instructions will always jump
1953                                          * backwards two bytes. Explicitly handling
1954                                          * this case avoids wasting too many passes
1955                                          * when there are long sequences of replaced
1956                                          * dead code.
1957                                          */
1958                                         jmp_offset = -2;
1959                                 else
1960                                         jmp_offset = addrs[i + insn->off] - addrs[i];
1961                         } else {
1962                                 if (insn->imm == -1)
1963                                         jmp_offset = -2;
1964                                 else
1965                                         jmp_offset = addrs[i + insn->imm] - addrs[i];
1966                         }
1967
1968                         if (!jmp_offset) {
1969                                 /*
1970                                  * If jmp_padding is enabled, the extra nops will
1971                                  * be inserted. Otherwise, optimize out nop jumps.
1972                                  */
1973                                 if (jmp_padding) {
1974                                         /* There are 3 possible conditions.
1975                                          * (1) This BPF_JA is already optimized out in
1976                                          *     the previous run, so there is no need
1977                                          *     to pad any extra byte (0 byte).
1978                                          * (2) The previous pass emits an imm8 jmp,
1979                                          *     so we pad 2 bytes to match the previous
1980                                          *     insn size.
1981                                          * (3) Similarly, the previous pass emits an
1982                                          *     imm32 jmp, and 5 bytes is padded.
1983                                          */
1984                                         nops = INSN_SZ_DIFF;
1985                                         if (nops != 0 && nops != 2 && nops != 5) {
1986                                                 pr_err("unexpected nop jump padding: %d bytes\n",
1987                                                        nops);
1988                                                 return -EFAULT;
1989                                         }
1990                                         emit_nops(&prog, nops);
1991                                 }
1992                                 break;
1993                         }
1994 emit_jmp:
1995                         if (is_imm8(jmp_offset)) {
1996                                 if (jmp_padding) {
1997                                         /* To avoid breaking jmp_offset, the extra bytes
1998                                          * are padded before the actual jmp insn, so
1999                                          * 2 bytes is subtracted from INSN_SZ_DIFF.
2000                                          *
2001                                          * If the previous pass already emits an imm8
2002                                          * jmp, there is nothing to pad (0 byte).
2003                                          *
2004                                          * If it emits an imm32 jmp (5 bytes) previously
2005                                          * and now an imm8 jmp (2 bytes), then we pad
2006                                          * (5 - 2 = 3) bytes to stop the image from
2007                                          * shrinking further.
2008                                          */
2009                                         nops = INSN_SZ_DIFF - 2;
2010                                         if (nops != 0 && nops != 3) {
2011                                                 pr_err("unexpected jump padding: %d bytes\n",
2012                                                        nops);
2013                                                 return -EFAULT;
2014                                         }
2015                                         emit_nops(&prog, nops);
2016                                 }
2017                                 EMIT2(0xEB, jmp_offset);
2018                         } else if (is_simm32(jmp_offset)) {
2019                                 EMIT1_off32(0xE9, jmp_offset);
2020                         } else {
2021                                 pr_err("jmp gen bug %llx\n", jmp_offset);
2022                                 return -EFAULT;
2023                         }
2024                         break;
2025
2026                 case BPF_JMP | BPF_EXIT:
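                        /* The first EXIT emits the real epilogue below and
                         * records its offset in cleanup_addr; every later EXIT
                         * simply jumps there.
                         */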
2027                         if (seen_exit) {
2028                                 jmp_offset = ctx->cleanup_addr - addrs[i];
2029                                 goto emit_jmp;
2030                         }
2031                         seen_exit = true;
2032                         /* Update cleanup_addr */
2033                         ctx->cleanup_addr = proglen;
2034                         if (bpf_prog->aux->exception_boundary) {
2035                                 pop_callee_regs(&prog, all_callee_regs_used);
2036                                 pop_r12(&prog);
2037                         } else {
2038                                 pop_callee_regs(&prog, callee_regs_used);
2039                         }
2040                         EMIT1(0xC9);         /* leave */
2041                         emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2042                         break;
2043
2044                 default:
2045                         /*
2046                          * By design x86-64 JIT should support all BPF instructions.
2047                          * This error will be seen if new instruction was added
2048                          * to the interpreter, but not to the JIT, or if there is
2049                          * junk in bpf_prog.
2050                          */
2051                         pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2052                         return -EINVAL;
2053                 }
2054
2055                 ilen = prog - temp;
2056                 if (ilen > BPF_MAX_INSN_SIZE) {
2057                         pr_err("bpf_jit: fatal insn size error\n");
2058                         return -EFAULT;
2059                 }
2060
2061                 if (image) {
2062                         /*
2063                          * When populating the image, assert that:
2064                          *
2065                          *  i) We do not write beyond the allocated space, and
2066                          * ii) addrs[i] did not change from the prior run, in order
2067                          *     to validate assumptions made for computing branch
2068                          *     displacements.
2069                          */
2070                         if (unlikely(proglen + ilen > oldproglen ||
2071                                      proglen + ilen != addrs[i])) {
2072                                 pr_err("bpf_jit: fatal error\n");
2073                                 return -EFAULT;
2074                         }
2075                         memcpy(rw_image + proglen, temp, ilen);
2076                 }
2077                 proglen += ilen;
2078                 addrs[i] = proglen;
2079                 prog = temp;
2080         }
2081
2082         if (image && excnt != bpf_prog->aux->num_exentries) {
2083                 pr_err("extable is not populated\n");
2084                 return -EFAULT;
2085         }
2086         return proglen;
2087 }
2088
2089 static void clean_stack_garbage(const struct btf_func_model *m,
2090                                 u8 **pprog, int nr_stack_slots,
2091                                 int stack_size)
2092 {
2093         int arg_size, off;
2094         u8 *prog;
2095
2096         /* Generally speaking, the compiler will pass the arguments
2097          * on-stack with a "push" instruction, which takes 8 bytes
2098          * on the stack. In that case there are no garbage values
2099          * when we copy the arguments from the origin stack frame to the
2100          * current one in BPF_DW chunks.
2101          *
2102          * However, sometimes the compiler allocates only 4 bytes on
2103          * the stack for an argument. For now, this can only
2104          * happen when there is a single on-stack argument and its size
2105          * is no more than 4 bytes. In that case there are garbage
2106          * values in the upper 4 bytes of the slot where we store the
2107          * argument in the current stack frame.
2108          *
2109          * arguments on the origin stack:
2110          *
2111          * stack_arg_1(4 bytes) xxx(4 bytes)
2112          *
2113          * what we copy:
2114          *
2115          * stack_arg_1(8 bytes): stack_arg_1(origin) xxx
2116          *
2117          * and xxx is the garbage value that we need to clean here.
2118          */
2119         if (nr_stack_slots != 1)
2120                 return;
2121
2122         /* the size of the last argument */
2123         arg_size = m->arg_size[m->nr_args - 1];
2124         if (arg_size <= 4) {
2125                 off = -(stack_size - 4);
2126                 prog = *pprog;
2127                 /* mov DWORD PTR [rbp + off], 0 */
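                /* ModRM 0x85 encodes [rbp + disp32] and 0x45 encodes
                 * [rbp + disp8]; the EMIT(0, 4) below supplies the 32-bit
                 * immediate zero in both cases.
                 */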
2128                 if (!is_imm8(off))
2129                         EMIT2_off32(0xC7, 0x85, off);
2130                 else
2131                         EMIT3(0xC7, 0x45, off);
2132                 EMIT(0, 4);
2133                 *pprog = prog;
2134         }
2135 }
2136
2137 /* get the count of the regs that are used to pass arguments */
2138 static int get_nr_used_regs(const struct btf_func_model *m)
2139 {
2140         int i, arg_regs, nr_used_regs = 0;
2141
2142         for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2143                 arg_regs = (m->arg_size[i] + 7) / 8;
2144                 if (nr_used_regs + arg_regs <= 6)
2145                         nr_used_regs += arg_regs;
2146
2147                 if (nr_used_regs >= 6)
2148                         break;
2149         }
2150
2151         return nr_used_regs;
2152 }
2153
2154 static void save_args(const struct btf_func_model *m, u8 **prog,
2155                       int stack_size, bool for_call_origin)
2156 {
2157         int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2158         int i, j;
2159
2160         /* Store function arguments to stack.
2161          * For a function that accepts two pointers the sequence will be:
2162          * mov QWORD PTR [rbp-0x10],rdi
2163          * mov QWORD PTR [rbp-0x8],rsi
2164          */
2165         for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2166                 arg_regs = (m->arg_size[i] + 7) / 8;
2167
2168                 /* According to the research of Yonghong, struct members
2169                  * are either all in registers or all on the stack.
2170                  * Meanwhile, the compiler passes an argument in regs
2171                  * whenever the remaining regs can hold it.
2172                  *
2173                  * The args can therefore end up out of order. For example:
2174                  *
2175                  * struct foo_struct {
2176                  *     long a;
2177                  *     int b;
2178                  * };
2179                  * int foo(char, char, char, char, char, struct foo_struct,
2180                  *         char);
2181                  *
2182                  * args 1-5 and arg 7 will be passed in regs, and arg 6 will
2183                  * be passed on the stack.
2184                  */
2185                 if (nr_regs + arg_regs > 6) {
2186                         /* copy function arguments from origin stack frame
2187                          * into current stack frame.
2188                          *
2189                          * The starting address of the arguments on-stack
2190                          * is:
2191                          *   rbp + 8(push rbp) +
2192                          *   8(return addr of origin call) +
2193                          *   8(return addr of the caller)
2194                          * which means: rbp + 24
2195                          */
2196                         for (j = 0; j < arg_regs; j++) {
2197                                 emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2198                                          nr_stack_slots * 8 + 0x18);
2199                                 emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2200                                          -stack_size);
2201
2202                                 if (!nr_stack_slots)
2203                                         first_off = stack_size;
2204                                 stack_size -= 8;
2205                                 nr_stack_slots++;
2206                         }
2207                 } else {
2208                         /* When preparing the on-stack arguments for the
2209                          * origin call, copy only the arguments that live on
2210                          * the stack into 'stack_size' and skip reg-passed ones.
2211                          */
2212                         if (for_call_origin) {
2213                                 nr_regs += arg_regs;
2214                                 continue;
2215                         }
2216
2217                         /* copy the arguments from regs into stack */
2218                         for (j = 0; j < arg_regs; j++) {
2219                                 emit_stx(prog, BPF_DW, BPF_REG_FP,
2220                                          nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2221                                          -stack_size);
2222                                 stack_size -= 8;
2223                                 nr_regs++;
2224                         }
2225                 }
2226         }
2227
2228         clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2229 }
2230
2231 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2232                          int stack_size)
2233 {
2234         int i, j, arg_regs, nr_regs = 0;
2235
2236         /* Restore function arguments from stack.
2237          * For a function that accepts two pointers the sequence will be:
2238          * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2239          * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2240          *
2241          * The logic here is similar to what we do in save_args()
2242          */
2243         for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2244                 arg_regs = (m->arg_size[i] + 7) / 8;
2245                 if (nr_regs + arg_regs <= 6) {
2246                         for (j = 0; j < arg_regs; j++) {
2247                                 emit_ldx(prog, BPF_DW,
2248                                          nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2249                                          BPF_REG_FP,
2250                                          -stack_size);
2251                                 stack_size -= 8;
2252                                 nr_regs++;
2253                         }
2254                 } else {
2255                         stack_size -= 8 * arg_regs;
2256                 }
2257
2258                 if (nr_regs >= 6)
2259                         break;
2260         }
2261 }
2262
2263 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2264                            struct bpf_tramp_link *l, int stack_size,
2265                            int run_ctx_off, bool save_ret,
2266                            void *image, void *rw_image)
2267 {
2268         u8 *prog = *pprog;
2269         u8 *jmp_insn;
2270         int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2271         struct bpf_prog *p = l->link.prog;
2272         u64 cookie = l->cookie;
2273
2274         /* mov rdi, cookie */
2275         emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2276
2277         /* Prepare struct bpf_tramp_run_ctx.
2278          *
2279          * bpf_tramp_run_ctx is already preserved by
2280          * arch_prepare_bpf_trampoline().
2281          *
2282          * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2283          */
2284         emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2285
2286         /* arg1: mov rdi, progs[i] */
2287         emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2288         /* arg2: lea rsi, [rbp - run_ctx_off] */
2289         if (!is_imm8(-run_ctx_off))
2290                 EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
2291         else
2292                 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
2293
2294         if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
2295                 return -EINVAL;
2296         /* remember prog start time returned by __bpf_prog_enter */
2297         emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
2298
2299         /* if (__bpf_prog_enter*(prog) == 0)
2300          *      goto skip_exec_of_prog;
2301          */
2302         EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
2303         /* emit 2 nops that will be replaced with JE insn */
2304         jmp_insn = prog;
2305         emit_nops(&prog, 2);
2306
2307         /* arg1: lea rdi, [rbp - stack_size] */
2308         if (!is_imm8(-stack_size))
2309                 EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
2310         else
2311                 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
2312         /* arg2: progs[i]->insnsi for interpreter */
2313         if (!p->jited)
2314                 emit_mov_imm64(&prog, BPF_REG_2,
2315                                (long) p->insnsi >> 32,
2316                                (u32) (long) p->insnsi);
2317         /* call JITed bpf program or interpreter */
2318         if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
2319                 return -EINVAL;
2320
2321         /*
2322          * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
2323          * of the previous call which is then passed on the stack to
2324          * the next BPF program.
2325          *
2326          * BPF_TRAMP_FENTRY trampoline may need to return the return
2327          * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2328          */
2329         if (save_ret)
2330                 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2331
2332         /* replace 2 nops with JE insn, since jmp target is known */
2333         jmp_insn[0] = X86_JE;
2334         jmp_insn[1] = prog - jmp_insn - 2;
2335
2336         /* arg1: mov rdi, progs[i] */
2337         emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2338         /* arg2: mov rsi, rbx <- start time in nsec */
2339         emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
2340         /* arg3: lea rdx, [rbp - run_ctx_off] */
2341         if (!is_imm8(-run_ctx_off))
2342                 EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
2343         else
2344                 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
2345         if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
2346                 return -EINVAL;
2347
2348         *pprog = prog;
2349         return 0;
2350 }
2351
2352 static void emit_align(u8 **pprog, u32 align)
2353 {
2354         u8 *target, *prog = *pprog;
2355
2356         target = PTR_ALIGN(prog, align);
2357         if (target != prog)
2358                 emit_nops(&prog, target - prog);
2359
2360         *pprog = prog;
2361 }
2362
2363 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
2364 {
2365         u8 *prog = *pprog;
2366         s64 offset;
2367
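        /* The near jcc emitted below is 6 bytes (0x0F, jmp_cond + 0x10, rel32)
         * and its displacement is relative to the end of the instruction,
         * hence ip + 2 + 4.
         */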
2368         offset = func - (ip + 2 + 4);
2369         if (!is_simm32(offset)) {
2370                 pr_err("Target %p is out of range\n", func);
2371                 return -EINVAL;
2372         }
2373         EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
2374         *pprog = prog;
2375         return 0;
2376 }
2377
2378 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
2379                       struct bpf_tramp_links *tl, int stack_size,
2380                       int run_ctx_off, bool save_ret,
2381                       void *image, void *rw_image)
2382 {
2383         int i;
2384         u8 *prog = *pprog;
2385
2386         for (i = 0; i < tl->nr_links; i++) {
2387                 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2388                                     run_ctx_off, save_ret, image, rw_image))
2389                         return -EINVAL;
2390         }
2391         *pprog = prog;
2392         return 0;
2393 }
2394
2395 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
2396                               struct bpf_tramp_links *tl, int stack_size,
2397                               int run_ctx_off, u8 **branches,
2398                               void *image, void *rw_image)
2399 {
2400         u8 *prog = *pprog;
2401         int i;
2402
2403         /* The first fmod_ret program will receive a garbage return value.
2404          * Set this to 0 to avoid confusing the program.
2405          */
2406         emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2407         emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2408         for (i = 0; i < tl->nr_links; i++) {
2409                 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
2410                                     image, rw_image))
2411                         return -EINVAL;
2412
2413                 /* mod_ret prog stored return value into [rbp - 8]. Emit:
2414                  * if (*(u64 *)(rbp - 8) !=  0)
2415                  *      goto do_fexit;
2416                  */
2417                 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
2418                 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
2419
2420                 /* Save the location of the branch and generate 6 nops
2421                  * (2 bytes for the jump opcode and 4 bytes for the offset). These
2422                  * nops are replaced with a conditional jump once do_fexit (i.e. the
2423                  * start of the fexit invocation) is finalized.
2424                  */
2425                 branches[i] = prog;
2426                 emit_nops(&prog, 4 + 2);
2427         }
2428
2429         *pprog = prog;
2430         return 0;
2431 }
2432
2433 /* Example:
2434  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2435  * its 'struct btf_func_model' will be nr_args=2
2436  * The assembly code when eth_type_trans is executing after trampoline:
2437  *
2438  * push rbp
2439  * mov rbp, rsp
2440  * sub rsp, 16                     // space for skb and dev
2441  * push rbx                        // temp regs to pass start time
2442  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
2443  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
2444  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2445  * mov rbx, rax                    // remember start time if bpf stats are enabled
2446  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
2447  * call addr_of_jited_FENTRY_prog
2448  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2449  * mov rsi, rbx                    // prog start time
2450  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2451  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
2452  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
2453  * pop rbx
2454  * leave
2455  * ret
2456  *
2457  * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
2458  * replaced with 'call generated_bpf_trampoline'. When it returns
2459  * eth_type_trans will continue executing with original skb and dev pointers.
2460  *
2461  * The assembly code when eth_type_trans is called from trampoline:
2462  *
2463  * push rbp
2464  * mov rbp, rsp
2465  * sub rsp, 24                     // space for skb, dev, return value
2466  * push rbx                        // temp regs to pass start time
2467  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
2468  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
2469  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2470  * mov rbx, rax                    // remember start time if bpf stats are enabled
2471  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2472  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
2473  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2474  * mov rsi, rbx                    // prog start time
2475  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2476  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
2477  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
2478  * call eth_type_trans+5           // execute body of eth_type_trans
2479  * mov qword ptr [rbp - 8], rax    // save return value
2480  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
2481  * mov rbx, rax                    // remember start time if bpf stats are enabled
2482  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
2483  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
2484  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
2485  * mov rsi, rbx                    // prog start time
2486  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
2487  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
2488  * pop rbx
2489  * leave
2490  * add rsp, 8                      // skip eth_type_trans's frame
2491  * ret                             // return to its caller
2492  */
2493 static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
2494                                          void *rw_image_end, void *image,
2495                                          const struct btf_func_model *m, u32 flags,
2496                                          struct bpf_tramp_links *tlinks,
2497                                          void *func_addr)
2498 {
2499         int i, ret, nr_regs = m->nr_args, stack_size = 0;
2500         int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
2501         struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2502         struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2503         struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2504         void *orig_call = func_addr;
2505         u8 **branches = NULL;
2506         u8 *prog;
2507         bool save_ret;
2508
2509         /*
2510          * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
2511          * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
2512          * because of how @func_addr is used by those flags.
2513          */
2514         WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
2515                      (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
2516
2517         /* extra registers for struct arguments */
2518         for (i = 0; i < m->nr_args; i++) {
2519                 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
2520                         nr_regs += (m->arg_size[i] + 7) / 8 - 1;
2521         }
2522
2523         /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. Arguments 1-6
2524          * are passed in regs; the rest are passed on the stack.
2525          */
2526         if (nr_regs > MAX_BPF_FUNC_ARGS)
2527                 return -ENOTSUPP;
2528
2529         /* Generated trampoline stack layout:
2530          *
2531          * RBP + 8         [ return address  ]
2532          * RBP + 0         [ RBP             ]
2533          *
2534          * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
2535          *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
2536          *
2537          *                 [ reg_argN        ]  always
2538          *                 [ ...             ]
2539          * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
2540          *
2541          * RBP - nregs_off [ regs count      ]  always
2542          *
2543          * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
2544          *
2545          * RBP - rbx_off   [ rbx value       ]  always
2546          *
2547          * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2548          *
2549          *                     [ stack_argN ]  BPF_TRAMP_F_CALL_ORIG
2550          *                     [ ...        ]
2551          *                     [ stack_arg2 ]
2552          * RBP - arg_stack_off [ stack_arg1 ]
2553          * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
2554          */
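        /* Worked example (illustrative): with BPF_TRAMP_F_CALL_ORIG set, two
         * scalar arguments and no BPF_TRAMP_F_IP_ARG, the computation below
         * yields
         *   return value slot             ->  stack_size =  8
         *   regs_off      =  8 + 2 * 8    =  24
         *   nregs_off     = 24 + 8        =  32
         *   ip_off        = nregs_off     =  32  (no extra slot, flag unset)
         *   rbx_off       = 32 + 8        =  40
         *   run_ctx_off   = 40 + round_up(sizeof(struct bpf_tramp_run_ctx), 8)
         *   arg_stack_off = run_ctx_off        (<= 6 args, nothing spills)
         */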
2555
2556         /* room for return value of orig_call or fentry prog */
2557         save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2558         if (save_ret)
2559                 stack_size += 8;
2560
2561         stack_size += nr_regs * 8;
2562         regs_off = stack_size;
2563
2564         /* regs count  */
2565         stack_size += 8;
2566         nregs_off = stack_size;
2567
2568         if (flags & BPF_TRAMP_F_IP_ARG)
2569                 stack_size += 8; /* room for IP address argument */
2570
2571         ip_off = stack_size;
2572
2573         stack_size += 8;
2574         rbx_off = stack_size;
2575
2576         stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
2577         run_ctx_off = stack_size;
2578
2579         if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
2580                 /* the space used to pass arguments on the stack */
2581                 stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
2582                 /* Make sure the stack pointer is 16-byte aligned if we
2583                  * need to pass arguments on the stack, which means
2584                  *  [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
2585                  * should be 16-byte aligned. The following code depends on
2586                  * stack_size already being 8-byte aligned.
2587                  */
2588                 stack_size += (stack_size % 16) ? 0 : 8;
2589         }
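        /* E.g. (illustrative): a stack_size of 64 is a multiple of 16, so the
         * statement above adds 8 to make it 72; 72 plus the 24 bytes of
         * rbp/rip/origin-rip then ends on a 16-byte boundary (96). A
         * stack_size of 72 would already be correct and stay unchanged.
         */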
2590
2591         arg_stack_off = stack_size;
2592
2593         if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2594                 /* skip the patched call instruction and point orig_call to the
2595                  * actual body of the kernel function.
2596                  */
2597                 if (is_endbr(*(u32 *)orig_call))
2598                         orig_call += ENDBR_INSN_SIZE;
2599                 orig_call += X86_PATCH_SIZE;
2600         }
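        /* For example (illustrative): with IBT enabled the traced function
         * typically starts with a 4-byte endbr64 followed by the 5-byte
         * patched call (X86_PATCH_SIZE), so orig_call advances by 9 bytes
         * to reach the real function body.
         */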
2601
2602         prog = rw_image;
2603
2604         if (flags & BPF_TRAMP_F_INDIRECT) {
2605                 /*
2606                  * Indirect call for bpf_struct_ops
2607                  */
2608                 emit_cfi(&prog, cfi_get_func_hash(func_addr));
2609         } else {
2610                 /*
2611                  * Direct-call fentry stub; as such it needs to account for the
2612                  * __fentry__ call.
2613                  */
2614                 x86_call_depth_emit_accounting(&prog, NULL);
2615         }
2616         EMIT1(0x55);             /* push rbp */
2617         EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2618         if (!is_imm8(stack_size)) {
2619                 /* sub rsp, stack_size */
2620                 EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
2621         } else {
2622                 /* sub rsp, stack_size */
2623                 EMIT4(0x48, 0x83, 0xEC, stack_size);
2624         }
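        /* Encoding sketch (illustrative): a small frame such as stack_size == 40
         * fits in an imm8 and is emitted as 48 83 ec 28 (sub rsp, 0x28); frames
         * larger than 127 bytes need the imm32 form 48 81 ec imm32.
         */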
2625         if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
2626                 EMIT1(0x50);            /* push rax */
2627         /* mov QWORD PTR [rbp - rbx_off], rbx */
2628         emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
2629
2630         /* Store number of argument registers of the traced function:
2631          *   mov rax, nr_regs
2632          *   mov QWORD PTR [rbp - nregs_off], rax
2633          */
2634         emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
2635         emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
2636
2637         if (flags & BPF_TRAMP_F_IP_ARG) {
2638                 /* Store IP address of the traced function:
2639                  * movabsq rax, func_addr
2640                  * mov QWORD PTR [rbp - ip_off], rax
2641                  */
2642                 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
2643                 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2644         }
2645
2646         save_args(m, &prog, regs_off, false);
2647
2648         if (flags & BPF_TRAMP_F_CALL_ORIG) {
2649                 /* arg1: mov rdi, im */
2650                 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2651                 if (emit_rsb_call(&prog, __bpf_tramp_enter,
2652                                   image + (prog - (u8 *)rw_image))) {
2653                         ret = -EINVAL;
2654                         goto cleanup;
2655                 }
2656         }
2657
2658         if (fentry->nr_links) {
2659                 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
2660                                flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
2661                         return -EINVAL;
2662         }
2663
2664         if (fmod_ret->nr_links) {
2665                 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
2666                                    GFP_KERNEL);
2667                 if (!branches)
2668                         return -ENOMEM;
2669
2670                 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2671                                        run_ctx_off, branches, image, rw_image)) {
2672                         ret = -EINVAL;
2673                         goto cleanup;
2674                 }
2675         }
2676
2677         if (flags & BPF_TRAMP_F_CALL_ORIG) {
2678                 restore_regs(m, &prog, regs_off);
2679                 save_args(m, &prog, arg_stack_off, true);
2680
2681                 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
2682                         /* Before calling the original function, restore the
2683                          * tail_call_cnt from stack to rax.
2684                          */
2685                         RESTORE_TAIL_CALL_CNT(stack_size);
2686                 }
2687
2688                 if (flags & BPF_TRAMP_F_ORIG_STACK) {
2689                         emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
2690                         EMIT2(0xff, 0xd3); /* call *rbx */
2691                 } else {
2692                         /* call original function */
2693                         if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
2694                                 ret = -EINVAL;
2695                                 goto cleanup;
2696                         }
2697                 }
2698                 /* remember the return value on the stack for the bpf prog to access */
2699                 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2700                 im->ip_after_call = image + (prog - (u8 *)rw_image);
2701                 emit_nops(&prog, X86_PATCH_SIZE);
2702         }
2703
2704         if (fmod_ret->nr_links) {
2705                 /* From Intel 64 and IA-32 Architectures Optimization
2706                  * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2707                  * Coding Rule 11: All branch targets should be 16-byte
2708                  * aligned.
2709                  */
2710                 emit_align(&prog, 16);
2711                 /* Update the branches saved in invoke_bpf_mod_ret with the
2712                  * aligned address of do_fexit.
2713                  */
2714                 for (i = 0; i < fmod_ret->nr_links; i++) {
2715                         emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
2716                                             image + (branches[i] - (u8 *)rw_image), X86_JNE);
2717                 }
2718         }
2719
2720         if (fexit->nr_links) {
2721                 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
2722                                false, image, rw_image)) {
2723                         ret = -EINVAL;
2724                         goto cleanup;
2725                 }
2726         }
2727
2728         if (flags & BPF_TRAMP_F_RESTORE_REGS)
2729                 restore_regs(m, &prog, regs_off);
2730
2731         /* This needs to be done regardless. If there were fmod_ret programs,
2732          * the return value is only updated on the stack and still needs to be
2733          * restored to R0.
2734          */
2735         if (flags & BPF_TRAMP_F_CALL_ORIG) {
2736                 im->ip_epilogue = image + (prog - (u8 *)rw_image);
2737                 /* arg1: mov rdi, im */
2738                 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2739                 if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
2740                         ret = -EINVAL;
2741                         goto cleanup;
2742                 }
2743         } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
2744                 /* Before running the original function, restore the
2745                  * tail_call_cnt from stack to rax.
2746                  */
2747                 RESTORE_TAIL_CALL_CNT(stack_size);
2748         }
2749
2750         /* restore return value of orig_call or fentry prog back into RAX */
2751         if (save_ret)
2752                 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2753
2754         emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
2755         EMIT1(0xC9); /* leave */
2756         if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2757                 /* skip our return address and return to parent */
2758                 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2759         }
2760         emit_return(&prog, image + (prog - (u8 *)rw_image));
2761         /* Make sure the trampoline generation logic doesn't overflow */
2762         if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
2763                 ret = -EFAULT;
2764                 goto cleanup;
2765         }
2766         ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
2767
2768 cleanup:
2769         kfree(branches);
2770         return ret;
2771 }
2772
2773 void *arch_alloc_bpf_trampoline(unsigned int size)
2774 {
2775         return bpf_prog_pack_alloc(size, jit_fill_hole);
2776 }
2777
2778 void arch_free_bpf_trampoline(void *image, unsigned int size)
2779 {
2780         bpf_prog_pack_free(image, size);
2781 }
2782
2783 void arch_protect_bpf_trampoline(void *image, unsigned int size)
2784 {
2785 }
2786
2787 void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
2788 {
2789 }
2790
2791 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
2792                                 const struct btf_func_model *m, u32 flags,
2793                                 struct bpf_tramp_links *tlinks,
2794                                 void *func_addr)
2795 {
2796         void *rw_image, *tmp;
2797         int ret;
2798         u32 size = image_end - image;
2799
2800         /* rw_image doesn't need to be in the module memory range, so we can
2801          * use kvmalloc.
2802          */
2803         rw_image = kvmalloc(size, GFP_KERNEL);
2804         if (!rw_image)
2805                 return -ENOMEM;
2806
2807         ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
2808                                             flags, tlinks, func_addr);
2809         if (ret < 0)
2810                 goto out;
2811
2812         tmp = bpf_arch_text_copy(image, rw_image, size);
2813         if (IS_ERR(tmp))
2814                 ret = PTR_ERR(tmp);
2815 out:
2816         kvfree(rw_image);
2817         return ret;
2818 }
2819
2820 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
2821                              struct bpf_tramp_links *tlinks, void *func_addr)
2822 {
2823         struct bpf_tramp_image im;
2824         void *image;
2825         int ret;
2826
2827         /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
2828          * This will NOT cause fragmentation in the direct map, as we do not
2829          * call set_memory_*() on this buffer.
2830          *
2831          * We cannot use kvmalloc here, because we need the image to be in
2832          * the module memory range.
2833          */
2834         image = bpf_jit_alloc_exec(PAGE_SIZE);
2835         if (!image)
2836                 return -ENOMEM;
2837
2838         ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
2839                                             m, flags, tlinks, func_addr);
2840         bpf_jit_free_exec(image);
2841         return ret;
2842 }
2843
2844 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
2845 {
2846         u8 *jg_reloc, *prog = *pprog;
2847         int pivot, err, jg_bytes = 1;
2848         s64 jg_offset;
2849
2850         if (a == b) {
2851                 /* Leaf node of recursion, i.e. not a range of indices
2852                  * anymore.
2853                  */
2854                 EMIT1(add_1mod(0x48, BPF_REG_3));       /* cmp rdx,func */
2855                 if (!is_simm32(progs[a]))
2856                         return -1;
2857                 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2858                             progs[a]);
2859                 err = emit_cond_near_jump(&prog,        /* je func */
2860                                           (void *)progs[a], image + (prog - buf),
2861                                           X86_JE);
2862                 if (err)
2863                         return err;
2864
2865                 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
2866
2867                 *pprog = prog;
2868                 return 0;
2869         }
2870
2871         /* Not a leaf node, so we pivot, and recursively descend into
2872          * the lower and upper ranges.
2873          */
2874         pivot = (b - a) / 2;
2875         EMIT1(add_1mod(0x48, BPF_REG_3));               /* cmp rdx,func */
2876         if (!is_simm32(progs[a + pivot]))
2877                 return -1;
2878         EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2879
2880         if (pivot > 2) {                                /* jg upper_part */
2881                 /* Require near jump. */
2882                 jg_bytes = 4;
2883                 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2884         } else {
2885                 EMIT2(X86_JG, 0);
2886         }
2887         jg_reloc = prog;
2888
2889         err = emit_bpf_dispatcher(&prog, a, a + pivot,  /* emit lower_part */
2890                                   progs, image, buf);
2891         if (err)
2892                 return err;
2893
2894         /* From Intel 64 and IA-32 Architectures Optimization
2895          * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2896          * Coding Rule 11: All branch targets should be 16-byte
2897          * aligned.
2898          */
2899         emit_align(&prog, 16);
2900         jg_offset = prog - jg_reloc;
2901         emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2902
2903         err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
2904                                   b, progs, image, buf);
2905         if (err)
2906                 return err;
2907
2908         *pprog = prog;
2909         return 0;
2910 }
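/*
 * Shape of the emitted dispatcher (illustrative; addresses are made up): for
 * two sorted programs at 0x1000 and 0x2000 the recursion above produces
 * roughly
 *
 *	cmp rdx, 0x1000
 *	jg  upper			// short jg, since pivot <= 2
 *	cmp rdx, 0x1000
 *	je  0x1000			// direct jump to the matching program
 *	jmp rdx				// fallback indirect jump (retpoline
 *					// thunk when mitigations are enabled)
 *	...				// alignment padding to 16 bytes
 * upper:
 *	cmp rdx, 0x2000
 *	je  0x2000
 *	jmp rdx
 */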
2911
2912 static int cmp_ips(const void *a, const void *b)
2913 {
2914         const s64 *ipa = a;
2915         const s64 *ipb = b;
2916
2917         if (*ipa > *ipb)
2918                 return 1;
2919         if (*ipa < *ipb)
2920                 return -1;
2921         return 0;
2922 }
2923
2924 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
2925 {
2926         u8 *prog = buf;
2927
2928         sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2929         return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
2930 }
2931
2932 struct x64_jit_data {
2933         struct bpf_binary_header *rw_header;
2934         struct bpf_binary_header *header;
2935         int *addrs;
2936         u8 *image;
2937         int proglen;
2938         struct jit_context ctx;
2939 };
2940
2941 #define MAX_PASSES 20
2942 #define PADDING_PASSES (MAX_PASSES - 5)
2943
2944 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2945 {
2946         struct bpf_binary_header *rw_header = NULL;
2947         struct bpf_binary_header *header = NULL;
2948         struct bpf_prog *tmp, *orig_prog = prog;
2949         struct x64_jit_data *jit_data;
2950         int proglen, oldproglen = 0;
2951         struct jit_context ctx = {};
2952         bool tmp_blinded = false;
2953         bool extra_pass = false;
2954         bool padding = false;
2955         u8 *rw_image = NULL;
2956         u8 *image = NULL;
2957         int *addrs;
2958         int pass;
2959         int i;
2960
2961         if (!prog->jit_requested)
2962                 return orig_prog;
2963
2964         tmp = bpf_jit_blind_constants(prog);
2965         /*
2966          * If blinding was requested and we failed during blinding,
2967          * we must fall back to the interpreter.
2968          */
2969         if (IS_ERR(tmp))
2970                 return orig_prog;
2971         if (tmp != prog) {
2972                 tmp_blinded = true;
2973                 prog = tmp;
2974         }
2975
2976         jit_data = prog->aux->jit_data;
2977         if (!jit_data) {
2978                 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2979                 if (!jit_data) {
2980                         prog = orig_prog;
2981                         goto out;
2982                 }
2983                 prog->aux->jit_data = jit_data;
2984         }
2985         addrs = jit_data->addrs;
2986         if (addrs) {
2987                 ctx = jit_data->ctx;
2988                 oldproglen = jit_data->proglen;
2989                 image = jit_data->image;
2990                 header = jit_data->header;
2991                 rw_header = jit_data->rw_header;
2992                 rw_image = (void *)rw_header + ((void *)image - (void *)header);
2993                 extra_pass = true;
2994                 padding = true;
2995                 goto skip_init_addrs;
2996         }
2997         addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2998         if (!addrs) {
2999                 prog = orig_prog;
3000                 goto out_addrs;
3001         }
3002
3003         /*
3004          * Before the first pass, make a rough estimation of addrs[]:
3005          * each BPF instruction is translated to less than 64 bytes.
3006          */
3007         for (proglen = 0, i = 0; i <= prog->len; i++) {
3008                 proglen += 64;
3009                 addrs[i] = proglen;
3010         }
3011         ctx.cleanup_addr = proglen;
3012 skip_init_addrs:
3013
3014         /*
3015          * The JITed image shrinks with every pass and the loop iterates
3016          * until the image stops shrinking. Very large BPF programs
3017          * may converge only on the last pass. In such a case, do one more
3018          * pass to emit the final image.
3019          */
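        /*
         * Sketch of the convergence mechanism (a simplification of do_jit()
         * behaviour): from pass PADDING_PASSES onwards, jumps that could now
         * be encoded in a shorter form are padded with NOPs to keep their
         * previous length, so addrs[] stops moving and very large programs
         * cannot oscillate between encodings forever.
         */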
3020         for (pass = 0; pass < MAX_PASSES || image; pass++) {
3021                 if (!padding && pass >= PADDING_PASSES)
3022                         padding = true;
3023                 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3024                 if (proglen <= 0) {
3025 out_image:
3026                         image = NULL;
3027                         if (header) {
3028                                 bpf_arch_text_copy(&header->size, &rw_header->size,
3029                                                    sizeof(rw_header->size));
3030                                 bpf_jit_binary_pack_free(header, rw_header);
3031                         }
3032                         /* Fall back to interpreter mode */
3033                         prog = orig_prog;
3034                         if (extra_pass) {
3035                                 prog->bpf_func = NULL;
3036                                 prog->jited = 0;
3037                                 prog->jited_len = 0;
3038                         }
3039                         goto out_addrs;
3040                 }
3041                 if (image) {
3042                         if (proglen != oldproglen) {
3043                                 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
3044                                        proglen, oldproglen);
3045                                 goto out_image;
3046                         }
3047                         break;
3048                 }
3049                 if (proglen == oldproglen) {
3050                         /*
3051                          * The number of entries in extable is the number of BPF_LDX
3052                          * insns that access kernel memory via "pointer to BTF type".
3053                          * The verifier changed their opcode from LDX|MEM|size
3054                          * to LDX|PROBE_MEM|size to make JITing easier.
3055                          */
3056                         u32 align = __alignof__(struct exception_table_entry);
3057                         u32 extable_size = prog->aux->num_exentries *
3058                                 sizeof(struct exception_table_entry);
3059
3060                         /* allocate module memory for x86 insns and extable */
3061                         header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
3062                                                            &image, align, &rw_header, &rw_image,
3063                                                            jit_fill_hole);
3064                         if (!header) {
3065                                 prog = orig_prog;
3066                                 goto out_addrs;
3067                         }
3068                         prog->aux->extable = (void *) image + roundup(proglen, align);
3069                 }
3070                 oldproglen = proglen;
3071                 cond_resched();
3072         }
3073
3074         if (bpf_jit_enable > 1)
3075                 bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
3076
3077         if (image) {
3078                 if (!prog->is_func || extra_pass) {
3079                         /*
3080                          * bpf_jit_binary_pack_finalize fails in two scenarios:
3081                          *   1) header is not pointing to proper module memory;
3082                          *   2) the arch doesn't support bpf_arch_text_copy().
3083                          *
3084                          * Both cases are serious bugs and justify WARN_ON.
3085                          */
3086                         if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
3087                                 /* header has been freed */
3088                                 header = NULL;
3089                                 goto out_image;
3090                         }
3091
3092                         bpf_tail_call_direct_fixup(prog);
3093                 } else {
3094                         jit_data->addrs = addrs;
3095                         jit_data->ctx = ctx;
3096                         jit_data->proglen = proglen;
3097                         jit_data->image = image;
3098                         jit_data->header = header;
3099                         jit_data->rw_header = rw_header;
3100                 }
3101                 /*
3102                  * ctx.prog_offset is used when CFI preambles put code *before*
3103                  * the function. See emit_cfi(). For FineIBT specifically this code
3104                  * can also be executed, and bpf_prog_kallsyms_add() will
3105                  * generate an additional symbol to cover it, hence proglen is
3106                  * also decremented.
3107                  */
3108                 prog->bpf_func = (void *)image + cfi_get_offset();
3109                 prog->jited = 1;
3110                 prog->jited_len = proglen - cfi_get_offset();
3111         } else {
3112                 prog = orig_prog;
3113         }
3114
3115         if (!image || !prog->is_func || extra_pass) {
3116                 if (image)
3117                         bpf_prog_fill_jited_linfo(prog, addrs + 1);
3118 out_addrs:
3119                 kvfree(addrs);
3120                 kfree(jit_data);
3121                 prog->aux->jit_data = NULL;
3122         }
3123 out:
3124         if (tmp_blinded)
3125                 bpf_jit_prog_release_other(prog, prog == orig_prog ?
3126                                            tmp : orig_prog);
3127         return prog;
3128 }
3129
3130 bool bpf_jit_supports_kfunc_call(void)
3131 {
3132         return true;
3133 }
3134
3135 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
3136 {
3137         if (text_poke_copy(dst, src, len) == NULL)
3138                 return ERR_PTR(-EINVAL);
3139         return dst;
3140 }
3141
3142 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
3143 bool bpf_jit_supports_subprog_tailcalls(void)
3144 {
3145         return true;
3146 }
3147
3148 void bpf_jit_free(struct bpf_prog *prog)
3149 {
3150         if (prog->jited) {
3151                 struct x64_jit_data *jit_data = prog->aux->jit_data;
3152                 struct bpf_binary_header *hdr;
3153
3154                 /*
3155                  * If we fail the final pass of JIT (from jit_subprogs),
3156                  * the program may not be finalized yet. Call finalize here
3157                  * before freeing it.
3158                  */
3159                 if (jit_data) {
3160                         bpf_jit_binary_pack_finalize(prog, jit_data->header,
3161                                                      jit_data->rw_header);
3162                         kvfree(jit_data->addrs);
3163                         kfree(jit_data);
3164                 }
3165                 prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
3166                 hdr = bpf_jit_binary_pack_hdr(prog);
3167                 bpf_jit_binary_pack_free(hdr, NULL);
3168                 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3169         }
3170
3171         bpf_prog_unlock_free(prog);
3172 }
3173
3174 bool bpf_jit_supports_exceptions(void)
3175 {
3176         /* We unwind through both kernel frames (starting from within bpf_throw
3177          * call) and BPF frames. Therefore we require ORC unwinder to be enabled
3178          * to walk kernel frames and reach BPF frames in the stack trace.
3179          */
3180         return IS_ENABLED(CONFIG_UNWINDER_ORC);
3181 }
3182
3183 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3184 {
3185 #if defined(CONFIG_UNWINDER_ORC)
3186         struct unwind_state state;
3187         unsigned long addr;
3188
3189         for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
3190              unwind_next_frame(&state)) {
3191                 addr = unwind_get_return_address(&state);
3192                 if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
3193                         break;
3194         }
3195         return;
3196 #endif
3197         WARN(1, "verification of programs using bpf_throw should have failed\n");
3198 }
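/*
 * Usage sketch (hypothetical caller, not part of this file): the callback
 * returns false to stop the walk early.
 *
 *	static bool record_frame(void *cookie, u64 ip, u64 sp, u64 bp)
 *	{
 *		struct my_trace *t = cookie;	// hypothetical bookkeeping struct
 *
 *		if (t->nr >= t->max)
 *			return false;		// stop walking
 *		t->ips[t->nr++] = ip;
 *		return true;			// continue to the next frame
 *	}
 *
 *	arch_bpf_stack_walk(record_frame, &trace);
 */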
3199
3200 void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
3201                                struct bpf_prog *new, struct bpf_prog *old)
3202 {
3203         u8 *old_addr, *new_addr, *old_bypass_addr;
3204         int ret;
3205
3206         old_bypass_addr = old ? NULL : poke->bypass_addr;
3207         old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
3208         new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
3209
3210         /*
3211          * On program loading or teardown, the program's kallsym entry
3212          * might not be in place, so we use __bpf_arch_text_poke to skip
3213          * the kallsyms check.
3214          */
3215         if (new) {
3216                 ret = __bpf_arch_text_poke(poke->tailcall_target,
3217                                            BPF_MOD_JUMP,
3218                                            old_addr, new_addr);
3219                 BUG_ON(ret < 0);
3220                 if (!old) {
3221                         ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3222                                                    BPF_MOD_JUMP,
3223                                                    poke->bypass_addr,
3224                                                    NULL);
3225                         BUG_ON(ret < 0);
3226                 }
3227         } else {
3228                 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3229                                            BPF_MOD_JUMP,
3230                                            old_bypass_addr,
3231                                            poke->bypass_addr);
3232                 BUG_ON(ret < 0);
3233                 /* let other CPUs finish the execution of the program
3234                  * so that it will not be possible to expose them
3235                  * to an invalid nop / stack-unwind state
3236                  */
3237                 if (!ret)
3238                         synchronize_rcu();
3239                 ret = __bpf_arch_text_poke(poke->tailcall_target,
3240                                            BPF_MOD_JUMP,
3241                                            old_addr, NULL);
3242                 BUG_ON(ret < 0);
3243         }
3244 }