// SPDX-License-Identifier: GPL-2.0
/*
 * BPF Jit compiler for s390.
 *
 * Minimum build requirements:
 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
 *
 * Copyright IBM Corp. 2012,2015
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *            Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT "bpf_jit"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/init.h>
#include <linux/bpf.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
	u32 seen;		/* Flags to remember seen eBPF instructions */
	u32 seen_reg[16];	/* Array to remember which registers are used */
	u32 *addrs;		/* Array with relative instruction addresses */
	u8 *prg_buf;		/* Start of program */
	int size;		/* Size of program and literal pool */
	int size_prg;		/* Size of program */
	int prg;		/* Current position in program */
	int lit32_start;	/* Start of 32-bit literal pool */
	int lit32;		/* Current position in 32-bit literal pool */
	int lit64_start;	/* Start of 64-bit literal pool */
	int lit64;		/* Current position in 64-bit literal pool */
	int base_ip;		/* Base address for literal pool */
	int exit_ip;		/* Address of exit */
	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
	int tail_call_start;	/* Tail call start offset */
	int excnt;		/* Number of exception table entries */
#define SEEN_MEM	BIT(0)	/* use mem[] for temporary storage */
#define SEEN_LITERAL	BIT(1)	/* code uses literals */
#define SEEN_FUNC	BIT(2)	/* calls C functions */
#define SEEN_TAIL_CALL	BIT(3)	/* code uses tail calls */
#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM)
#define REG_W0		(MAX_BPF_JIT_REG + 0)	/* Work register 1 (even) */
#define REG_W1		(MAX_BPF_JIT_REG + 1)	/* Work register 2 (odd) */
#define REG_L		(MAX_BPF_JIT_REG + 2)	/* Literal pool register */
#define REG_15		(MAX_BPF_JIT_REG + 3)	/* Register 15 */
#define REG_0		REG_W0			/* Register 0 */
#define REG_1		REG_W1			/* Register 1 */
#define REG_2		BPF_REG_1		/* Register 2 */
#define REG_14		BPF_REG_0		/* Register 14 */
/*
 * Mapping of BPF registers to s390 registers
 */
static const int reg2hex[] = {
	/* Function parameters */
	/* Call saved registers */
	/* BPF stack pointer */
	/* Register for blinding */
	/* Work registers for s390x backend */
static inline u32 reg(u32 dst_reg, u32 src_reg)
{
	return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
}
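
/*
 * Worked example: the aliases above say that reg2hex maps BPF_REG_0 to
 * 14 and BPF_REG_1 to 2, so reg(BPF_REG_0, BPF_REG_1) == 0xe2 - the
 * R1/R2 nibble pair naming %r14 and %r2 in an RR/RRE instruction.
 */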
static inline u32 reg_high(u32 reg)
{
	return reg2hex[reg] << 4;
}

static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
{
	u32 r1 = reg2hex[b1];

	if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
		jit->seen_reg[r1] = 1;
}

#define REG_SET_SEEN(b1)				\
	reg_set_seen(jit, b1);				\

#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
/*
 * EMIT macros for code generation
 */

#define _EMIT2(op)					\
	if (jit->prg_buf)				\
		*(u16 *) (jit->prg_buf + jit->prg) = (op);	\

#define EMIT2(op, b1, b2)				\
	_EMIT2((op) | reg(b1, b2));			\

#define _EMIT4(op)					\
	if (jit->prg_buf)				\
		*(u32 *) (jit->prg_buf + jit->prg) = (op);	\

#define EMIT4(op, b1, b2)				\
	_EMIT4((op) | reg(b1, b2));			\
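
/*
 * For instance, EMIT4(0xb9080000, BPF_REG_0, BPF_REG_1) ORs in
 * reg(BPF_REG_0, BPF_REG_1) == 0xe2 and stores the four bytes
 * 0xb90800e2 - "agr %r14,%r2", the 64-bit add used below.
 */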
#define EMIT4_RRF(op, b1, b2, b3)			\
	_EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2));	\

#define _EMIT4_DISP(op, disp)				\
	unsigned int __disp = (disp) & 0xfff;		\
	_EMIT4((op) | __disp);				\

#define EMIT4_DISP(op, b1, b2, disp)			\
	_EMIT4_DISP((op) | reg_high(b1) << 16 |		\
		    reg_high(b2) << 8, (disp));		\

#define EMIT4_IMM(op, b1, imm)				\
	unsigned int __imm = (imm) & 0xffff;		\
	_EMIT4((op) | reg_high(b1) << 16 | __imm);	\

#define EMIT4_PCREL(op, pcrel)				\
	long __pcrel = ((pcrel) >> 1) & 0xffff;		\
	_EMIT4((op) | __pcrel);				\

#define EMIT4_PCREL_RIC(op, mask, target)		\
	int __rel = ((target) - jit->prg) / 2;		\
	_EMIT4((op) | (mask) << 20 | (__rel & 0xffff));	\

#define _EMIT6(op1, op2)				\
	if (jit->prg_buf) {				\
		*(u32 *) (jit->prg_buf + jit->prg) = (op1);	\
		*(u16 *) (jit->prg_buf + jit->prg + 4) = (op2);	\

#define _EMIT6_DISP(op1, op2, disp)			\
	unsigned int __disp = (disp) & 0xfff;		\
	_EMIT6((op1) | __disp, op2);			\

#define _EMIT6_DISP_LH(op1, op2, disp)			\
	u32 _disp = (u32) (disp);			\
	unsigned int __disp_h = _disp & 0xff000;	\
	unsigned int __disp_l = _disp & 0x00fff;	\
	_EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4);	\

#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)	\
	_EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 |	\
		       reg_high(b3) << 8, op2, disp);	\

#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target)	\
	unsigned int rel = (int)((target) - jit->prg) / 2;	\
	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff),	\
	       (op2) | (mask) << 12);				\

#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target)	\
	unsigned int rel = (int)((target) - jit->prg) / 2;	\
	_EMIT6((op1) | (reg_high(b1) | (mask)) << 16 |		\
	       (rel & 0xffff), (op2) | ((imm) & 0xff) << 8);	\
	BUILD_BUG_ON(((unsigned long) (imm)) > 0xff);		\

#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
	int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;	\
	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\

#define EMIT6_PCREL_RILB(op, b, target)				\
	unsigned int rel = (int)((target) - jit->prg) / 2;	\
	_EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\

#define EMIT6_PCREL_RIL(op, target)				\
	unsigned int rel = (int)((target) - jit->prg) / 2;	\
	_EMIT6((op) | rel >> 16, rel & 0xffff);			\

#define EMIT6_PCREL_RILC(op, mask, target)			\
	EMIT6_PCREL_RIL((op) | (mask) << 20, (target));		\

#define _EMIT6_IMM(op, imm)					\
	unsigned int __imm = (imm);				\
	_EMIT6((op) | (__imm >> 16), __imm & 0xffff);		\

#define EMIT6_IMM(op, b1, imm)					\
	_EMIT6_IMM((op) | reg_high(b1) << 16, imm);		\
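
/*
 * Example encoding: EMIT6_IMM(0xc00f0000, BPF_REG_1, 0x12345678) merges
 * reg_high(BPF_REG_1) == 0x20 into the first word and splits the
 * immediate across both operands, emitting 0xc02f1234 0x5678, i.e.
 * "llilf %r2,0x12345678".
 */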
#define _EMIT_CONST_U32(val)					\
	if (jit->prg_buf)					\
		*(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\

#define EMIT_CONST_U32(val)					\
	jit->seen |= SEEN_LITERAL;				\
	_EMIT_CONST_U32(val) - jit->base_ip;			\

#define _EMIT_CONST_U64(val)					\
	if (jit->prg_buf)					\
		*(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\

#define EMIT_CONST_U64(val)					\
	jit->seen |= SEEN_LITERAL;				\
	_EMIT_CONST_U64(val) - jit->base_ip;			\

#define EMIT_ZERO(b1)						\
	if (!fp->aux->verifier_zext) {				\
		/* llgfr %dst,%dst (zero extend to 64 bit) */	\
		EMIT4(0xb9160000, b1, b1);			\
/*
 * Return whether this is the first pass. The first pass is special, since we
 * don't know any sizes yet, and thus must be conservative.
 */
static bool is_first_pass(struct bpf_jit *jit)
{
	return jit->size == 0;
}
/*
 * Return whether this is the code generation pass. The code generation pass is
 * special, since we should change as little as possible.
 */
static bool is_codegen_pass(struct bpf_jit *jit)
{
	return jit->prg_buf;
}
/*
 * Return whether "rel" can be encoded as a short PC-relative offset
 */
static bool is_valid_rel(int rel)
{
	return rel >= -65536 && rel <= 65534;
}
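
/*
 * These bounds fall out of the 16-bit signed halfword offset used by
 * RI/RIE-format branches: 2 * -32768 = -65536 and 2 * 32767 = 65534.
 */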
/*
 * Return whether "off" can be reached using a short PC-relative offset
 */
static bool can_use_rel(struct bpf_jit *jit, int off)
{
	return is_valid_rel(off - jit->prg);
}
/*
 * Return whether given displacement can be encoded using
 * Long-Displacement Facility
 */
static bool is_valid_ldisp(int disp)
{
	return disp >= -524288 && disp <= 524287;
}
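
/* i.e. a 20-bit signed displacement: -2^19 ... 2^19 - 1 bytes. */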
/*
 * Return whether the next 32-bit literal pool entry can be referenced using
 * Long-Displacement Facility
 */
static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
{
	return is_valid_ldisp(jit->lit32 - jit->base_ip);
}

/*
 * Return whether the next 64-bit literal pool entry can be referenced using
 * Long-Displacement Facility
 */
static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
{
	return is_valid_ldisp(jit->lit64 - jit->base_ip);
}
/*
 * Fill whole space with illegal instructions
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	memset(area, 0, size);
}
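
/*
 * Zero-filling is sufficient here because 0x0000 is not a valid s390
 * opcode: stray execution of the hole raises an operation exception
 * instead of running leftover bytes as code.
 */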
/*
 * Save registers "rs" (register start) to "re" (register end) on the stack
 */
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	if (rs == re)
		/* stg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
	else
		/* stmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
}

/*
 * Restore registers "rs" (register start) to "re" (register end) from the stack
 */
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
{
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	if (jit->seen & SEEN_STACK)
		off += STK_OFF + stack_depth;

	if (rs == re)
		/* lg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
	else
		/* lmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
}

/*
 * Return first seen register (from start)
 */
static int get_start(struct bpf_jit *jit, int start)
{
	int i;

	for (i = start; i <= 15; i++) {
		if (jit->seen_reg[i])
			return i;
	}
	return 0;
}

/*
 * Return last seen register (from start) (gap >= 2)
 */
static int get_end(struct bpf_jit *jit, int start)
{
	int i;

	for (i = start; i < 15; i++) {
		if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
			return i - 1;
	}
	return jit->seen_reg[15] ? 15 : 14;
}
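
/*
 * Example of the resulting chunking: if only %r6, %r7, %r12 and %r13
 * were seen, save_restore_regs() below emits two stmg/lmg pairs, one
 * for %r6-%r7 and one for %r12-%r13, instead of a single range that
 * would needlessly include %r8-%r11.
 */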
#define REGS_SAVE	1
#define REGS_RESTORE	0

/*
 * Save and restore clobbered registers (6-15) on stack.
 * We save/restore registers in chunks with gap >= 2 registers.
 */
static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{
	const int last = 15, save_restore_size = 6;
	int re = 6, rs;

	if (is_first_pass(jit)) {
		/*
		 * We don't know yet which registers are used. Reserve space
		 * conservatively.
		 */
		jit->prg += (last - re + 1) * save_restore_size;
		return;
	}

	do {
		rs = get_start(jit, re);
		if (!rs)
			break;
		re = get_end(jit, rs + 1);
		if (op == REGS_SAVE)
			save_regs(jit, rs, re);
		else
			restore_regs(jit, rs, re, stack_depth);
		re++;
	} while (re <= last);
}
static void bpf_skip(struct bpf_jit *jit, int size)
{
	if (size >= 6 && !is_valid_rel(size)) {
		/* brcl 0xf,size */
		EMIT6_PCREL_RIL(0xc0f40000, size);
		size -= 6;
	} else if (size >= 4 && is_valid_rel(size)) {
		/* brc 0xf,size */
		EMIT4_PCREL(0xa7f40000, size);
		size -= 4;
	}
	while (size >= 2) {
		/* bcr 0,%r0 */
		_EMIT2(0x0700);
		size -= 2;
	}
}
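
/*
 * Worked example: bpf_skip(jit, 8) emits "brc 0xf" over the whole gap
 * (4 bytes, target 8 bytes past the branch address) and then two
 * 2-byte "bcr 0,%r0" nops to fill the rest.
 */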
/*
 * Emit function prologue
 *
 * Save registers and create stack frame if necessary.
 * See stack frame layout description in "bpf_jit.h"!
 */
static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
{
	if (jit->seen & SEEN_TAIL_CALL) {
		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
	} else {
		/*
		 * There are no tail calls. Insert nops in order to have
		 * tail_call_start at a predictable offset.
		 */
		bpf_skip(jit, 6);
	}
	/* Tail calls have to skip the above initialization */
	jit->tail_call_start = jit->prg;
	/* Save registers */
	save_restore_regs(jit, REGS_SAVE, stack_depth);
	/* Setup literal pool */
	if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
		if (!is_first_pass(jit) &&
		    is_valid_ldisp(jit->size - (jit->prg + 2))) {
			/* basr %l,0 */
			EMIT2(0x0d00, REG_L, REG_0);
			jit->base_ip = jit->prg;
		} else {
			/* larl %l,lit32_start */
			EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
			jit->base_ip = jit->lit32_start;
		}
	}
	/* Setup stack and backchain */
	if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
		if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
			/* lgr %w1,%r15 (backchain) */
			EMIT4(0xb9040000, REG_W1, REG_15);
		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
		/* aghi %r15,-STK_OFF */
		EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
		if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
			/* stg %w1,152(%r15) (backchain) */
			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
				      REG_15, 152);
	}
}
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
	jit->exit_ip = jit->prg;
	/* Load exit code: lgr %r2,%b0 */
	EMIT4(0xb9040000, REG_2, BPF_REG_0);
	/* Restore registers */
	save_restore_regs(jit, REGS_RESTORE, stack_depth);
	if (nospec_uses_trampoline()) {
		jit->r14_thunk_ip = jit->prg;
		/* Generate __s390_indirect_jump_r14 thunk */
		/* exrl %r0,.+10 */
		EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
		/* j . */
		EMIT4_PCREL(0xa7f40000, 0);
	}
	/* br %r14 */
	_EMIT2(0x07fe);

	if ((nospec_uses_trampoline()) &&
	    (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
		jit->r1_thunk_ip = jit->prg;
		/* Generate __s390_indirect_jump_r1 thunk */
		/* exrl %r0,.+10 */
		EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
		/* j . */
		EMIT4_PCREL(0xa7f40000, 0);
		/* br %r1 */
		_EMIT2(0x07f1);
	}
}
static int get_probe_mem_regno(const u8 *insn)
{
	/*
	 * insn must point to llgc, llgh, llgf or lg, which have destination
	 * register at the same position.
	 */
	if (insn[0] != 0xe3) /* common llgc, llgh, llgf and lg prefix */
		return -1;
	if (insn[5] != 0x90 && /* llgc */
	    insn[5] != 0x91 && /* llgh */
	    insn[5] != 0x16 && /* llgf */
	    insn[5] != 0x04) /* lg */
		return -1;
	return insn[1] >> 4;
}
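
/*
 * Example: "lg %r3,0(%r1)" assembles to e3 30 10 00 00 04, so insn[0]
 * and insn[5] carry the opcode and the destination register is the
 * high nibble of insn[1] (here 3).
 */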
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	regs->psw.addr = extable_fixup(x);
	regs->gprs[x->data] = 0;
	return true;
}
static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
			     int probe_prg, int nop_prg)
{
	struct exception_table_entry *ex;
	s64 delta;
	u8 *insn;
	int i, reg, prg;

	if (!fp->aux->extable)
		/* Do nothing during early JIT passes. */
		return 0;
	insn = jit->prg_buf + probe_prg;
	reg = get_probe_mem_regno(insn);
	if (WARN_ON_ONCE(reg < 0))
		/* JIT bug - unexpected probe instruction. */
		return -1;
	if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
		/* JIT bug - gap between probe and nop instructions. */
		return -1;
	for (i = 0; i < 2; i++) {
		if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
			/* Verifier bug - not enough entries. */
			return -1;
		ex = &fp->aux->extable[jit->excnt];
		/* Add extable entries for probe and nop instructions. */
		prg = i == 0 ? probe_prg : nop_prg;
		delta = jit->prg_buf + prg - (u8 *)&ex->insn;
		if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
			/* JIT bug - code and extable must be close. */
			return -1;
		/*
		 * Always land on the nop. Note that the extable
		 * infrastructure ignores the fixup field; it is handled by
		 * ex_handler_bpf().
		 */
		delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
		if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
			/* JIT bug - landing pad and extable must be close. */
			return -1;
		ex->type = EX_TYPE_BPF;
		ex->data = reg;
		jit->excnt++;
	}
	return 0;
}
/*
 * Compile one eBPF instruction into s390x code
 *
 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 * stack space for the large switch statement.
 */
static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
				 int i, bool extra_pass, u32 stack_depth)
{
	struct bpf_insn *insn = &fp->insnsi[i];
	u32 dst_reg = insn->dst_reg;
	u32 src_reg = insn->src_reg;
	int last, insn_count = 1;
	u32 *addrs = jit->addrs;
	s32 imm = insn->imm;
	s16 off = insn->off;
	int probe_prg = -1, nop_prg;
	unsigned int mask;
	bool is_jmp32;
	int err;

	if (BPF_CLASS(insn->code) == BPF_LDX &&
	    BPF_MODE(insn->code) == BPF_PROBE_MEM)
		probe_prg = jit->prg;
	switch (insn->code) {
	case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
		/* llgfr %dst,%src */
		EMIT4(0xb9160000, dst_reg, src_reg);
		if (insn_is_zext(&insn[1]))
			insn_count = 2;
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
		EMIT4(0xb9040000, dst_reg, src_reg);
		break;
	case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
		EMIT6_IMM(0xc00f0000, dst_reg, imm);
		if (insn_is_zext(&insn[1]))
			insn_count = 2;
		break;
	case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
		EMIT6_IMM(0xc0010000, dst_reg, imm);
		break;
	case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
		/* 16 byte instruction that uses two 'struct bpf_insn' */
		imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
		EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
		insn_count = 2;
		break;
	case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
		EMIT2(0x1a00, dst_reg, src_reg);
	case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
		EMIT4(0xb9080000, dst_reg, src_reg);
	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
		EMIT6_IMM(0xc20b0000, dst_reg, imm);
	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
		EMIT6_IMM(0xc2080000, dst_reg, imm);
	case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
		EMIT2(0x1b00, dst_reg, src_reg);
	case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
		EMIT4(0xb9090000, dst_reg, src_reg);
	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
		if (imm == -0x80000000) {
			/* algfi %dst,0x80000000 */
			EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
		} else {
			EMIT6_IMM(0xc2080000, dst_reg, -imm);
		}
	case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
		EMIT4(0xb2520000, dst_reg, src_reg);
	case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
		EMIT4(0xb90c0000, dst_reg, src_reg);
	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
		EMIT6_IMM(0xc2010000, dst_reg, imm);
	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
		EMIT6_IMM(0xc2000000, dst_reg, imm);
	case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
	case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
	{
		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

		EMIT4_IMM(0xa7080000, REG_W0, 0);
		EMIT2(0x1800, REG_W1, dst_reg);
		EMIT4(0xb9970000, REG_W0, src_reg);
		EMIT4(0xb9160000, dst_reg, rc_reg);
		if (insn_is_zext(&insn[1]))
			insn_count = 2;
		break;
	}
	case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
	{
		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

		EMIT4_IMM(0xa7090000, REG_W0, 0);
		EMIT4(0xb9040000, REG_W1, dst_reg);
		EMIT4(0xb9870000, REG_W0, src_reg);
		EMIT4(0xb9040000, dst_reg, rc_reg);
		break;
	}
	case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
	case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
	{
		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

		if (imm == 1) {
			if (BPF_OP(insn->code) == BPF_MOD)
				EMIT4_IMM(0xa7090000, dst_reg, 0);
			break;
		}
		EMIT4_IMM(0xa7080000, REG_W0, 0);
		EMIT2(0x1800, REG_W1, dst_reg);
		if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
			/* dl %w0,<d(imm)>(%l) */
			EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
				      EMIT_CONST_U32(imm));
		} else {
			/* lgfrl %dst,imm */
			EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
					 _EMIT_CONST_U32(imm));
			jit->seen |= SEEN_LITERAL;
			EMIT4(0xb9970000, REG_W0, dst_reg);
		}
		EMIT4(0xb9160000, dst_reg, rc_reg);
		if (insn_is_zext(&insn[1]))
			insn_count = 2;
		break;
	}
	case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
	{
		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

		if (imm == 1) {
			if (BPF_OP(insn->code) == BPF_MOD)
				EMIT4_IMM(0xa7090000, dst_reg, 0);
			break;
		}
		EMIT4_IMM(0xa7090000, REG_W0, 0);
		EMIT4(0xb9040000, REG_W1, dst_reg);
		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
			/* dlg %w0,<d(imm)>(%l) */
			EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
				      EMIT_CONST_U64(imm));
		} else {
			/* lgrl %dst,imm */
			EMIT6_PCREL_RILB(0xc4080000, dst_reg,
					 _EMIT_CONST_U64(imm));
			jit->seen |= SEEN_LITERAL;
			EMIT4(0xb9870000, REG_W0, dst_reg);
		}
		EMIT4(0xb9040000, dst_reg, rc_reg);
		break;
	}
	case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
		EMIT2(0x1400, dst_reg, src_reg);
	case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
		EMIT4(0xb9800000, dst_reg, src_reg);
	case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
		EMIT6_IMM(0xc00b0000, dst_reg, imm);
	case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
			/* ng %dst,<d(imm)>(%l) */
			EMIT6_DISP_LH(0xe3000000, 0x0080,
				      dst_reg, REG_0, REG_L,
				      EMIT_CONST_U64(imm));
		} else {
			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
					 _EMIT_CONST_U64(imm));
			jit->seen |= SEEN_LITERAL;
			EMIT4(0xb9800000, dst_reg, REG_W0);
		}
		break;
	case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		EMIT2(0x1600, dst_reg, src_reg);
	case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
		EMIT4(0xb9810000, dst_reg, src_reg);
	case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		EMIT6_IMM(0xc00d0000, dst_reg, imm);
	case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
			/* og %dst,<d(imm)>(%l) */
			EMIT6_DISP_LH(0xe3000000, 0x0081,
				      dst_reg, REG_0, REG_L,
				      EMIT_CONST_U64(imm));
		} else {
			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
					 _EMIT_CONST_U64(imm));
			jit->seen |= SEEN_LITERAL;
			EMIT4(0xb9810000, dst_reg, REG_W0);
		}
		break;
	case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
		EMIT2(0x1700, dst_reg, src_reg);
	case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
		EMIT4(0xb9820000, dst_reg, src_reg);
	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
		EMIT6_IMM(0xc0070000, dst_reg, imm);
	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
			/* xg %dst,<d(imm)>(%l) */
			EMIT6_DISP_LH(0xe3000000, 0x0082,
				      dst_reg, REG_0, REG_L,
				      EMIT_CONST_U64(imm));
		} else {
			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
					 _EMIT_CONST_U64(imm));
			jit->seen |= SEEN_LITERAL;
			EMIT4(0xb9820000, dst_reg, REG_W0);
		}
		break;
	case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
		/* sll %dst,0(%src) */
		EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
	case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
		/* sllg %dst,%dst,0(%src) */
		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
		/* sll %dst,imm(%r0) */
		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
		/* sllg %dst,%dst,imm(%r0) */
		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
	case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
		/* srl %dst,0(%src) */
		EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
	case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
		/* srlg %dst,%dst,0(%src) */
		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
		/* srl %dst,imm(%r0) */
		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
		/* srlg %dst,%dst,imm(%r0) */
		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
	case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
		/* sra %dst,0(%src) */
		EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
		/* srag %dst,%dst,0(%src) */
		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst) >>= imm */
		/* sra %dst,imm(%r0) */
		EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
		/* srag %dst,%dst,imm(%r0) */
		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
	case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
		EMIT2(0x1300, dst_reg, dst_reg);
	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
		/* lcgr %dst,%dst */
		EMIT4(0xb9030000, dst_reg, dst_reg);
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		/* s390 is big endian, therefore only clear high order bytes */
		switch (imm) {
		case 16: /* dst = (u16) cpu_to_be16(dst) */
			/* llghr %dst,%dst */
			EMIT4(0xb9850000, dst_reg, dst_reg);
			if (insn_is_zext(&insn[1]))
				insn_count = 2;
			break;
		case 32: /* dst = (u32) cpu_to_be32(dst) */
			if (!fp->aux->verifier_zext)
				/* llgfr %dst,%dst */
				EMIT4(0xb9160000, dst_reg, dst_reg);
			break;
		case 64: /* dst = (u64) cpu_to_be64(dst) */
			break;
		}
		break;
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16: /* dst = (u16) cpu_to_le16(dst) */
			/* lrvr %dst,%dst */
			EMIT4(0xb91f0000, dst_reg, dst_reg);
			/* srl %dst,16(%r0) */
			EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
			/* llghr %dst,%dst */
			EMIT4(0xb9850000, dst_reg, dst_reg);
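			/*
			 * Worked example: for %dst = 0x...0000a1b2, lrvr
			 * gives low word 0xb2a10000, srl 16 yields 0xb2a1,
			 * and llghr clears all bits above bit 15, leaving
			 * the byte-swapped halfword 0xb2a1.
			 */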
			if (insn_is_zext(&insn[1]))
				insn_count = 2;
			break;
		case 32: /* dst = (u32) cpu_to_le32(dst) */
			/* lrvr %dst,%dst */
			EMIT4(0xb91f0000, dst_reg, dst_reg);
			if (!fp->aux->verifier_zext)
				/* llgfr %dst,%dst */
				EMIT4(0xb9160000, dst_reg, dst_reg);
			break;
		case 64: /* dst = (u64) cpu_to_le64(dst) */
			/* lrvgr %dst,%dst */
			EMIT4(0xb90f0000, dst_reg, dst_reg);
			break;
		}
		break;
	/*
	 * BPF_NOSPEC (speculation barrier)
	 */
	case BPF_ST | BPF_NOSPEC:
		break;
	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
		/* stcy %src,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		/* sthy %src,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		/* sty %src,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		/* stg %src,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
		EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
		/* stcy %w0,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
		EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
		/* sthy %w0,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
		EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
		/* sty %w0,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
		EMIT6_IMM(0xc0010000, REG_W0, imm);
		/* stg %w0,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_STX | BPF_ATOMIC | BPF_DW:
	case BPF_STX | BPF_ATOMIC | BPF_W:
	{
		bool is32 = BPF_SIZE(insn->code) == BPF_W;

		switch (insn->imm) {
/* {op32|op64} {%w0|%src},%src,off(%dst) */
#define EMIT_ATOMIC(op32, op64) do {					\
	EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64),		\
		      (insn->imm & BPF_FETCH) ? src_reg : REG_W0,	\
		      src_reg, dst_reg, off);				\
	if (is32 && (insn->imm & BPF_FETCH))				\
		EMIT_ZERO(src_reg);					\
} while (0)
		case BPF_ADD:
		case BPF_ADD | BPF_FETCH:
			EMIT_ATOMIC(0x00fa, 0x00ea);
			break;
		case BPF_AND:
		case BPF_AND | BPF_FETCH:
			EMIT_ATOMIC(0x00f4, 0x00e4);
			break;
		case BPF_OR:
		case BPF_OR | BPF_FETCH:
			EMIT_ATOMIC(0x00f6, 0x00e6);
			break;
		case BPF_XOR:
		case BPF_XOR | BPF_FETCH:
			EMIT_ATOMIC(0x00f7, 0x00e7);
			break;
		case BPF_XCHG:
			/* {ly|lg} %w0,off(%dst) */
			EMIT6_DISP_LH(0xe3000000,
				      is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
				      dst_reg, off);
			/* 0: {csy|csg} %w0,%src,off(%dst) */
			EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
				      REG_W0, src_reg, dst_reg, off);
			/* brc 4,0b */
			EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
			/* {llgfr|lgr} %src,%w0 */
			EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
			if (is32 && insn_is_zext(&insn[1]))
				insn_count = 2;
			break;
		case BPF_CMPXCHG:
			/* {csy|csg} %b0,%src,off(%dst) */
			EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
				      BPF_REG_0, src_reg, dst_reg, off);
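			/*
			 * Either way %b0 ends up with the value that was in
			 * memory: on mismatch csy/csg load it into the first
			 * operand, and on success the first operand already
			 * held it - which is exactly the BPF_CMPXCHG
			 * contract for r0.
			 */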
			break;
		default:
			pr_err("Unknown atomic operation %02x\n", insn->imm);
			return -1;
		}

		jit->seen |= SEEN_MEM;
		break;
	}
	case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		/* llgc %dst,0(off,%src) */
		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		if (insn_is_zext(&insn[1]))
			insn_count = 2;
		break;
	case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		/* llgh %dst,0(off,%src) */
		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		if (insn_is_zext(&insn[1]))
			insn_count = 2;
		break;
	case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		/* llgf %dst,off(%src) */
		jit->seen |= SEEN_MEM;
		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
		if (insn_is_zext(&insn[1]))
			insn_count = 2;
		break;
	case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
		/* lg %dst,0(off,%src) */
		jit->seen |= SEEN_MEM;
		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
		break;
	case BPF_JMP | BPF_CALL:
	{
		u64 func;
		bool func_addr_fixed;
		int ret;

		ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
					    &func, &func_addr_fixed);
		if (ret < 0)
			return -1;

		REG_SET_SEEN(BPF_REG_5);
		jit->seen |= SEEN_FUNC;
		/* lgrl %w1,func */
		EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
		if (nospec_uses_trampoline()) {
			/* brasl %r14,__s390_indirect_jump_r1 */
			EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
		} else {
			EMIT2(0x0d00, REG_14, REG_W1);
		}
		/* lgr %b0,%r2: load return value into %b0 */
		EMIT4(0xb9040000, BPF_REG_0, REG_2);
		break;
	}
	case BPF_JMP | BPF_TAIL_CALL: {
		int patch_1_clrj, patch_2_clij, patch_3_brc;

		/*
		 * Implicit input:
		 *  B1: pointer to ctx
		 *  B2: pointer to bpf_array
		 *  B3: index in bpf_array
		 */
		jit->seen |= SEEN_TAIL_CALL;

		/*
		 * if (index >= array->map.max_entries)
		 *         goto out;
		 */

		/* llgf %w1,map.max_entries(%b2) */
		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
			      offsetof(struct bpf_array, map.max_entries));
		/* if ((u32)%b3 >= (u32)%w1) goto out; */
		/* clrj %b3,%w1,0xa,out */
		patch_1_clrj = jit->prg;
		EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
				 jit->prg);

		/*
		 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
		 *         goto out;
		 */

		if (jit->seen & SEEN_STACK)
			off = STK_OFF_TCCNT + STK_OFF + stack_depth;
		else
			off = STK_OFF_TCCNT;
		EMIT4_IMM(0xa7080000, REG_W0, 1);
		/* laal %w1,%w0,off(%r15) */
		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
		/* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */
		patch_2_clij = jit->prg;
		EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1,
				 2, jit->prg);

		/*
		 * prog = array->ptrs[index];
		 * if (prog == NULL)
		 *         goto out;
		 */

		/* llgfr %r1,%b3: %r1 = (u32) index */
		EMIT4(0xb9160000, REG_1, BPF_REG_3);
		/* sllg %r1,%r1,3: %r1 *= 8 */
		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
		/* ltg %r1,prog(%b2,%r1) */
		EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
			      REG_1, offsetof(struct bpf_array, ptrs));
		patch_3_brc = jit->prg;
		EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);

		/*
		 * Restore registers before calling function
		 */
		save_restore_regs(jit, REGS_RESTORE, stack_depth);

		/*
		 * goto *(prog->bpf_func + tail_call_start);
		 */

		/* lg %r1,bpf_func(%r1) */
		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
			      offsetof(struct bpf_prog, bpf_func));
		if (nospec_uses_trampoline()) {
			jit->seen |= SEEN_FUNC;
			/* aghi %r1,tail_call_start */
			EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
			/* brcl 0xf,__s390_indirect_jump_r1 */
			EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
		} else {
			/* bc 0xf,tail_call_start(%r1) */
			_EMIT4(0x47f01000 + jit->tail_call_start);
		}

		if (jit->prg_buf) {
			*(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
				(jit->prg - patch_1_clrj) >> 1;
			*(u16 *)(jit->prg_buf + patch_2_clij + 2) =
				(jit->prg - patch_2_clij) >> 1;
			*(u16 *)(jit->prg_buf + patch_3_brc + 2) =
				(jit->prg - patch_3_brc) >> 1;
		}
		break;
	}
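			/*
			 * The 16-bit branch-offset field sits 2 bytes into
			 * each 6-byte RIE instruction, and offsets count
			 * halfwords - hence the "+ 2" and ">> 1" above.
			 */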
	case BPF_JMP | BPF_EXIT: /* return b0 */
		last = (i == fp->len - 1) ? 1 : 0;
		if (last)
			break;
		if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
			/* brc 0xf, <exit> */
			EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
		else
			/* brcl 0xf, <exit> */
			EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
		break;
	/*
	 * Branch relative (number of skipped instructions) to offset on
	 * condition.
	 *
	 * Condition code to mask mapping:
	 *
	 * CC | Description        | Mask
	 * ------------------------------
	 * 0  | Operands equal     |    8
	 * 1  | First operand low  |    4
	 * 2  | First operand high |    2
	 * 3  | Unused             |    1
	 *
	 * For s390x relative branches: ip = ip + off_bytes
	 * For BPF relative branches: insn = insn + off_insns + 1
	 *
	 * For example for s390x with offset 0 we jump to the branch
	 * instruction itself (loop) and for BPF with offset 0 we
	 * branch to the instruction behind the branch.
	 */
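
	/*
	 * Worked example: mask 0xa = 8 | 2 (operands equal, or first
	 * operand high) is the unsigned ">=" test - the same mask the
	 * tail-call bound check above encodes in "clrj %b3,%w1,0xa,out".
	 */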
	case BPF_JMP | BPF_JA: /* if (true) */
		mask = 0xf000; /* j */
		goto branch_oc;
	case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
	case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
		mask = 0x2000; /* jh */
		goto branch_ks;
	case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
	case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
		mask = 0x4000; /* jl */
		goto branch_ks;
	case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
	case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
		mask = 0xa000; /* jhe */
		goto branch_ks;
	case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
	case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
		mask = 0xc000; /* jle */
		goto branch_ks;
	case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
	case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
		mask = 0x2000; /* jh */
		goto branch_ku;
	case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
	case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
		mask = 0x4000; /* jl */
		goto branch_ku;
	case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
	case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
		mask = 0xa000; /* jhe */
		goto branch_ku;
	case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
	case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
		mask = 0xc000; /* jle */
		goto branch_ku;
	case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
	case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
		mask = 0x7000; /* jne */
		goto branch_ku;
	case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
	case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
		mask = 0x8000; /* je */
		goto branch_ku;
	case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
	case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
		mask = 0x7000; /* jnz */
		if (BPF_CLASS(insn->code) == BPF_JMP32) {
			/* llilf %w1,imm (load zero extend imm) */
			EMIT6_IMM(0xc00f0000, REG_W1, imm);
			EMIT2(0x1400, REG_W1, dst_reg);
		} else {
			/* lgfi %w1,imm (load sign extend imm) */
			EMIT6_IMM(0xc0010000, REG_W1, imm);
			EMIT4(0xb9800000, REG_W1, dst_reg);
		}
		goto branch_oc;
	case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
	case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
		mask = 0x2000; /* jh */
		goto branch_xs;
	case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
	case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
		mask = 0x4000; /* jl */
		goto branch_xs;
	case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
	case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
		mask = 0xa000; /* jhe */
		goto branch_xs;
	case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
	case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
		mask = 0xc000; /* jle */
		goto branch_xs;
	case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
	case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
		mask = 0x2000; /* jh */
		goto branch_xu;
	case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
	case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
		mask = 0x4000; /* jl */
		goto branch_xu;
	case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
	case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
		mask = 0xa000; /* jhe */
		goto branch_xu;
	case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
	case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
		mask = 0xc000; /* jle */
		goto branch_xu;
	case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
	case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
		mask = 0x7000; /* jne */
		goto branch_xu;
	case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
	case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
		mask = 0x8000; /* je */
		goto branch_xu;
	case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
	case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
	{
		bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;

		mask = 0x7000; /* jnz */
		/* nrk or ngrk %w1,%dst,%src */
		EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
			  REG_W1, dst_reg, src_reg);
		goto branch_oc;
	}
branch_ks:
		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
		/* cfi or cgfi %dst,imm */
		EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
			  dst_reg, imm);
		if (!is_first_pass(jit) &&
		    can_use_rel(jit, addrs[i + off + 1])) {
			EMIT4_PCREL_RIC(0xa7040000,
					mask >> 12, addrs[i + off + 1]);
		} else {
			EMIT6_PCREL_RILC(0xc0040000,
					 mask >> 12, addrs[i + off + 1]);
		}
		break;
branch_ku:
		/* lgfi %w1,imm (load sign extend imm) */
		src_reg = REG_1;
		EMIT6_IMM(0xc0010000, src_reg, imm);
		goto branch_xu;
branch_xs:
		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
		if (!is_first_pass(jit) &&
		    can_use_rel(jit, addrs[i + off + 1])) {
			/* crj or cgrj %dst,%src,mask,off */
			EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
				    dst_reg, src_reg, i, off, mask);
		} else {
			/* cr or cgr %dst,%src */
			if (is_jmp32)
				EMIT2(0x1900, dst_reg, src_reg);
			else
				EMIT4(0xb9200000, dst_reg, src_reg);
			EMIT6_PCREL_RILC(0xc0040000,
					 mask >> 12, addrs[i + off + 1]);
		}
		break;
branch_xu:
		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
		if (!is_first_pass(jit) &&
		    can_use_rel(jit, addrs[i + off + 1])) {
			/* clrj or clgrj %dst,%src,mask,off */
			EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
				    dst_reg, src_reg, i, off, mask);
		} else {
			/* clr or clgr %dst,%src */
			if (is_jmp32)
				EMIT2(0x1500, dst_reg, src_reg);
			else
				EMIT4(0xb9210000, dst_reg, src_reg);
			EMIT6_PCREL_RILC(0xc0040000,
					 mask >> 12, addrs[i + off + 1]);
		}
		break;
branch_oc:
		if (!is_first_pass(jit) &&
		    can_use_rel(jit, addrs[i + off + 1])) {
			EMIT4_PCREL_RIC(0xa7040000,
					mask >> 12, addrs[i + off + 1]);
		} else {
			EMIT6_PCREL_RILC(0xc0040000,
					 mask >> 12, addrs[i + off + 1]);
		}
		break;
	default: /* too complex, give up */
		pr_err("Unknown opcode %02x\n", insn->code);
		return -1;
	}

	if (probe_prg != -1) {
		/*
		 * Handlers of certain exceptions leave psw.addr pointing to
		 * the instruction directly after the failing one. Therefore,
		 * create two exception table entries and also add a nop in
		 * case two probing instructions come directly after each
		 * other.
		 */
		nop_prg = jit->prg;
		/* bcr 0,%r0 */
		_EMIT2(0x0700);
		err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
		if (err < 0)
			return err;
	}

	return insn_count;
}
/*
 * Return whether the new i-th instruction address does not violate any
 * invariant
 */
static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
{
	/* On the first pass anything goes */
	if (is_first_pass(jit))
		return true;

	/* The codegen pass must not change anything */
	if (is_codegen_pass(jit))
		return jit->addrs[i] == jit->prg;

	/* Passes in between must not increase code size */
	return jit->addrs[i] >= jit->prg;
}
/*
 * Update the address of the i-th instruction
 */
static int bpf_set_addr(struct bpf_jit *jit, int i)
{
	int delta;

	if (is_codegen_pass(jit)) {
		delta = jit->prg - jit->addrs[i];
		if (delta < 0)
			bpf_skip(jit, -delta);
	}
	if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
		return -1;
	jit->addrs[i] = jit->prg;
	return 0;
}
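
/*
 * Note how this pairs with bpf_is_new_addr_sane(): between passes code
 * may only shrink, and on the codegen pass any instruction that became
 * shorter is padded back out with bpf_skip(), so every address computed
 * on the previous pass stays valid.
 */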
/*
 * Compile eBPF program into s390x code
 */
static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
			bool extra_pass, u32 stack_depth)
{
	int i, insn_count, lit32_size, lit64_size;

	jit->lit32 = jit->lit32_start;
	jit->lit64 = jit->lit64_start;
	jit->prg = 0;
	jit->excnt = 0;

	bpf_jit_prologue(jit, stack_depth);
	if (bpf_set_addr(jit, 0) < 0)
		return -1;
	for (i = 0; i < fp->len; i += insn_count) {
		insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
		if (insn_count < 0)
			return -1;
		/* Next instruction address */
		if (bpf_set_addr(jit, i + insn_count) < 0)
			return -1;
	}
	bpf_jit_epilogue(jit, stack_depth);

	lit32_size = jit->lit32 - jit->lit32_start;
	lit64_size = jit->lit64 - jit->lit64_start;
	jit->lit32_start = jit->prg;
	if (lit32_size)
		jit->lit32_start = ALIGN(jit->lit32_start, 4);
	jit->lit64_start = jit->lit32_start + lit32_size;
	if (lit64_size)
		jit->lit64_start = ALIGN(jit->lit64_start, 8);
	jit->size = jit->lit64_start + lit64_size;
	jit->size_prg = jit->prg;

	if (WARN_ON_ONCE(fp->aux->extable &&
			 jit->excnt != fp->aux->num_exentries))
		/* Verifier bug - wrong number of entries. */
		return -1;

	return 0;
}
bool bpf_jit_needs_zext(void)
{
	return true;
}

struct s390_jit_data {
	struct bpf_binary_header *header;
	struct bpf_jit ctx;
	int pass;
};

static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
					       struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	u32 extable_size;
	u32 code_size;

	/* We need two entries per insn. */
	fp->aux->num_exentries *= 2;

	code_size = roundup(jit->size,
			    __alignof__(struct exception_table_entry));
	extable_size = fp->aux->num_exentries *
		sizeof(struct exception_table_entry);
	header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
				      8, jit_fill_hole);
	if (!header)
		return NULL;
	fp->aux->extable = (struct exception_table_entry *)
		(jit->prg_buf + code_size);
	return header;
}
/*
 * Compile eBPF program "fp"
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 stack_depth = round_up(fp->aux->stack_depth, 8);
	struct bpf_prog *tmp, *orig_fp = fp;
	struct bpf_binary_header *header;
	struct s390_jit_data *jit_data;
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct bpf_jit jit;
	int pass;

	if (!fp->jit_requested)
		return orig_fp;

	tmp = bpf_jit_blind_constants(fp);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_fp;
	if (tmp != fp) {
		tmp_blinded = true;
		fp = tmp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = orig_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.addrs) {
		jit = jit_data->ctx;
		header = jit_data->header;
		extra_pass = true;
		pass = jit_data->pass + 1;
		goto skip_init_ctx;
	}

	memset(&jit, 0, sizeof(jit));
	jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
	if (jit.addrs == NULL) {
		fp = orig_fp;
		goto free_addrs;
	}
	/*
	 * Three initial passes:
	 *  - 1/2: Determine clobbered registers
	 *  - 3:   Calculate program size and addrs array
	 */
	for (pass = 1; pass <= 3; pass++) {
		if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
			fp = orig_fp;
			goto free_addrs;
		}
	}
	/*
	 * Final pass: Allocate and generate program
	 */
	header = bpf_jit_alloc(&jit, fp);
	if (!header) {
		fp = orig_fp;
		goto free_addrs;
	}
skip_init_ctx:
	if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
		bpf_jit_binary_free(header);
		fp = orig_fp;
		goto free_addrs;
	}
	if (bpf_jit_enable > 1) {
		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
		print_fn_code(jit.prg_buf, jit.size_prg);
	}
	if (!fp->is_func || extra_pass) {
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->header = header;
		jit_data->ctx = jit;
		jit_data->pass = pass;
	}
	fp->bpf_func = (void *) jit.prg_buf;
	fp->jited = 1;
	fp->jited_len = jit.size;

	if (!fp->is_func || extra_pass) {
		bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
free_addrs:
		kvfree(jit.addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(fp, fp == orig_fp ?
					   tmp : orig_fp);
	return fp;
}