// SPDX-License-Identifier: GPL-2.0
#include <linux/moduleloader.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/cache.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/ptrace.h>

#include "bpf_jit_64.h"
static inline bool is_simm13(unsigned int value)
{
	return value + 0x1000 < 0x2000;
}

static inline bool is_simm10(unsigned int value)
{
	return value + 0x200 < 0x400;
}

static inline bool is_simm5(unsigned int value)
{
	return value + 0x10 < 0x20;
}

static inline bool is_sethi(unsigned int value)
{
	return (value & ~0x3fffff) == 0;
}
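
/* The three range checks above rely on unsigned wraparound: adding half of
 * the representable range (0x1000 for simm13, 0x200 for simm10, 0x10 for
 * simm5) maps the valid signed interval onto [0, range), so a single
 * unsigned compare suffices.  is_sethi() checks that a value fits in the
 * 22-bit immediate field used by SETHI.
 */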

static void bpf_flush_icache(void *start_, void *end_)
{
	/* Cheetah's I-cache is fully coherent. */
	if (tlb_type == spitfire) {
		unsigned long start = (unsigned long) start_;
		unsigned long end = (unsigned long) end_;

		start &= ~7UL;
		end = (end + 7UL) & ~7UL;
		while (start < end) {
			flushi(start);
			start += 32;
		}
	}
}

#define SEEN_DATAREF 1 /* might call external helpers */
#define SEEN_XREG    2 /* X register is used */
#define SEEN_MEM     4 /* use mem[] for temporary storage */

#define S13(X)		((X) & 0x1fff)
#define S5(X)		((X) & 0x1f)
#define IMMED		0x00002000
#define RD(X)		((X) << 25)
#define RS1(X)		((X) << 14)
#define RS2(X)		((X) << 0)
#define OP(X)		((X) << 30)
#define OP2(X)		((X) << 22)
#define OP3(X)		((X) << 19)
#define COND(X)		(((X) & 0xf) << 25)
#define CBCOND(X)	(((X) & 0x1f) << 25)
#define F1(X)		OP(X)
#define F2(X, Y)	(OP(X) | OP2(Y))
#define F3(X, Y)	(OP(X) | OP3(Y))
#define ASI(X)		(((X) & 0xff) << 5)
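
/* A SPARC instruction word is built by OR-ing these field encodings
 * together.  For example, a 32-bit load from [src + off] into dst with a
 * 13-bit signed immediate offset is emitted later in this file as:
 *
 *	emit(LD32 | IMMED | RS1(src) | S13(off) | RD(dst), ctx);
 *
 * LD32 supplies the op/op3 fields, IMMED selects the immediate form, and
 * RS1/S13/RD fill in the register and immediate fields.
 */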

#define CONDN		COND(0x0)
#define CONDE		COND(0x1)
#define CONDLE		COND(0x2)
#define CONDL		COND(0x3)
#define CONDLEU		COND(0x4)
#define CONDCS		COND(0x5)
#define CONDNEG		COND(0x6)
#define CONDVC		COND(0x7)
#define CONDA		COND(0x8)
#define CONDNE		COND(0x9)
#define CONDG		COND(0xa)
#define CONDGE		COND(0xb)
#define CONDGU		COND(0xc)
#define CONDCC		COND(0xd)
#define CONDPOS		COND(0xe)
#define CONDVS		COND(0xf)

#define CONDGEU		CONDCC
#define CONDLU		CONDCS

#define WDISP22(X)	(((X) >> 2) & 0x3fffff)
#define WDISP19(X)	(((X) >> 2) & 0x7ffff)

/* The 10-bit branch displacement for CBCOND is split into two fields */
static u32 WDISP10(u32 off)
{
	u32 ret = ((off >> 2) & 0xff) << 5;

	ret |= ((off >> (2 + 8)) & 0x03) << 19;

	return ret;
}

#define CBCONDE		CBCOND(0x09)
#define CBCONDLE	CBCOND(0x0a)
#define CBCONDL		CBCOND(0x0b)
#define CBCONDLEU	CBCOND(0x0c)
#define CBCONDCS	CBCOND(0x0d)
#define CBCONDN		CBCOND(0x0e)
#define CBCONDVS	CBCOND(0x0f)
#define CBCONDNE	CBCOND(0x19)
#define CBCONDG		CBCOND(0x1a)
#define CBCONDGE	CBCOND(0x1b)
#define CBCONDGU	CBCOND(0x1c)
#define CBCONDCC	CBCOND(0x1d)
#define CBCONDPOS	CBCOND(0x1e)
#define CBCONDVC	CBCOND(0x1f)

#define CBCONDGEU	CBCONDCC
#define CBCONDLU	CBCONDCS

#define ANNUL		(1 << 29)
#define XCC		(1 << 21)

#define BRANCH		(F2(0, 1) | XCC)
#define CBCOND_OP	(F2(0, 3) | XCC)

#define BA		(BRANCH | CONDA)
#define BG		(BRANCH | CONDG)
#define BL		(BRANCH | CONDL)
#define BLE		(BRANCH | CONDLE)
#define BGU		(BRANCH | CONDGU)
#define BLEU		(BRANCH | CONDLEU)
#define BGE		(BRANCH | CONDGE)
#define BGEU		(BRANCH | CONDGEU)
#define BLU		(BRANCH | CONDLU)
#define BE		(BRANCH | CONDE)
#define BNE		(BRANCH | CONDNE)

#define SETHI(K, REG)	\
	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
#define OR_LO(K, REG)	\
	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))
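
/* SETHI/OR_LO form the classic two-instruction sequence for loading an
 * arbitrary 32-bit constant: SETHI places bits 31:10 of K into REG and
 * clears the low 10 bits, then OR_LO merges in bits 9:0.  emit_set_const()
 * below emits exactly this pair.
 */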

#define ADD		F3(2, 0x00)
#define AND		F3(2, 0x01)
#define ANDCC		F3(2, 0x11)
#define OR		F3(2, 0x02)
#define XOR		F3(2, 0x03)
#define SUB		F3(2, 0x04)
#define SUBCC		F3(2, 0x14)
#define MUL		F3(2, 0x0a)
#define MULX		F3(2, 0x09)
#define UDIVX		F3(2, 0x0d)
#define DIV		F3(2, 0x0e)
#define SLL		F3(2, 0x25)
#define SLLX		(F3(2, 0x25)|(1<<12))
#define SRA		F3(2, 0x27)
#define SRAX		(F3(2, 0x27)|(1<<12))
#define SRL		F3(2, 0x26)
#define SRLX		(F3(2, 0x26)|(1<<12))
#define JMPL		F3(2, 0x38)
#define SAVE		F3(2, 0x3c)
#define RESTORE		F3(2, 0x3d)
#define CALL		F1(1)
#define BR		F2(0, 0x01)
#define RD_Y		F3(2, 0x28)
#define WR_Y		F3(2, 0x30)

#define LD32		F3(3, 0x00)
#define LD8		F3(3, 0x01)
#define LD16		F3(3, 0x02)
#define LD64		F3(3, 0x0b)
#define LD64A		F3(3, 0x1b)
#define ST8		F3(3, 0x05)
#define ST16		F3(3, 0x06)
#define ST32		F3(3, 0x04)
#define ST64		F3(3, 0x0e)

#define CAS		F3(3, 0x3c)
#define CASX		F3(3, 0x3e)

#define BASE_STACKFRAME	176

#define LD32I		(LD32 | IMMED)
#define LD8I		(LD8 | IMMED)
#define LD16I		(LD16 | IMMED)
#define LD64I		(LD64 | IMMED)
#define LDPTRI		(LDPTR | IMMED)
#define ST32I		(ST32 | IMMED)

struct jit_ctx {
	struct bpf_prog		*prog;
	unsigned int		*offset;
	int			idx;
	int			epilogue_offset;
	bool			tmp_1_used;
	bool			tmp_2_used;
	bool			tmp_3_used;
	bool			saw_ld_abs_ind;
	bool			saw_frame_pointer;
	bool			saw_call;
	bool			saw_tail_call;
	u32			*image;
};

#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
#define SKB_HLEN_REG	(MAX_BPF_JIT_REG + 2)
#define SKB_DATA_REG	(MAX_BPF_JIT_REG + 3)
#define TMP_REG_3	(MAX_BPF_JIT_REG + 4)

/* Map BPF registers to SPARC registers */
static const int bpf2sparc[] = {
	/* return value from in-kernel function, and exit value from eBPF */

	/* arguments from eBPF program to in-kernel function */

	/* callee saved registers that in-kernel function will preserve */

	/* read-only frame pointer to access stack */

	/* temporary register for internal BPF JIT */

static void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = insn;

	ctx->idx++;
}
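
/* ctx->image is NULL during the initial sizing pass, so emit() and
 * emit_call() only advance ctx->idx then; instruction words are actually
 * written on the later passes once the image buffer has been allocated.
 */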

static void emit_call(u32 *func, struct jit_ctx *ctx)
{
	if (ctx->image != NULL) {
		void *here = &ctx->image[ctx->idx];
		unsigned int off;

		off = (void *)func - here;
		ctx->image[ctx->idx] = CALL | ((off >> 2) & 0x3fffffff);
	}
	ctx->idx++;
}

static void emit_nop(struct jit_ctx *ctx)
{
	emit(SETHI(0, G0), ctx);
}

static void emit_reg_move(u32 from, u32 to, struct jit_ctx *ctx)
{
	emit(OR | RS1(G0) | RS2(from) | RD(to), ctx);
}

/* Emit 32-bit constant, zero extended. */
static void emit_set_const(s32 K, u32 reg, struct jit_ctx *ctx)
{
	emit(SETHI(K, reg), ctx);
	emit(OR_LO(K, reg), ctx);
}

/* Emit 32-bit constant, sign extended. */
static void emit_set_const_sext(s32 K, u32 reg, struct jit_ctx *ctx)
{
	if (K >= 0) {
		emit(SETHI(K, reg), ctx);
		emit(OR_LO(K, reg), ctx);
	} else {
		u32 hbits = ~(u32) K;
		u32 lbits = -0x400 | (u32) K;

		emit(SETHI(hbits, reg), ctx);
		emit(XOR | IMMED | RS1(reg) | S13(lbits) | RD(reg), ctx);
	}
}
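
/* For negative K the plain SETHI/OR_LO pair cannot be used because SETHI
 * zeroes the upper 32 bits of the destination.  Instead, SETHI loads the
 * complement of K's bits 31:10, and the XOR with a negative simm13 (sign
 * extension sets every bit above bit 9, the low 10 bits equal K's) flips
 * the complemented field back to K, fills in the low 10 bits, and sets the
 * upper 32 bits, yielding a properly sign-extended 64-bit value.
 */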

static void emit_alu(u32 opcode, u32 src, u32 dst, struct jit_ctx *ctx)
{
	emit(opcode | RS1(dst) | RS2(src) | RD(dst), ctx);
}

static void emit_alu3(u32 opcode, u32 a, u32 b, u32 c, struct jit_ctx *ctx)
{
	emit(opcode | RS1(a) | RS2(b) | RD(c), ctx);
}

static void emit_alu_K(unsigned int opcode, unsigned int dst, unsigned int imm,
		       struct jit_ctx *ctx)
{
	bool small_immed = is_simm13(imm);
	unsigned int insn = opcode;

	insn |= RS1(dst) | RD(dst);
	if (small_immed) {
		emit(insn | IMMED | S13(imm), ctx);
	} else {
		unsigned int tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_set_const_sext(imm, tmp, ctx);
		emit(insn | RS2(tmp), ctx);
	}
}

static void emit_alu3_K(unsigned int opcode, unsigned int src, unsigned int imm,
			unsigned int dst, struct jit_ctx *ctx)
{
	bool small_immed = is_simm13(imm);
	unsigned int insn = opcode;

	insn |= RS1(src) | RD(dst);
	if (small_immed) {
		emit(insn | IMMED | S13(imm), ctx);
	} else {
		unsigned int tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_set_const_sext(imm, tmp, ctx);
		emit(insn | RS2(tmp), ctx);
	}
}

static void emit_loadimm32(s32 K, unsigned int dest, struct jit_ctx *ctx)
{
	if (K >= 0 && is_simm13(K)) {
		/* or %g0, K, DEST */
		emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx);
	} else {
		emit_set_const(K, dest, ctx);
	}
}

static void emit_loadimm(s32 K, unsigned int dest, struct jit_ctx *ctx)
{
	if (is_simm13(K)) {
		/* or %g0, K, DEST */
		emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx);
	} else {
		emit_set_const(K, dest, ctx);
	}
}

static void emit_loadimm_sext(s32 K, unsigned int dest, struct jit_ctx *ctx)
{
	if (is_simm13(K)) {
		/* or %g0, K, DEST */
		emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx);
	} else {
		emit_set_const_sext(K, dest, ctx);
	}
}
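
/* Summary of the 32-bit immediate helpers above: emit_loadimm32() zero
 * extends (and only uses the single "or %g0, K, dest" form for non-negative
 * simm13 values), emit_loadimm_sext() sign extends, and emit_loadimm()
 * falls back to the zero-extending SETHI/OR_LO pair when the constant does
 * not fit in a signed 13-bit immediate.  Each costs one instruction in the
 * simm13 case and two otherwise.
 */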

static void analyze_64bit_constant(u32 high_bits, u32 low_bits,
				   int *hbsp, int *lbsp, int *abbasp)
{
	int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
	int i;

	lowest_bit_set = highest_bit_set = -1;
	i = 0;
	do {
		if ((lowest_bit_set == -1) && ((low_bits >> i) & 1))
			lowest_bit_set = i;
		if ((highest_bit_set == -1) && ((high_bits >> (32 - i - 1)) & 1))
			highest_bit_set = (64 - i - 1);
	} while (++i < 32 && (highest_bit_set == -1 ||
			      lowest_bit_set == -1));
	if (i == 32) {
		i = 0;
		do {
			if (lowest_bit_set == -1 && ((high_bits >> i) & 1))
				lowest_bit_set = i + 32;
			if (highest_bit_set == -1 &&
			    ((low_bits >> (32 - i - 1)) & 1))
				highest_bit_set = 32 - i - 1;
		} while (++i < 32 && (highest_bit_set == -1 ||
				      lowest_bit_set == -1));
	}

	all_bits_between_are_set = 1;
	for (i = lowest_bit_set; i <= highest_bit_set; i++) {
		if (i < 32) {
			if ((low_bits & (1 << i)) != 0)
				continue;
		} else {
			if ((high_bits & (1 << (i - 32))) != 0)
				continue;
		}
		all_bits_between_are_set = 0;
		break;
	}
	*hbsp = highest_bit_set;
	*lbsp = lowest_bit_set;
	*abbasp = all_bits_between_are_set;
}
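
/* analyze_64bit_constant() records the positions of the lowest and highest
 * set bits of the 64-bit value {high_bits:low_bits} and whether every bit
 * in between is also set; const64_is_2insns() and emit_loadimm64() use this
 * to pick the shortest instruction sequence for a constant.
 */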

static unsigned long create_simple_focus_bits(unsigned long high_bits,
					      unsigned long low_bits,
					      int lowest_bit_set, int shift)
{
	long hi, lo;

	if (lowest_bit_set < 32) {
		lo = (low_bits >> lowest_bit_set) << shift;
		hi = ((high_bits << (32 - lowest_bit_set)) << shift);
	} else {
		lo = 0;
		hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
	}
	return hi | lo;
}

static bool const64_is_2insns(unsigned long high_bits,
			      unsigned long low_bits)
{
	int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

	if (high_bits == 0 || high_bits == 0xffffffff)
		return true;

	analyze_64bit_constant(high_bits, low_bits,
			       &highest_bit_set, &lowest_bit_set,
			       &all_bits_between_are_set);

	if ((highest_bit_set == 63 || lowest_bit_set == 0) &&
	    all_bits_between_are_set != 0)
		return true;

	if (highest_bit_set - lowest_bit_set < 21)
		return true;

	return false;
}

static void sparc_emit_set_const64_quick2(unsigned long high_bits,
					  unsigned long low_imm,
					  unsigned int dest,
					  int shift_count, struct jit_ctx *ctx)
{
	emit_loadimm32(high_bits, dest, ctx);

	/* Now shift it up into place. */
	emit_alu_K(SLLX, dest, shift_count, ctx);

	/* If there is a low immediate part piece, finish up by
	 * putting that in as well.
	 */
	if (low_imm != 0)
		emit(OR | IMMED | RS1(dest) | S13(low_imm) | RD(dest), ctx);
}

static void emit_loadimm64(u64 K, unsigned int dest, struct jit_ctx *ctx)
{
	int all_bits_between_are_set, lowest_bit_set, highest_bit_set;
	unsigned int tmp = bpf2sparc[TMP_REG_1];
	u32 low_bits = (K & 0xffffffff);
	u32 high_bits = (K >> 32);

	/* These two tests also take care of all of the one
	 * instruction cases.
	 */
	if (high_bits == 0xffffffff && (low_bits & 0x80000000))
		return emit_loadimm_sext(K, dest, ctx);
	if (high_bits == 0x00000000)
		return emit_loadimm32(K, dest, ctx);

	analyze_64bit_constant(high_bits, low_bits, &highest_bit_set,
			       &lowest_bit_set, &all_bits_between_are_set);

	/* 1) mov	-1, %reg
	 *    sllx	%reg, shift, %reg
	 * 2) mov	-1, %reg
	 *    srlx	%reg, shift, %reg
	 * 3) mov	some_small_const, %reg
	 *    sllx	%reg, shift, %reg
	 */
	if (((highest_bit_set == 63 || lowest_bit_set == 0) &&
	     all_bits_between_are_set != 0) ||
	    ((highest_bit_set - lowest_bit_set) < 12)) {
		int shift = lowest_bit_set;
		long the_const = -1;

		if ((highest_bit_set != 63 && lowest_bit_set != 0) ||
		    all_bits_between_are_set == 0) {
			the_const =
				create_simple_focus_bits(high_bits, low_bits,
							 lowest_bit_set, 0);
		} else if (lowest_bit_set == 0)
			shift = -(63 - highest_bit_set);

		emit(OR | IMMED | RS1(G0) | S13(the_const) | RD(dest), ctx);
		if (shift > 0)
			emit_alu_K(SLLX, dest, shift, ctx);
		else if (shift < 0)
			emit_alu_K(SRLX, dest, -shift, ctx);

		return;
	}

	/* Now a range of 22 or less bits set somewhere.
	 * 1) sethi	%hi(focus_bits), %reg
	 *    sllx	%reg, shift, %reg
	 * 2) sethi	%hi(focus_bits), %reg
	 *    srlx	%reg, shift, %reg
	 */
	if ((highest_bit_set - lowest_bit_set) < 21) {
		unsigned long focus_bits =
			create_simple_focus_bits(high_bits, low_bits,
						 lowest_bit_set, 10);

		emit(SETHI(focus_bits, dest), ctx);

		/* If lowest_bit_set == 10 then a sethi alone could
		 * have done it.
		 */
		if (lowest_bit_set < 10)
			emit_alu_K(SRLX, dest, 10 - lowest_bit_set, ctx);
		else if (lowest_bit_set > 10)
			emit_alu_K(SLLX, dest, lowest_bit_set - 10, ctx);
		return;
	}

	/* Ok, now 3 instruction sequences. */
	if (low_bits == 0) {
		emit_loadimm32(high_bits, dest, ctx);
		emit_alu_K(SLLX, dest, 32, ctx);
		return;
	}

	/* We may be able to do something quick
	 * when the constant is negated, so try that.
	 */
	if (const64_is_2insns((~high_bits) & 0xffffffff,
			      (~low_bits) & 0xfffffc00)) {
		/* NOTE: The trailing bits get XOR'd so we need the
		 * non-negated bits, not the negated ones.
		 */
		unsigned long trailing_bits = low_bits & 0x3ff;

		if ((((~high_bits) & 0xffffffff) == 0 &&
		     ((~low_bits) & 0x80000000) == 0) ||
		    (((~high_bits) & 0xffffffff) == 0xffffffff &&
		     ((~low_bits) & 0x80000000) != 0)) {
			unsigned long fast_int = (~low_bits & 0xffffffff);

			if ((is_sethi(fast_int) &&
			     (~high_bits & 0xffffffff) == 0)) {
				emit(SETHI(fast_int, dest), ctx);
			} else if (is_simm13(fast_int)) {
				emit(OR | IMMED | RS1(G0) | S13(fast_int) | RD(dest), ctx);
			} else {
				emit_loadimm64(fast_int, dest, ctx);
			}
		} else {
			u64 n = ((~low_bits) & 0xfffffc00) |
				(((unsigned long)((~high_bits) & 0xffffffff))<<32);
			emit_loadimm64(n, dest, ctx);
		}

		low_bits = -0x400 | trailing_bits;

		emit(XOR | IMMED | RS1(dest) | S13(low_bits) | RD(dest), ctx);
		return;
	}

	/* 1) sethi	%hi(xxx), %reg
	 *    or	%reg, %lo(xxx), %reg
	 *    sllx	%reg, yyy, %reg
	 */
	if ((highest_bit_set - lowest_bit_set) < 32) {
		unsigned long focus_bits =
			create_simple_focus_bits(high_bits, low_bits,
						 lowest_bit_set, 0);

		/* So what we know is that the set bits straddle the
		 * middle of the 64-bit word.
		 */
		sparc_emit_set_const64_quick2(focus_bits, 0, dest,
					      lowest_bit_set, ctx);
		return;
	}

	/* 1) sethi	%hi(high_bits), %reg
	 *    or	%reg, %lo(high_bits), %reg
	 *    sllx	%reg, 32, %reg
	 *    or	%reg, low_bits, %reg
	 */
	if (is_simm13(low_bits) && ((int)low_bits > 0)) {
		sparc_emit_set_const64_quick2(high_bits, low_bits,
					      dest, 32, ctx);
		return;
	}

	/* Oh well, we tried... Do a full 64-bit decomposition. */
	ctx->tmp_1_used = true;

	emit_loadimm32(high_bits, tmp, ctx);
	emit_loadimm32(low_bits, dest, ctx);
	emit_alu_K(SLLX, tmp, 32, ctx);
	emit(OR | RS1(dest) | RS2(tmp) | RD(dest), ctx);
}
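
/* The cascade above mirrors the 64-bit constant synthesis strategy long
 * used by the SPARC GCC backend: try a single mov or sethi plus one shift,
 * then a negated two-instruction form fixed up with XOR, then sethi/or
 * followed by a shift, and only fall back to the full decomposition at the
 * end (build each 32-bit half separately, shift the high half up by 32,
 * then OR the halves together).
 */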

static void emit_branch(unsigned int br_opc, unsigned int from_idx, unsigned int to_idx,
			struct jit_ctx *ctx)
{
	unsigned int off = to_idx - from_idx;

	if (br_opc & XCC)
		emit(br_opc | WDISP19(off << 2), ctx);
	else
		emit(br_opc | WDISP22(off << 2), ctx);
}

static void emit_cbcond(unsigned int cb_opc, unsigned int from_idx, unsigned int to_idx,
			const u8 dst, const u8 src, struct jit_ctx *ctx)
{
	unsigned int off = to_idx - from_idx;

	emit(cb_opc | WDISP10(off << 2) | RS1(dst) | RS2(src), ctx);
}

static void emit_cbcondi(unsigned int cb_opc, unsigned int from_idx, unsigned int to_idx,
			 const u8 dst, s32 imm, struct jit_ctx *ctx)
{
	unsigned int off = to_idx - from_idx;

	emit(cb_opc | IMMED | WDISP10(off << 2) | RS1(dst) | S5(imm), ctx);
}

#define emit_read_y(REG, CTX)	emit(RD_Y | RD(REG), CTX)
#define emit_write_y(REG, CTX)	emit(WR_Y | IMMED | RS1(REG) | S13(0), CTX)

#define emit_cmp(R1, R2, CTX)				\
	emit(SUBCC | RS1(R1) | RS2(R2) | RD(G0), CTX)

#define emit_cmpi(R1, IMM, CTX)				\
	emit(SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX)

#define emit_btst(R1, R2, CTX)				\
	emit(ANDCC | RS1(R1) | RS2(R2) | RD(G0), CTX)

#define emit_btsti(R1, IMM, CTX)			\
	emit(ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX)
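
/* cmp and btst are the usual SPARC synthetic forms: a SUBCC or ANDCC whose
 * result is discarded into %g0, so only the condition codes are updated for
 * the branch that follows.
 */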

static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
				   const s32 imm, bool is_imm, int branch_dst,
	bool use_cbcond = (sparc64_elf_hwcap & AV_SPARC_CBCOND) != 0;
	const u8 tmp = bpf2sparc[TMP_REG_1];

	branch_dst = ctx->offset[branch_dst];

	if (!is_simm10(branch_dst - ctx->idx) ||
	    BPF_OP(code) == BPF_JSET)
		} else if (!is_simm13(imm)) {
			ctx->tmp_1_used = true;
			emit_loadimm_sext(imm, tmp, ctx);
		if (BPF_OP(code) == BPF_JSET) {
				emit_btsti(dst, imm, ctx);
				emit_btst(dst, src, ctx);
				emit_cmpi(dst, imm, ctx);
				emit_cmp(dst, src, ctx);
		switch (BPF_OP(code)) {
			/* Make sure we dont leak kernel information to the
		emit_branch(br_opcode, ctx->idx, branch_dst, ctx);
		switch (BPF_OP(code)) {
			cbcond_opcode = CBCONDE;
			cbcond_opcode = CBCONDGU;
			cbcond_opcode = CBCONDLU;
			cbcond_opcode = CBCONDGEU;
			cbcond_opcode = CBCONDLEU;
			cbcond_opcode = CBCONDNE;
			cbcond_opcode = CBCONDG;
			cbcond_opcode = CBCONDL;
			cbcond_opcode = CBCONDGE;
			cbcond_opcode = CBCONDLE;
			/* Make sure we dont leak kernel information to the
		cbcond_opcode |= CBCOND_OP;
			emit_cbcondi(cbcond_opcode, ctx->idx, branch_dst,
			emit_cbcond(cbcond_opcode, ctx->idx, branch_dst,
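
/* Two encodings are used for conditional jumps: the classic sequence (a
 * SUBCC/ANDCC compare followed by a conditional branch using a 19- or
 * 22-bit displacement) and, when the CPU advertises AV_SPARC_CBCOND and the
 * displacement fits in 10 bits, the single compare-and-branch CBCOND
 * instruction with a 5-bit immediate.  BPF_JSET always takes the classic
 * path since CBCOND cannot test individual bits.
 */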

static void load_skb_regs(struct jit_ctx *ctx, u8 r_skb)
{
	const u8 r_headlen = bpf2sparc[SKB_HLEN_REG];
	const u8 r_data = bpf2sparc[SKB_DATA_REG];
	const u8 r_tmp = bpf2sparc[TMP_REG_1];
	unsigned int off;

	off = offsetof(struct sk_buff, len);
	emit(LD32I | RS1(r_skb) | S13(off) | RD(r_headlen), ctx);

	off = offsetof(struct sk_buff, data_len);
	emit(LD32I | RS1(r_skb) | S13(off) | RD(r_tmp), ctx);

	emit(SUB | RS1(r_headlen) | RS2(r_tmp) | RD(r_headlen), ctx);

	off = offsetof(struct sk_buff, data);
	emit(LDPTRI | RS1(r_skb) | S13(off) | RD(r_data), ctx);
}

/* Just skip the save instruction and the ctx register move. */
#define BPF_TAILCALL_PROLOGUE_SKIP	16
#define BPF_TAILCALL_CNT_SP_OFF		(STACK_BIAS + 128)
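
/* The tail-call counter lives on the stack just above the 128-byte register
 * window save area of the current frame, hence the STACK_BIAS + 128 offset
 * from %sp used here, in build_prologue() and in emit_tail_call().
 */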

static void build_prologue(struct jit_ctx *ctx)
{
	s32 stack_needed = BASE_STACKFRAME;

	if (ctx->saw_frame_pointer || ctx->saw_tail_call) {
		struct bpf_prog *prog = ctx->prog;
		u32 stack_depth;

		stack_depth = prog->aux->stack_depth;
		stack_needed += round_up(stack_depth, 16);
	}

	if (ctx->saw_tail_call)
		stack_needed += 8;

	/* save %sp, -176, %sp */
	emit(SAVE | IMMED | RS1(SP) | S13(-stack_needed) | RD(SP), ctx);

	/* tail_call_cnt = 0 */
	if (ctx->saw_tail_call) {
		u32 off = BPF_TAILCALL_CNT_SP_OFF;

		emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(G0), ctx);
	} else {
		emit_nop(ctx);
	}
	if (ctx->saw_frame_pointer) {
		const u8 vfp = bpf2sparc[BPF_REG_FP];

		emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx);
	}

	emit_reg_move(I0, O0, ctx);
	/* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */

	if (ctx->saw_ld_abs_ind)
		load_skb_regs(ctx, bpf2sparc[BPF_REG_1]);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	ctx->epilogue_offset = ctx->idx;

	/* ret (jmpl %i7 + 8, %g0) */
	emit(JMPL | IMMED | RS1(I7) | S13(8) | RD(G0), ctx);

	/* restore %i5, %g0, %o0 */
	emit(RESTORE | RS1(bpf2sparc[BPF_REG_0]) | RS2(G0) | RD(O0), ctx);
}

static void emit_tail_call(struct jit_ctx *ctx)
{
	const u8 bpf_array = bpf2sparc[BPF_REG_2];
	const u8 bpf_index = bpf2sparc[BPF_REG_3];
	const u8 tmp = bpf2sparc[TMP_REG_1];
	u32 off;

	ctx->saw_tail_call = true;

	off = offsetof(struct bpf_array, map.max_entries);
	emit(LD32 | IMMED | RS1(bpf_array) | S13(off) | RD(tmp), ctx);
	emit_cmp(bpf_index, tmp, ctx);
	emit_branch(BGEU, ctx->idx, ctx->idx + OFFSET1, ctx);
	emit_nop(ctx);

	off = BPF_TAILCALL_CNT_SP_OFF;
	emit(LD32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx);
	emit_cmpi(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit_branch(BGU, ctx->idx, ctx->idx + OFFSET2, ctx);
	emit_nop(ctx);

	emit_alu_K(ADD, tmp, 1, ctx);
	off = BPF_TAILCALL_CNT_SP_OFF;
	emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx);

	emit_alu3_K(SLL, bpf_index, 3, tmp, ctx);
	emit_alu(ADD, bpf_array, tmp, ctx);
	off = offsetof(struct bpf_array, ptrs);
	emit(LD64 | IMMED | RS1(tmp) | S13(off) | RD(tmp), ctx);

	emit_cmpi(tmp, 0, ctx);
	emit_branch(BE, ctx->idx, ctx->idx + OFFSET3, ctx);
	emit_nop(ctx);

	off = offsetof(struct bpf_prog, bpf_func);
	emit(LD64 | IMMED | RS1(tmp) | S13(off) | RD(tmp), ctx);

	off = BPF_TAILCALL_PROLOGUE_SKIP;
	emit(JMPL | IMMED | RS1(tmp) | S13(off) | RD(G0), ctx);
	emit_nop(ctx);
}
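
/* The tail call performs the three standard checks (index below
 * map->max_entries, tail-call counter at most MAX_TAIL_CALL_CNT, non-NULL
 * program pointer in bpf_array->ptrs[]), bumps the counter kept at
 * BPF_TAILCALL_CNT_SP_OFF in the current frame, and then jumps into the
 * target program BPF_TAILCALL_PROLOGUE_SKIP bytes past its entry point so
 * that the current register window and stack frame are reused.
 */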

static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2sparc[insn->dst_reg];
	const u8 src = bpf2sparc[insn->src_reg];
	const int i = insn - ctx->prog->insnsi;
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	u32 *func;

	if (insn->src_reg == BPF_REG_FP)
		ctx->saw_frame_pointer = true;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
		emit_alu3_K(SRL, src, 0, dst, ctx);
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit_reg_move(src, dst, ctx);
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_alu(ADD, src, dst, ctx);
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_alu(SUB, src, dst, ctx);
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_alu(AND, src, dst, ctx);
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_alu(OR, src, dst, ctx);
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_alu(XOR, src, dst, ctx);
	case BPF_ALU | BPF_MUL | BPF_X:
		emit_alu(MUL, src, dst, ctx);
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_alu(MULX, src, dst, ctx);
	case BPF_ALU | BPF_DIV | BPF_X:
		emit_cmp(src, G0, ctx);
		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);

		emit_write_y(G0, ctx);
		emit_alu(DIV, src, dst, ctx);
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit_cmp(src, G0, ctx);
		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);

		emit_alu(UDIVX, src, dst, ctx);
	case BPF_ALU | BPF_MOD | BPF_X: {
		const u8 tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_cmp(src, G0, ctx);
		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);

		emit_write_y(G0, ctx);
		emit_alu3(DIV, dst, src, tmp, ctx);
		emit_alu3(MULX, tmp, src, tmp, ctx);
		emit_alu3(SUB, dst, tmp, dst, ctx);
	case BPF_ALU64 | BPF_MOD | BPF_X: {
		const u8 tmp = bpf2sparc[TMP_REG_1];

		ctx->tmp_1_used = true;

		emit_cmp(src, G0, ctx);
		emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx);
		emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx);

		emit_alu3(UDIVX, dst, src, tmp, ctx);
		emit_alu3(MULX, tmp, src, tmp, ctx);
		emit_alu3(SUB, dst, tmp, dst, ctx);
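	/* SPARC has no integer remainder instruction, so BPF_MOD is
	 * synthesized above as dst - (dst / src) * src, using the 32-bit DIV
	 * (with %y cleared) or the 64-bit UDIVX as the divider.
	 */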
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_alu(SLL, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_alu(SLLX, src, dst, ctx);
	case BPF_ALU | BPF_RSH | BPF_X:
		emit_alu(SRL, src, dst, ctx);
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_alu(SRLX, src, dst, ctx);
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_alu(SRA, src, dst, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_alu(SRAX, src, dst, ctx);

	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(SUB | RS1(0) | RS2(dst) | RD(dst), ctx);
		goto do_alu32_trunc;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
			emit_alu_K(SLL, dst, 16, ctx);
			emit_alu_K(SRL, dst, 16, ctx);
			emit_alu_K(SRL, dst, 0, ctx);
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];

		ctx->tmp_1_used = true;
			emit_alu3_K(AND, dst, 0xff, tmp, ctx);
			emit_alu3_K(SRL, dst, 8, dst, ctx);
			emit_alu3_K(AND, dst, 0xff, dst, ctx);
			emit_alu3_K(SLL, tmp, 8, tmp, ctx);
			emit_alu(OR, tmp, dst, ctx);
			ctx->tmp_2_used = true;
			emit_alu3_K(SRL, dst, 24, tmp, ctx);	/* tmp  = dst >> 24 */
			emit_alu3_K(SRL, dst, 16, tmp2, ctx);	/* tmp2 = dst >> 16 */
			emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */
			emit_alu3_K(SLL, tmp2, 8, tmp2, ctx);	/* tmp2 = tmp2 << 8 */
			emit_alu(OR, tmp2, tmp, ctx);		/* tmp  = tmp | tmp2 */
			emit_alu3_K(SRL, dst, 8, tmp2, ctx);	/* tmp2 = dst >> 8 */
			emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */
			emit_alu3_K(SLL, tmp2, 16, tmp2, ctx);	/* tmp2 = tmp2 << 16 */
			emit_alu(OR, tmp2, tmp, ctx);		/* tmp  = tmp | tmp2 */
			emit_alu3_K(AND, dst, 0xff, dst, ctx);	/* dst  = dst & 0xff */
			emit_alu3_K(SLL, dst, 24, dst, ctx);	/* dst  = dst << 24 */
			emit_alu(OR, tmp, dst, ctx);		/* dst  = dst | tmp */
			emit_alu3_K(ADD, SP, STACK_BIAS + 128, tmp, ctx);
			emit(ST64 | RS1(tmp) | RS2(G0) | RD(dst), ctx);
			emit(LD64A | ASI(ASI_PL) | RS1(tmp) | RS2(G0) | RD(dst), ctx);
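	/* For the 64-bit byte swap the value is spilled with a normal store
	 * to the scratch slot at %sp + STACK_BIAS + 128 and read back with
	 * LD64A using the little-endian ASI (ASI_PL), letting the memory
	 * system perform the byte reversal.
	 */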
	case BPF_ALU | BPF_MOV | BPF_K:
		emit_loadimm32(imm, dst, ctx);
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_loadimm_sext(imm, dst, ctx);
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_alu_K(ADD, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_alu_K(SUB, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_alu_K(AND, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_alu_K(OR, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_alu_K(XOR, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_MUL | BPF_K:
		emit_alu_K(MUL, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_alu_K(MULX, dst, imm, ctx);
	case BPF_ALU | BPF_DIV | BPF_K:
		emit_write_y(G0, ctx);
		emit_alu_K(DIV, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_alu_K(UDIVX, dst, imm, ctx);
	case BPF_ALU64 | BPF_MOD | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K: {
		const u8 tmp = bpf2sparc[TMP_REG_2];
		div = (BPF_CLASS(code) == BPF_ALU64) ? UDIVX : DIV;

		ctx->tmp_2_used = true;

		if (BPF_CLASS(code) != BPF_ALU64)
			emit_write_y(G0, ctx);
		if (is_simm13(imm)) {
			emit(div | IMMED | RS1(dst) | S13(imm) | RD(tmp), ctx);
			emit(MULX | IMMED | RS1(tmp) | S13(imm) | RD(tmp), ctx);
			emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx);
			const u8 tmp1 = bpf2sparc[TMP_REG_1];

			ctx->tmp_1_used = true;

			emit_set_const_sext(imm, tmp1, ctx);
			emit(div | RS1(dst) | RS2(tmp1) | RD(tmp), ctx);
			emit(MULX | RS1(tmp) | RS2(tmp1) | RD(tmp), ctx);
			emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx);
		goto do_alu32_trunc;
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_alu_K(SLL, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_alu_K(SLLX, dst, imm, ctx);
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_alu_K(SRL, dst, imm, ctx);
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_alu_K(SRLX, dst, imm, ctx);
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_alu_K(SRA, dst, imm, ctx);
		goto do_alu32_trunc;
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_alu_K(SRAX, dst, imm, ctx);

	do_alu32_trunc:
		if (BPF_CLASS(code) == BPF_ALU)
			emit_alu_K(SRL, dst, 0, ctx);

	case BPF_JMP | BPF_JA:
		emit_branch(BA, ctx->idx, ctx->offset[i + off], ctx);
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X: {
		err = emit_compare_and_branch(code, dst, src, 0, false, i + off, ctx);
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K: {
		err = emit_compare_and_branch(code, dst, 0, imm, true, i + off, ctx);

	case BPF_JMP | BPF_CALL:
		u8 *func = ((u8 *)__bpf_call_base) + imm;

		ctx->saw_call = true;
		if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
			emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);

		emit_call((u32 *)func, ctx);
		emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);

		if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
			load_skb_regs(ctx, L7);
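	/* For helpers that may move packet data, the cached skb header-length
	 * and data-pointer registers set up by load_skb_regs() become stale,
	 * so the skb pointer (BPF_REG_1) is parked in %l7 across the call
	 * and the cached registers are reloaded from it afterwards.
	 */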
	case BPF_JMP | BPF_TAIL_CALL:
		emit_tail_call(ctx);

	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
		emit_branch(BA, ctx->idx, ctx->epilogue_offset, ctx);

	case BPF_LD | BPF_IMM | BPF_DW:
		const struct bpf_insn insn1 = insn[1];

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_loadimm64(imm64, dst, ctx);
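	/* A BPF_LD | BPF_IMM | BPF_DW instruction occupies two BPF slots:
	 * insn[1].imm supplies the upper 32 bits merged in above, so the
	 * instruction consumes the following slot as well.
	 */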
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		u32 opcode = 0, rs2;

		ctx->tmp_1_used = true;
		switch (BPF_SIZE(code)) {
		if (is_simm13(off)) {
			emit_loadimm(off, tmp, ctx);
		emit(opcode | RS1(src) | rs2 | RD(dst), ctx);

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		u32 opcode = 0, rs2;

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		ctx->tmp_2_used = true;
		emit_loadimm(imm, tmp2, ctx);

		switch (BPF_SIZE(code)) {
		if (is_simm13(off)) {
			ctx->tmp_1_used = true;
			emit_loadimm(off, tmp, ctx);
		emit(opcode | RS1(dst) | rs2 | RD(tmp2), ctx);

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		u32 opcode = 0, rs2;

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		switch (BPF_SIZE(code)) {
		if (is_simm13(off)) {
			ctx->tmp_1_used = true;
			emit_loadimm(off, tmp, ctx);
		emit(opcode | RS1(dst) | rs2 | RD(src), ctx);
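	/* The LDX/ST/STX cases above share the same addressing pattern: when
	 * the BPF offset fits in a signed 13-bit immediate the immediate-form
	 * load/store is used directly, otherwise the offset is first
	 * materialized into TMP_REG_1 and the register-indexed form is used.
	 */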
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		const u8 tmp3 = bpf2sparc[TMP_REG_3];

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		ctx->tmp_1_used = true;
		ctx->tmp_2_used = true;
		ctx->tmp_3_used = true;
		emit_loadimm(off, tmp, ctx);
		emit_alu3(ADD, dst, tmp, tmp, ctx);

		emit(LD32 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx);
		emit_alu3(ADD, tmp2, src, tmp3, ctx);
		emit(CAS | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx);
		emit_cmp(tmp2, tmp3, ctx);
		emit_branch(BNE, 4, 0, ctx);

	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW: {
		const u8 tmp = bpf2sparc[TMP_REG_1];
		const u8 tmp2 = bpf2sparc[TMP_REG_2];
		const u8 tmp3 = bpf2sparc[TMP_REG_3];

		if (insn->dst_reg == BPF_REG_FP)
			ctx->saw_frame_pointer = true;

		ctx->tmp_1_used = true;
		ctx->tmp_2_used = true;
		ctx->tmp_3_used = true;
		emit_loadimm(off, tmp, ctx);
		emit_alu3(ADD, dst, tmp, tmp, ctx);

		emit(LD64 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx);
		emit_alu3(ADD, tmp2, src, tmp3, ctx);
		emit(CASX | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx);
		emit_cmp(tmp2, tmp3, ctx);
		emit_branch(BNE, 4, 0, ctx);
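	/* XADD is emitted as a load / add / compare-and-swap sequence: CAS
	 * (CASX for the 64-bit variant) only stores the new value if the
	 * memory word still matches the previously loaded one, and the BNE
	 * above branches back four instructions to retry when another CPU
	 * modified the word in the meantime.
	 */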
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
		func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_word);
	case BPF_LD | BPF_ABS | BPF_H:
		func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_half);
	case BPF_LD | BPF_ABS | BPF_B:
		func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_byte);
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
		func = bpf_jit_load_word;
	case BPF_LD | BPF_IND | BPF_H:
		func = bpf_jit_load_half;
	case BPF_LD | BPF_IND | BPF_B:
		func = bpf_jit_load_byte;
		ctx->saw_ld_abs_ind = true;

		emit_reg_move(bpf2sparc[BPF_REG_6], O0, ctx);
		emit_loadimm(imm, O1, ctx);

		if (BPF_MODE(code) == BPF_IND)
			emit_alu(ADD, src, O1, ctx);

		emit_call(func, ctx);
		emit_alu_K(SRA, O1, 0, ctx);

		emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
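	/* LD_ABS and LD_IND are not inlined: the skb (BPF_REG_6) is moved to
	 * %o0 and the offset to %o1, then one of the bpf_jit_load_{word,half,
	 * byte} assembler helpers is called, with CHOOSE_LOAD_FUNC selecting
	 * the positive- or negative-offset variant for constant offsets.  The
	 * result comes back in %o0 and is copied into BPF_REG_0.
	 */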
	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);

		if (ret > 0) {
			i++;
			ctx->offset[i] = ctx->idx;
			continue;
		}
		ctx->offset[i] = ctx->idx;
		if (ret)
			return ret;
	}

	return 0;
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = 0x91d02005; /* ta 5 */
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	u32 image_size;
	u8 *image_ptr;
	int pass;

	if (!bpf_jit_enable)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* Fake pass to detect features used, and get an accurate assessment
	 * of what the final image size will be.
	 */
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}
	build_prologue(&ctx);
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.image = (u32 *)image_ptr;

	for (pass = 1; pass < 3; pass++) {
		ctx.idx = 0;

		build_prologue(&ctx);

		if (build_body(&ctx)) {
			bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_off;
		}

		build_epilogue(&ctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c%c]\n", pass,
				image_size - (ctx.idx * 4),
				ctx.tmp_1_used ? '1' : ' ',
				ctx.tmp_2_used ? '2' : ' ',
				ctx.tmp_3_used ? '3' : ' ',
				ctx.saw_ld_abs_ind ? 'L' : ' ',
				ctx.saw_frame_pointer ? 'F' : ' ',
				ctx.saw_call ? 'C' : ' ',
				ctx.saw_tail_call ? 'T' : ' ');
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, pass, ctx.image);

	bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));

	bpf_jit_binary_lock_ro(header);

	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
	prog->jited_len = image_size;

out_off:
	kfree(ctx.offset);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?