// SPDX-License-Identifier: GPL-2.0
/* BPF JIT compiler for RV64G
 *
 * Copyright(c) 2019 Björn Töpel <bjorn.topel@gmail.com>
 */

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include "bpf_jit.h"

#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if the program does calls */

static const int regmap[] = {
	[BPF_REG_0] = RV_REG_A5,
	[BPF_REG_1] = RV_REG_A0,
	[BPF_REG_2] = RV_REG_A1,
	[BPF_REG_3] = RV_REG_A2,
	[BPF_REG_4] = RV_REG_A3,
	[BPF_REG_5] = RV_REG_A4,
	[BPF_REG_6] = RV_REG_S1,
	[BPF_REG_7] = RV_REG_S2,
	[BPF_REG_8] = RV_REG_S3,
	[BPF_REG_9] = RV_REG_S4,
	[BPF_REG_FP] = RV_REG_S5,
	[BPF_REG_AX] = RV_REG_T0,
};

static const int pt_regmap[] = {
	[RV_REG_A0] = offsetof(struct pt_regs, a0),
	[RV_REG_A1] = offsetof(struct pt_regs, a1),
	[RV_REG_A2] = offsetof(struct pt_regs, a2),
	[RV_REG_A3] = offsetof(struct pt_regs, a3),
	[RV_REG_A4] = offsetof(struct pt_regs, a4),
	[RV_REG_A5] = offsetof(struct pt_regs, a5),
	[RV_REG_S1] = offsetof(struct pt_regs, s1),
	[RV_REG_S2] = offsetof(struct pt_regs, s2),
	[RV_REG_S3] = offsetof(struct pt_regs, s3),
	[RV_REG_S4] = offsetof(struct pt_regs, s4),
	[RV_REG_S5] = offsetof(struct pt_regs, s5),
	[RV_REG_T0] = offsetof(struct pt_regs, t0),
};

enum {
	RV_CTX_F_SEEN_TAIL_CALL = 0,
	RV_CTX_F_SEEN_CALL = RV_REG_RA,
	RV_CTX_F_SEEN_S1 = RV_REG_S1,
	RV_CTX_F_SEEN_S2 = RV_REG_S2,
	RV_CTX_F_SEEN_S3 = RV_REG_S3,
	RV_CTX_F_SEEN_S4 = RV_REG_S4,
	RV_CTX_F_SEEN_S5 = RV_REG_S5,
	RV_CTX_F_SEEN_S6 = RV_REG_S6,
};

static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
{
	u8 reg = regmap[bpf_reg];

	switch (reg) {
	case RV_CTX_F_SEEN_S1:
	case RV_CTX_F_SEEN_S2:
	case RV_CTX_F_SEEN_S3:
	case RV_CTX_F_SEEN_S4:
	case RV_CTX_F_SEEN_S5:
	case RV_CTX_F_SEEN_S6:
		__set_bit(reg, &ctx->flags);
		break;
	}
	return reg;
}

static bool seen_reg(int reg, struct rv_jit_context *ctx)
{
	switch (reg) {
	case RV_CTX_F_SEEN_CALL:
	case RV_CTX_F_SEEN_S1:
	case RV_CTX_F_SEEN_S2:
	case RV_CTX_F_SEEN_S3:
	case RV_CTX_F_SEEN_S4:
	case RV_CTX_F_SEEN_S5:
	case RV_CTX_F_SEEN_S6:
		return test_bit(reg, &ctx->flags);
	}
	return false;
}

static void mark_fp(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_S5, &ctx->flags);
}

static void mark_call(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}

static bool seen_call(struct rv_jit_context *ctx)
{
	return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}

static void mark_tail_call(struct rv_jit_context *ctx)
{
	__set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}

static bool seen_tail_call(struct rv_jit_context *ctx)
{
	return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}

static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
{
	mark_tail_call(ctx);

	if (seen_call(ctx)) {
		__set_bit(RV_CTX_F_SEEN_S6, &ctx->flags);
		return RV_REG_S6;
	}
	return RV_REG_A6;
}

static bool is_32b_int(s64 val)
{
	return -(1L << 31) <= val && val < (1L << 31);
}

static bool in_auipc_jalr_range(s64 val)
{
	/*
	 * auipc+jalr can reach any signed PC-relative offset in the range
	 * [-2^31 - 2^11, 2^31 - 2^11).
	 */
	return (-(1L << 31) - (1L << 11)) <= val &&
		val < ((1L << 31) - (1L << 11));
}

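/* Worked example (editorial note, not in the original source): auipc
 * computes rd = PC + (sign-extended imm20 << 12), covering offsets in
 * [-2^31, 2^31 - 2^12] in 4 KiB steps, and the paired jalr then adds a
 * signed 12-bit offset in [-2^11, 2^11 - 1]. Summing the two intervals
 * gives exactly the asymmetric range [-2^31 - 2^11, 2^31 - 2^11) above.
 */
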
/* Emit fixed-length instructions for address */
static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
{
	u64 ip = (u64)(ctx->insns + ctx->ninsns);
	s64 off = addr - ip;
	s64 upper = (off + (1 << 11)) >> 12;
	s64 lower = off & 0xfff;

	if (extra_pass && !in_auipc_jalr_range(off)) {
		pr_err("bpf-jit: target offset 0x%llx is out of range\n", off);
		return -ERANGE;
	}

	emit(rv_auipc(rd, upper), ctx);
	emit(rv_addi(rd, rd, lower), ctx);
	return 0;
}

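/* Worked example (editorial, for illustration): for off = 0x1800,
 * lower = 0x800, which addi sign-extends to -0x800 at run time. The
 * "+ (1 << 11)" pre-bias makes upper = 0x2, so auipc contributes
 * 0x2000 and the addi of -0x800 lands on the intended 0x1800.
 */
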
/* Emit variable-length instructions for 32-bit and 64-bit imm */
static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
{
	/* Note that the immediate from the add is sign-extended,
	 * which means that we need to compensate this by adding 2^12,
	 * when the 12th bit is set. A simpler way of doing this, and
	 * getting rid of the check, is to just add 2^11 before the
	 * shift. The "Loading a 32-Bit constant" example from the
	 * "Computer Organization and Design, RISC-V edition" book by
	 * Patterson/Hennessy highlights this fact.
	 *
	 * This also means that we need to process LSB to MSB.
	 */
	s64 upper = (val + (1 << 11)) >> 12;
	/* Sign-extend lower 12 bits to 64 bits since immediates for li, addiw,
	 * and addi are signed and RVC checks will perform signed comparisons.
	 */
	s64 lower = ((val & 0xfff) << 52) >> 52;
	int shift;

	if (is_32b_int(val)) {
		if (upper)
			emit_lui(rd, upper, ctx);

		if (!upper) {
			emit_li(rd, lower, ctx);
			return;
		}

		emit_addiw(rd, rd, lower, ctx);
		return;
	}

	shift = __ffs(upper);
	upper >>= shift;
	shift += 12;

	emit_imm(rd, upper, ctx);

	emit_slli(rd, rd, shift, ctx);
	if (lower)
		emit_addi(rd, rd, lower, ctx);
}

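/* Worked example (editorial, for illustration): val = 0x1000010000
 * gives upper = 0x1000010 and lower = 0. Then shift = __ffs(upper) = 4,
 * so emit_imm() recurses on 0x100001 (emitting lui rd, 0x100 and
 * addiw rd, rd, 1) and follows with slli rd, rd, 16; no trailing addi
 * is needed since lower == 0.
 */
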
static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
{
	int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;

	if (seen_reg(RV_REG_RA, ctx)) {
		emit_ld(RV_REG_RA, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	emit_ld(RV_REG_FP, store_offset, RV_REG_SP, ctx);
	store_offset -= 8;
	if (seen_reg(RV_REG_S1, ctx)) {
		emit_ld(RV_REG_S1, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S2, ctx)) {
		emit_ld(RV_REG_S2, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S3, ctx)) {
		emit_ld(RV_REG_S3, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S4, ctx)) {
		emit_ld(RV_REG_S4, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S5, ctx)) {
		emit_ld(RV_REG_S5, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S6, ctx)) {
		emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx);
		store_offset -= 8;
	}

	emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
	/* Set return value. */
	if (!is_tail_call)
		emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
	emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
		  is_tail_call ? 4 : 0, /* skip TCC init */
		  ctx);
}

static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
		     struct rv_jit_context *ctx)
{
	switch (cond) {
	case BPF_JEQ:
		emit(rv_beq(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JGT:
		emit(rv_bltu(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JLT:
		emit(rv_bltu(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JGE:
		emit(rv_bgeu(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JLE:
		emit(rv_bgeu(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JNE:
		emit(rv_bne(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSGT:
		emit(rv_blt(rs, rd, rvoff >> 1), ctx);
		return;
	case BPF_JSLT:
		emit(rv_blt(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSGE:
		emit(rv_bge(rd, rs, rvoff >> 1), ctx);
		return;
	case BPF_JSLE:
		emit(rv_bge(rs, rd, rvoff >> 1), ctx);
	}
}

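/* Editorial note: rvoff is a byte offset, while the rv_b* encoding
 * helpers above take their immediate in 2-byte units (the granularity
 * of compressed instructions), hence the rvoff >> 1 in every emit().
 */
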
static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
			struct rv_jit_context *ctx)
{
	s64 upper, lower;

	if (is_13b_int(rvoff)) {
		emit_bcc(cond, rd, rs, rvoff, ctx);
		return;
	}

	/* Adjust for jal */
	rvoff -= 4;

	/* Transform, e.g.:
	 *   bne rd,rs,foo
	 * to
	 *   beq rd,rs,<.L1>
	 *   (auipc foo)
	 *   jal(r) foo
	 * .L1
	 */
	cond = invert_bpf_cond(cond);
	if (is_21b_int(rvoff)) {
		emit_bcc(cond, rd, rs, 8, ctx);
		emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx);
		return;
	}

	/* 32b: no need for an additional rvoff adjustment, since we
	 * get that from the auipc at PC', where PC = PC' + 4.
	 */
	upper = (rvoff + (1 << 11)) >> 12;
	lower = rvoff & 0xfff;

	emit_bcc(cond, rd, rs, 12, ctx);
	emit(rv_auipc(RV_REG_T1, upper), ctx);
	emit(rv_jalr(RV_REG_ZERO, RV_REG_T1, lower), ctx);
}

static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
{
	emit_slli(reg, reg, 32, ctx);
	emit_srli(reg, reg, 32, ctx);
}

static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
{
	int tc_ninsn, off, start_insn = ctx->ninsns;
	u8 tcc = rv_tail_call_reg(ctx);

	/* a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
		   ctx->offset[0];
	emit_zext_32(RV_REG_A2, ctx);

	off = offsetof(struct bpf_array, map.max_entries);
	if (is_12b_check(off, insn))
		return -1;
	emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx);
	off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
	emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx);

	/* if (--TCC < 0)
	 *	goto out;
	 */
	emit_addi(RV_REG_TCC, tcc, -1, ctx);
	off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
	emit_branch(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);

	/* prog = array->ptrs[index];
	 * if (!prog)
	 *	goto out;
	 */
	emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
	emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
	off = offsetof(struct bpf_array, ptrs);
	if (is_12b_check(off, insn))
		return -1;
	emit_ld(RV_REG_T2, off, RV_REG_T2, ctx);
	off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
	emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	if (is_12b_check(off, insn))
		return -1;
	emit_ld(RV_REG_T3, off, RV_REG_T2, ctx);
	__build_epilogue(true, ctx);
	return 0;
}

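/* Illustrative shape of the emitted tail call (editorial sketch, not
 * part of the original source; register choices as in the code above):
 *
 *	lwu  t1, max_entries(a1)
 *	bgeu a2, t1, out	# index >= array->map.max_entries
 *	addi a6, a6, -1		# --TCC (tcc may live in s6 across calls)
 *	blt  a6, zero, out	# TCC went negative
 *	slli t2, a2, 3
 *	add  t2, t2, a1
 *	ld   t2, ptrs(t2)	# prog = array->ptrs[index]
 *	beq  t2, zero, out	# !prog
 *	ld   t3, bpf_func(t2)
 *	<restore regs>
 *	jalr zero, 4(t3)	# jump past the callee's 4-byte TCC init
 */
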
static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
		      struct rv_jit_context *ctx)
{
	u8 code = insn->code;

	switch (code) {
	case BPF_JMP | BPF_JA:
	case BPF_JMP | BPF_CALL:
	case BPF_JMP | BPF_EXIT:
	case BPF_JMP | BPF_TAIL_CALL:
		break; /* These instructions do not use a destination register. */
	default:
		*rd = bpf_to_rv_reg(insn->dst_reg, ctx);
	}

	if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
	    code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
	    code & BPF_LDX || code & BPF_STX)
		*rs = bpf_to_rv_reg(insn->src_reg, ctx);
}

static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
	emit_mv(RV_REG_T2, *rd, ctx);
	emit_zext_32(RV_REG_T2, ctx);
	emit_mv(RV_REG_T1, *rs, ctx);
	emit_zext_32(RV_REG_T1, ctx);
	*rd = RV_REG_T2;
	*rs = RV_REG_T1;
}

static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
	emit_addiw(RV_REG_T2, *rd, 0, ctx);
	emit_addiw(RV_REG_T1, *rs, 0, ctx);
	*rd = RV_REG_T2;
	*rs = RV_REG_T1;
}

static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
{
	emit_mv(RV_REG_T2, *rd, ctx);
	emit_zext_32(RV_REG_T2, ctx);
	emit_zext_32(RV_REG_T1, ctx);
	*rd = RV_REG_T2;
}

static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
{
	emit_addiw(RV_REG_T2, *rd, 0, ctx);
	*rd = RV_REG_T2;
}

static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
			      struct rv_jit_context *ctx)
{
	s64 upper, lower;

	if (rvoff && fixed_addr && is_21b_int(rvoff)) {
		emit(rv_jal(rd, rvoff >> 1), ctx);
		return 0;
	} else if (in_auipc_jalr_range(rvoff)) {
		upper = (rvoff + (1 << 11)) >> 12;
		lower = rvoff & 0xfff;
		emit(rv_auipc(RV_REG_T1, upper), ctx);
		emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
		return 0;
	}

	pr_err("bpf-jit: target offset 0x%llx is out of range\n", rvoff);
	return -ERANGE;
}

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
		cond == BPF_JSGE || cond == BPF_JSLE;
}

static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
{
	s64 off = 0;
	u64 ip;

	if (addr && ctx->insns) {
		ip = (u64)(long)(ctx->insns + ctx->ninsns);
		off = addr - ip;
	}

	return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
}

static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
			struct rv_jit_context *ctx)
{
	u8 r0;
	int jmp_offset;

	if (off) {
		if (is_12b_int(off)) {
			emit_addi(RV_REG_T1, rd, off, ctx);
		} else {
			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		}
		rd = RV_REG_T1;
	}

	switch (imm) {
	/* lock *(u32/u64 *)(dst_reg + off16) <op>= src_reg */
	case BPF_ADD:
		emit(is64 ? rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	case BPF_AND:
		emit(is64 ? rv_amoand_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoand_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	case BPF_OR:
		emit(is64 ? rv_amoor_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	case BPF_XOR:
		emit(is64 ? rv_amoxor_d(RV_REG_ZERO, rs, rd, 0, 0) :
		     rv_amoxor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
		break;
	/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
	case BPF_ADD | BPF_FETCH:
		emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
		     rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
		if (!is64)
			emit_zext_32(rs, ctx);
		break;
	case BPF_AND | BPF_FETCH:
		emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
		     rv_amoand_w(rs, rs, rd, 0, 0), ctx);
		if (!is64)
			emit_zext_32(rs, ctx);
		break;
	case BPF_OR | BPF_FETCH:
		emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
		     rv_amoor_w(rs, rs, rd, 0, 0), ctx);
		if (!is64)
			emit_zext_32(rs, ctx);
		break;
	case BPF_XOR | BPF_FETCH:
		emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
		     rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
		if (!is64)
			emit_zext_32(rs, ctx);
		break;
	/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
	case BPF_XCHG:
		emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
		     rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
		if (!is64)
			emit_zext_32(rs, ctx);
		break;
	/* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */
	case BPF_CMPXCHG:
		r0 = bpf_to_rv_reg(BPF_REG_0, ctx);
		emit(is64 ? rv_addi(RV_REG_T2, r0, 0) :
		     rv_addiw(RV_REG_T2, r0, 0), ctx);
		emit(is64 ? rv_lr_d(r0, 0, rd, 0, 0) :
		     rv_lr_w(r0, 0, rd, 0, 0), ctx);
		jmp_offset = ninsns_rvoff(8);
		emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx);
		emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) :
		     rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx);
		jmp_offset = ninsns_rvoff(-6);
		emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
		emit(rv_fence(0x3, 0x3), ctx);
		break;
	}
}

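/* Illustrative expansion of the BPF_CMPXCHG case above (editorial
 * sketch): r0 = atomic_cmpxchg(addr, r0, src) implemented with LR/SC:
 *
 *	mv    t2, r0		# keep the expected value
 * 1:	lr.d  r0, (rd)		# load-reserve the old value into r0
 *	bne   t2, r0, 2f	# mismatch: done, r0 holds the old value
 *	sc.d  t3, rs, (rd)	# try to store the new value
 *	bne   t3, zero, 1b	# reservation lost, retry from the lr
 *	fence rw, rw
 * 2:
 */
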
#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)

bool ex_handler_bpf(const struct exception_table_entry *ex,
		    struct pt_regs *regs)
{
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
	int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);

	*(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
	regs->epc = (unsigned long)&ex->fixup - offset;

	return true;
}

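/* Editorial note: on a fault in a BPF_PROBE_MEM load, the handler
 * above zeroes the load's destination register (located in pt_regs via
 * pt_regmap) and resumes at the instruction following the faulting
 * load, so a read through a bad BTF pointer behaves as if it read 0.
 */
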
/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct rv_jit_context *ctx,
				 int dst_reg, int insn_len)
{
	struct exception_table_entry *ex;
	unsigned long pc;
	off_t offset;

	if (!ctx->insns || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	if (WARN_ON_ONCE(insn_len > ctx->ninsns))
		return -EINVAL;

	if (WARN_ON_ONCE(!rvc_enabled() && insn_len == 1))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->nexentries];
	pc = (unsigned long)&ctx->insns[ctx->ninsns - insn_len];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16));
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
		FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
	ex->type = EX_TYPE_BPF;

	ctx->nexentries++;
	return 0;
}

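/* Worked example (editorial, for illustration): with dst_reg = 5 and a
 * fixup offset of 0x100, the packed word is
 * FIELD_PREP(GENMASK(31, 27), 5) | FIELD_PREP(GENMASK(26, 0), 0x100),
 * i.e. (5 << 27) | 0x100 = 0x28000100.
 */
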
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
		      bool extra_pass)
{
	bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
		    BPF_CLASS(insn->code) == BPF_JMP;
	int s, e, rvoff, ret, i = insn - ctx->prog->insnsi;
	struct bpf_prog_aux *aux = ctx->prog->aux;
	u8 rd = -1, rs = -1, code = insn->code;
	s16 off = insn->off;
	s32 imm = insn->imm;

	init_regs(&rd, &rs, insn, ctx);

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		if (imm == 1) {
			/* Special mov32 for zext */
			emit_zext_32(rd, ctx);
			break;
		}
		emit_mv(rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_add(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		if (is64)
			emit_sub(rd, rd, rs, ctx);
		else
			emit_subw(rd, rd, rs, ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_and(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_or(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_xor(rd, rd, rs, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit_sub(rd, RV_REG_ZERO, rd, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			emit_slli(rd, rd, 48, ctx);
			emit_srli(rd, rd, 48, ctx);
			break;
		case 32:
			if (!aux->verifier_zext)
				emit_zext_32(rd, ctx);
			break;
		case 64:
			/* Do nothing */
			break;
		}
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
		emit_li(RV_REG_T2, 0, ctx);

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);
		if (imm == 16)
			goto out_be;

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);
		if (imm == 32)
			goto out_be;

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);

		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
		emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
		emit_srli(rd, rd, 8, ctx);

out_be:
		emit_andi(RV_REG_T1, rd, 0xff, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);

		emit_mv(rd, RV_REG_T2, ctx);
		break;

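	/* Editorial note: each four-instruction group above shifts the
	 * lowest byte of rd into the bottom of t2 and then makes room
	 * for the next byte, so a 16-bit swap runs one group plus the
	 * final andi/add pair at out_be, a 32-bit swap runs three
	 * groups, and a 64-bit swap runs all seven.
	 */
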
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_imm(rd, imm, ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_12b_int(imm)) {
			emit_addi(rd, rd, imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_add(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_12b_int(-imm)) {
			emit_addi(rd, rd, -imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_sub(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_12b_int(imm)) {
			emit_andi(rd, rd, imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_and(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_ori(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_or(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_12b_int(imm)) {
			emit(rv_xori(rd, rd, imm), ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_xor(rd, rd, RV_REG_T1, ctx);
		}
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
		     rv_mulw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
		     rv_divuw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_imm(RV_REG_T1, imm, ctx);
		emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
		     rv_remuw(rd, rd, RV_REG_T1), ctx);
		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_slli(rd, rd, imm, ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		if (is64)
			emit_srli(rd, rd, imm, ctx);
		else
			emit(rv_srliw(rd, rd, imm), ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		if (is64)
			emit_srai(rd, rd, imm, ctx);
		else
			emit(rv_sraiw(rd, rd, imm), ctx);

		if (!is64 && !aux->verifier_zext)
			emit_zext_32(rd, ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		rvoff = rv_offset(i, off, ctx);
		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
		if (ret)
			return ret;
		break;

	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		rvoff = rv_offset(i, off, ctx);
		if (!is64) {
			s = ctx->ninsns;
			if (is_signed_bpf_cond(BPF_OP(code)))
				emit_sext_32_rd_rs(&rd, &rs, ctx);
			else
				emit_zext_32_rd_rs(&rd, &rs, ctx);
			e = ctx->ninsns;

			/* Adjust for extra insns */
			rvoff -= ninsns_rvoff(e - s);
		}

		if (BPF_OP(code) == BPF_JSET) {
			/* Adjust for and */
			rvoff -= 4;
			emit_and(RV_REG_T1, rd, rs, ctx);
			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
				    ctx);
		} else {
			emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
		}
		break;

	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		rvoff = rv_offset(i, off, ctx);
		s = ctx->ninsns;
		if (imm) {
			emit_imm(RV_REG_T1, imm, ctx);
			rs = RV_REG_T1;
		} else {
			/* If imm is 0, simply use the zero register. */
			rs = RV_REG_ZERO;
		}
		if (!is64) {
			if (is_signed_bpf_cond(BPF_OP(code)))
				emit_sext_32_rd(&rd, ctx);
			else
				emit_zext_32_rd_t1(&rd, ctx);
		}
		e = ctx->ninsns;

		/* Adjust for extra insns */
		rvoff -= ninsns_rvoff(e - s);
		emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
		break;

	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		rvoff = rv_offset(i, off, ctx);
		s = ctx->ninsns;
		if (is_12b_int(imm)) {
			emit_andi(RV_REG_T1, rd, imm, ctx);
		} else {
			emit_imm(RV_REG_T1, imm, ctx);
			emit_and(RV_REG_T1, rd, RV_REG_T1, ctx);
		}
		/* For jset32, we should clear the upper 32 bits of t1, but
		 * sign-extension is sufficient here and saves one instruction,
		 * as t1 is used only in comparison against zero.
		 */
		if (!is64 && imm < 0)
			emit_addiw(RV_REG_T1, RV_REG_T1, 0, ctx);
		e = ctx->ninsns;
		rvoff -= ninsns_rvoff(e - s);
		emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		bool fixed_addr;
		u64 addr;

		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &addr, &fixed_addr);
		if (ret < 0)
			return ret;

		ret = emit_call(addr, fixed_addr, ctx);
		if (ret)
			return ret;

		if (insn->src_reg != BPF_PSEUDO_CALL)
			emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(i, ctx))
			return -1;
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		if (i == ctx->prog->len - 1)
			break;

		rvoff = epilogue_offset(ctx);
		ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
		if (ret)
			return ret;
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		if (bpf_pseudo_func(insn)) {
			/* fixed-length insns for extra jit pass */
			ret = emit_addr(rd, imm64, extra_pass, ctx);
			if (ret)
				return ret;
		} else {
			emit_imm(rd, imm64, ctx);
		}

		return 1;
	}

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	{
		int insn_len, insns_start;

		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_12b_int(off)) {
				insns_start = ctx->ninsns;
				emit(rv_lbu(rd, off, rs), ctx);
				insn_len = ctx->ninsns - insns_start;
				break;
			}

			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
			insns_start = ctx->ninsns;
			emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
			insn_len = ctx->ninsns - insns_start;
			if (insn_is_zext(&insn[1]))
				return 1;
			break;
		case BPF_H:
			if (is_12b_int(off)) {
				insns_start = ctx->ninsns;
				emit(rv_lhu(rd, off, rs), ctx);
				insn_len = ctx->ninsns - insns_start;
				break;
			}

			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
			insns_start = ctx->ninsns;
			emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
			insn_len = ctx->ninsns - insns_start;
			if (insn_is_zext(&insn[1]))
				return 1;
			break;
		case BPF_W:
			if (is_12b_int(off)) {
				insns_start = ctx->ninsns;
				emit(rv_lwu(rd, off, rs), ctx);
				insn_len = ctx->ninsns - insns_start;
				break;
			}

			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
			insns_start = ctx->ninsns;
			emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
			insn_len = ctx->ninsns - insns_start;
			if (insn_is_zext(&insn[1]))
				return 1;
			break;
		case BPF_DW:
			if (is_12b_int(off)) {
				insns_start = ctx->ninsns;
				emit_ld(rd, off, rs, ctx);
				insn_len = ctx->ninsns - insns_start;
				break;
			}

			emit_imm(RV_REG_T1, off, ctx);
			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
			insns_start = ctx->ninsns;
			emit_ld(rd, 0, RV_REG_T1, ctx);
			insn_len = ctx->ninsns - insns_start;
			break;
		}

		ret = add_exception_handler(insn, ctx, rd, insn_len);
		if (ret)
			return ret;
		break;
	}
	/* speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sb(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
		emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;

	case BPF_ST | BPF_MEM | BPF_H:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit(rv_sh(rd, off, RV_REG_T1), ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
		emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
		break;
	case BPF_ST | BPF_MEM | BPF_W:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit_sw(rd, off, RV_REG_T1, ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
		emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
		break;
	case BPF_ST | BPF_MEM | BPF_DW:
		emit_imm(RV_REG_T1, imm, ctx);
		if (is_12b_int(off)) {
			emit_sd(rd, off, RV_REG_T1, ctx);
			break;
		}

		emit_imm(RV_REG_T2, off, ctx);
		emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
		emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
		if (is_12b_int(off)) {
			emit(rv_sb(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		emit(rv_sb(RV_REG_T1, 0, rs), ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_H:
		if (is_12b_int(off)) {
			emit(rv_sh(rd, off, rs), ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		emit(rv_sh(RV_REG_T1, 0, rs), ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_W:
		if (is_12b_int(off)) {
			emit_sw(rd, off, rs, ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		emit_sw(RV_REG_T1, 0, rs, ctx);
		break;
	case BPF_STX | BPF_MEM | BPF_DW:
		if (is_12b_int(off)) {
			emit_sd(rd, off, rs, ctx);
			break;
		}

		emit_imm(RV_REG_T1, off, ctx);
		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
		emit_sd(RV_REG_T1, 0, rs, ctx);
		break;
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		emit_atomic(rd, rs, off, imm,
			    BPF_SIZE(code) == BPF_DW, ctx);
		break;
	default:
		pr_err("bpf-jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
	int stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
	if (bpf_stack_adjust)
		mark_fp(ctx);

	if (seen_reg(RV_REG_RA, ctx))
		stack_adjust += 8;
	stack_adjust += 8; /* RV_REG_FP */
	if (seen_reg(RV_REG_S1, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S2, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S3, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S4, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S5, ctx))
		stack_adjust += 8;
	if (seen_reg(RV_REG_S6, ctx))
		stack_adjust += 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;

	store_offset = stack_adjust - 8;

	/* First instruction is always setting the tail-call-counter
	 * (TCC) register. This instruction is skipped for tail calls.
	 * Force using a 4-byte (non-compressed) instruction.
	 */
	emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);

	emit_addi(RV_REG_SP, RV_REG_SP, -stack_adjust, ctx);

	if (seen_reg(RV_REG_RA, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_RA, ctx);
		store_offset -= 8;
	}
	emit_sd(RV_REG_SP, store_offset, RV_REG_FP, ctx);
	store_offset -= 8;
	if (seen_reg(RV_REG_S1, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S1, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S2, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S2, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S3, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S3, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S4, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S4, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S5, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S5, ctx);
		store_offset -= 8;
	}
	if (seen_reg(RV_REG_S6, ctx)) {
		emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx);
		store_offset -= 8;
	}

	emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx);

	if (bpf_stack_adjust)
		emit_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust, ctx);

	/* Program contains calls and tail calls, so RV_REG_TCC needs
	 * to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx);

	ctx->stack_size = stack_adjust;
}

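/* Illustrative frame layout right after the prologue (editorial
 * sketch, assuming every callee-saved register is in use):
 *
 *   fp = sp + stack_adjust -> +------------------------+
 *                             | ra, fp, s1 .. s6       | 8 bytes each
 *                             | padding to 16 bytes    |
 *   s5 (BPF_REG_FP) ->        +------------------------+
 *                             | BPF program stack      | bpf_stack_adjust
 *   sp ->                     +------------------------+
 */
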
void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
{
	__build_epilogue(false, ctx);
}