// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include "bpf_jit.h"
#define REG_TCC		LOONGARCH_GPR_A6
#define TCC_SAVED	LOONGARCH_GPR_S5

#define SAVE_RA		BIT(0)
#define SAVE_TCC	BIT(1)
static const int regmap[] = {
	/* return value from in-kernel function, and exit value for eBPF program */
	[BPF_REG_0] = LOONGARCH_GPR_A5,
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = LOONGARCH_GPR_A0,
	[BPF_REG_2] = LOONGARCH_GPR_A1,
	[BPF_REG_3] = LOONGARCH_GPR_A2,
	[BPF_REG_4] = LOONGARCH_GPR_A3,
	[BPF_REG_5] = LOONGARCH_GPR_A4,
	/* callee-saved registers that in-kernel function will preserve */
	[BPF_REG_6] = LOONGARCH_GPR_S0,
	[BPF_REG_7] = LOONGARCH_GPR_S1,
	[BPF_REG_8] = LOONGARCH_GPR_S2,
	[BPF_REG_9] = LOONGARCH_GPR_S3,
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = LOONGARCH_GPR_S4,
	/* temporary register for blinding constants */
	[BPF_REG_AX] = LOONGARCH_GPR_T0,
};
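
/*
 * Illustrative example (an informal reading of the mapping above, not
 * emitted verbatim): for a helper call "r0 = helper(r1, r2)", the JIT
 * already has BPF r1/r2 living in $a0/$a1, calls the helper, and then
 * copies the $a0 result into $a5 (BPF r0); see the BPF_JMP | BPF_CALL
 * case in build_insn().
 */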

static void mark_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_RA;
}

static void mark_tail_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_TCC;
}

static bool seen_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_RA);
}

static bool seen_tail_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_TCC);
}

static u8 tail_call_reg(struct jit_ctx *ctx)
{
	if (seen_call(ctx))
		return TCC_SAVED;

	return REG_TCC;
}

/*
 * eBPF prog stack layout:
 *
 *                                        high
 * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
 *                            |           $ra           |
 *                            +-------------------------+
 *                            |           $fp           |
 *                            +-------------------------+
 *                            |           $s0           |
 *                            +-------------------------+
 *                            |           $s1           |
 *                            +-------------------------+
 *                            |           $s2           |
 *                            +-------------------------+
 *                            |           $s3           |
 *                            +-------------------------+
 *                            |           $s4           |
 *                            +-------------------------+
 *                            |           $s5           |
 *                            +-------------------------+ <--BPF_REG_FP
 *                            |  prog->aux->stack_depth |
 *                            |        (optional)       |
 * current $sp -------------> +-------------------------+
 *                                        low
 */
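
/*
 * A worked example of the frame size computed in build_prologue() below
 * (assuming a 64-bit kernel, so sizeof(long) == 8): for a program with
 * prog->aux->stack_depth == 24, bpf_stack_adjust = round_up(24, 16) = 32,
 * the eight saved registers take 8 * 8 = 64 bytes (already 16-byte
 * aligned), so stack_adjust = 64 + 32 = 96 and BPF_REG_FP ends up 64
 * bytes below the original $sp.
 */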

static void build_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);

	/* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
	stack_adjust += sizeof(long) * 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in REG_TCC from the caller.
	 */
	emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);

	store_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);

	if (bpf_stack_adjust)
		emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);

	/*
	 * Program contains calls and tail calls, so REG_TCC needs
	 * to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		move_reg(ctx, TCC_SAVED, REG_TCC);

	ctx->stack_size = stack_adjust;
}

static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
{
	int stack_adjust = ctx->stack_size;
	int load_offset;

	load_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);

	if (!is_tail_call) {
		/* Set return value */
		move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]);
		/* Return to the caller */
		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
	} else {
		/*
		 * Call the next bpf prog and skip the first instruction
		 * of TCC initialization.
		 */
		emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
	}
}
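
/*
 * Note (a reading of the code above, not separate documentation): jirl
 * offsets are in 4-byte instruction units, so the offset of 1 enters the
 * next program exactly one instruction past its entry point, skipping
 * the TCC initialization that build_prologue() emits first.
 */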

static void build_epilogue(struct jit_ctx *ctx)
{
	__build_epilogue(ctx, false);
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

/* initialized on the first pass of build_body() */
static int out_offset = -1;
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	int off;
	u8 tcc = tail_call_reg(ctx);
	u8 a1 = LOONGARCH_GPR_A1;
	u8 a2 = LOONGARCH_GPR_A2;
	u8 t1 = LOONGARCH_GPR_T1;
	u8 t2 = LOONGARCH_GPR_T2;
	u8 t3 = LOONGARCH_GPR_T3;
	const int idx0 = ctx->idx;

#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))

	/*
	 * a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	 goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_insn(ctx, ldwu, t1, a1, off);
	/* bgeu $a2, $t1, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0)
		goto toofar;

	/*
	 * if (--TCC < 0)
	 *	 goto out;
	 */
	emit_insn(ctx, addid, REG_TCC, tcc, -1);
	if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/*
	 * prog = array->ptrs[index];
	 * if (!prog)
	 *	 goto out;
	 */
	emit_insn(ctx, alsld, t2, a2, a1, 2);
	off = offsetof(struct bpf_array, ptrs);
	emit_insn(ctx, ldd, t2, t2, off);
	/* beq $t2, $zero, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_insn(ctx, ldd, t3, t2, off);
	__build_epilogue(ctx, true);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}

	return 0;

toofar:
	pr_info_once("tail_call: jump too far\n");
	return -1;
#undef cur_offset
#undef jmp_offset
}
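
/*
 * In C terms the sequence emitted above implements (a sketch; all three
 * guard branches target the shared "out" point, whose offset must be
 * identical on every JIT pass, hence the cur_offset/out_offset check):
 *
 *	if (index >= array->map.max_entries) goto out;
 *	if (--TCC < 0) goto out;
 *	prog = array->ptrs[index];
 *	if (!prog) goto out;
 *	goto *(prog->bpf_func + 4);	// skip the TCC init instruction
 * out:
 */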

static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 t3 = LOONGARCH_GPR_T3;
	const u8 r0 = regmap[BPF_REG_0];
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool isdw = BPF_SIZE(insn->code) == BPF_DW;

	move_imm(ctx, t1, off, false);
	emit_insn(ctx, addd, t1, dst, t1);
	move_reg(ctx, t3, src);

	switch (imm) {
	/* lock *(size *)(dst + off) <op>= src */
	case BPF_ADD:
		if (isdw)
			emit_insn(ctx, amaddd, t2, t1, src);
		else
			emit_insn(ctx, amaddw, t2, t1, src);
		break;
	case BPF_AND:
		if (isdw)
			emit_insn(ctx, amandd, t2, t1, src);
		else
			emit_insn(ctx, amandw, t2, t1, src);
		break;
	case BPF_OR:
		if (isdw)
			emit_insn(ctx, amord, t2, t1, src);
		else
			emit_insn(ctx, amorw, t2, t1, src);
		break;
	case BPF_XOR:
		if (isdw)
			emit_insn(ctx, amxord, t2, t1, src);
		else
			emit_insn(ctx, amxorw, t2, t1, src);
		break;
	/* src = atomic_fetch_<op>(dst + off, src) */
	case BPF_ADD | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amaddd, src, t1, t3);
		} else {
			emit_insn(ctx, amaddw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_AND | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amandd, src, t1, t3);
		} else {
			emit_insn(ctx, amandw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_OR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amord, src, t1, t3);
		} else {
			emit_insn(ctx, amorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_XOR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amxord, src, t1, t3);
		} else {
			emit_insn(ctx, amxorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* src = atomic_xchg(dst + off, src); */
	case BPF_XCHG:
		if (isdw) {
			emit_insn(ctx, amswapd, src, t1, t3);
		} else {
			emit_insn(ctx, amswapw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* r0 = atomic_cmpxchg(dst + off, r0, src); */
	case BPF_CMPXCHG:
		move_reg(ctx, t2, r0);
		if (isdw) {
			emit_insn(ctx, lld, r0, t1, 0);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scd, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4);
		} else {
			emit_insn(ctx, llw, r0, t1, 0);
			emit_zext_32(ctx, t2, true);
			emit_zext_32(ctx, r0, true);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scw, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6);
			emit_zext_32(ctx, r0, true);
		}
		break;
	}
}
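
/*
 * The BPF_CMPXCHG case above hand-rolls an LL/SC retry loop; branch
 * offsets are in 4-byte instruction units. "bne t2, r0, 4" branches past
 * the store attempt when the old value does not match, while the trailing
 * "beq t3, $zero, -4" (-6 in the 32-bit variant, which must also redo the
 * zero-extensions) loops back to the ll when the store-conditional loses
 * its reservation.
 */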

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
	       cond == BPF_JSGE || cond == BPF_JSLE;
}

static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
{
	u8 tm = -1;
	u64 func_addr;
	bool func_addr_fixed;
	int i = insn - ctx->prog->insnsi;
	int ret, jmp_offset;
	const u8 code = insn->code;
	const u8 cond = BPF_OP(code);
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		move_reg(ctx, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		move_imm(ctx, dst, imm, is32);
		break;

	/* dst = dst + src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_insn(ctx, addd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst + imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_signed_imm12(imm)) {
			emit_insn(ctx, addid, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, addd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - src */
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_insn(ctx, subd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - imm */
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_signed_imm12(-imm)) {
			emit_insn(ctx, addid, dst, dst, -imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, subd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * src */
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_insn(ctx, muld, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * imm */
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, muld, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst / src */
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit_zext_32(ctx, dst, is32);
		move_reg(ctx, t1, src);
		emit_zext_32(ctx, t1, is32);
		emit_insn(ctx, divdu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst / imm */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_zext_32(ctx, dst, is32);
		emit_insn(ctx, divdu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst % src */
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit_zext_32(ctx, dst, is32);
		move_reg(ctx, t1, src);
		emit_zext_32(ctx, t1, is32);
		emit_insn(ctx, moddu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst % imm */
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_zext_32(ctx, dst, is32);
		emit_insn(ctx, moddu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & src */
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_insn(ctx, and, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & imm */
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, andi, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, and, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | src */
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_insn(ctx, or, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | imm */
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, ori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, or, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ src */
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_insn(ctx, xor, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ imm */
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, xori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, xor, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst << src (logical) */
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_insn(ctx, sllw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_insn(ctx, slld, dst, dst, src);
		break;

	/* dst = dst << imm (logical) */
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_insn(ctx, slliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_insn(ctx, sllid, dst, dst, imm);
		break;

	/* dst = dst >> src (logical) */
	case BPF_ALU | BPF_RSH | BPF_X:
		emit_insn(ctx, srlw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_insn(ctx, srld, dst, dst, src);
		break;

	/* dst = dst >> imm (logical) */
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_insn(ctx, srliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_insn(ctx, srlid, dst, dst, imm);
		break;

	/* dst = dst >> src (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_insn(ctx, sraw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_insn(ctx, srad, dst, dst, src);
		break;

	/* dst = dst >> imm (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraiw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraid, dst, dst, imm);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			/* do nothing */
			break;
		}
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
		switch (imm) {
		case 16:
			emit_insn(ctx, revb2h, dst, dst);
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			emit_insn(ctx, revb2w, dst, dst);
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			emit_insn(ctx, revbd, dst, dst);
			break;
		}
		break;

	/* PC += off if dst cond src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_reg(ctx, t1, dst);
		move_reg(ctx, t2, src);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, t1, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
			goto toofar;
		break;
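
	/*
	 * Note on the pattern above and below: both operands are first
	 * copied into scratch registers and then sign-extended for signed
	 * conditions or zero-extended for unsigned ones, so a BPF_JMP32
	 * compare sees values canonicalized to 32 bits no matter what the
	 * upper halves of the 64-bit registers happen to hold.
	 */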

	/* PC += off if dst cond imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (imm) {
			move_imm(ctx, t1, imm, false);
			tm = t1;
		} else {
			/* If imm is 0, simply use zero register. */
			tm = LOONGARCH_GPR_ZERO;
		}
		move_reg(ctx, t2, dst);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, tm, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, tm, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & src */
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		emit_insn(ctx, and, t1, dst, src);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, and, t1, dst, t1);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;

		move_addr(ctx, t1, func_addr);
		emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
		break;

	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		mark_tail_call(ctx);
		if (emit_bpf_tail_call(ctx) < 0)
			return -EINVAL;
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		if (i == ctx->prog->len - 1)
			break;

		jmp_offset = epilogue_offset(ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;

		move_imm(ctx, dst, imm64, is32);
		return 1;
	}

	/* dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldbu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxbu, dst, src, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldhu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxhu, dst, src, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldwu, dst, src, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, ldptrw, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxwu, dst, src, t1);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, off, is32);
			emit_insn(ctx, ldxd, dst, src, t1);
			break;
		}
		break;
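
	/*
	 * Offset-form selection used by the memory cases here and below:
	 * a 12-bit signed offset fits the basic ld/st encodings, a
	 * 14-bit signed offset can use the ldptr/stptr forms for
	 * word/dword accesses, and anything larger falls back to
	 * materializing the offset in a scratch register and using the
	 * register-indexed ldx/stx instructions.
	 */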

	/* *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxb, t1, dst, t2);
			}
			break;
		case BPF_H:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxh, t1, dst, t2);
			}
			break;
		case BPF_W:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxw, t1, dst, t2);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxd, t1, dst, t2);
			}
			break;
		}
		break;

	/* *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxb, src, dst, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxh, src, dst, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxw, src, dst, t1);
			}
			break;
		case BPF_DW:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxd, src, dst, t1);
			}
			break;
		}
		break;

	/* atomic ops */
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		emit_atomic(insn, ctx);
		break;

	/* Speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;

	default:
		pr_err("bpf_jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;

toofar:
	pr_info_once("bpf_jit: opcode %02x, jump too far\n", code);
	return -E2BIG;
}

static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	int i;
	const struct bpf_prog *prog = ctx->prog;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}

	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}
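
/*
 * ctx->offset[] bookkeeping (a summary of the code above, for reference):
 * on the ctx->image == NULL pass, offset[i] records the native instruction
 * index at which BPF insn i begins and offset[prog->len] the total count,
 * which is what bpf2la_offset() uses to translate BPF jump offsets;
 * bpf_int_jit_compile() later scales these entries by LOONGARCH_INSN_SIZE
 * when filling in the JITed line info.
 */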

/* Fill space with break instructions */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = INSN_BREAK;
}
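
/*
 * Filling with INSN_BREAK means any stray jump into space the JIT never
 * wrote lands on a break instruction and traps, rather than executing
 * leftover bytes; validate_code() below relies on the same marker to
 * detect holes in the finished image.
 */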

static int validate_code(struct jit_ctx *ctx)
{
	int i;
	union loongarch_instruction insn;

	for (i = 0; i < ctx->idx; i++) {
		insn = ctx->image[i];
		/* Check INSN_BREAK */
		if (insn.word == INSN_BREAK)
			return -1;
	}

	return 0;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	bool tmp_blinded = false, extra_pass = false;
	u8 *image_ptr;
	int image_size;
	struct jit_ctx ctx;
	struct jit_data *jit_data;
	struct bpf_binary_header *header;
	struct bpf_prog *tmp, *orig_prog = prog;

	/*
	 * If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	if (IS_ERR(tmp))
		return orig_prog;

	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		image_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_offset;
	}
	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/*
	 * Now we know the actual image size.
	 * As each LoongArch instruction is 32 bits long, we translate
	 * the number of JITed instructions into the size required to
	 * store the JITed code.
	 */
	image_size = sizeof(u32) * ctx.idx;
	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 2. Now, the actual pass to generate final JIT code */
	ctx.image = (union loongarch_instruction *)image_ptr;

skip_init_ctx:
	ctx.idx = 0;

	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}
	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}

	/* And we're done */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	/* Update the icache */
	flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			prog->jited_len = 0;
			goto out_offset;
		}
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->jited = 1;
	prog->jited_len = image_size;
	prog->bpf_func = (void *)ctx.image;

	if (!prog->is_func || extra_pass) {
		int i;

		/* offset[prog->len] is the size of program */
		for (i = 0; i <= prog->len; i++)
			ctx.offset[i] *= LOONGARCH_INSN_SIZE;
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);

out_offset:
		kvfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}

out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);

	out_offset = -1;

	return prog;
}