/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/filter.h>
#include <asm/cacheflush.h>
	/* BPF program currently being JIT-compiled */
	const struct bpf_prog *prog;
	/* Instruction index at which the shared epilogue starts */
	unsigned int epilogue_offset;
	/*
	 * Buffer of native instructions being emitted; emit_insn() only
	 * writes when image != NULL — presumably NULL during the initial
	 * sizing pass, TODO confirm against the callers.
	 */
	union loongarch_instruction *image;
	/* NOTE(review): looks like the allocation backing @image — verify */
	struct bpf_binary_header *header;
/*
 * Emit one native instruction via the corresponding emit_<func>() encoder.
 *
 * When ctx->image is NULL (sizing pass) nothing is written, but ctx->idx
 * is advanced in both cases so the two passes agree on instruction
 * offsets. The visible original neither advanced ctx->idx (every emit
 * would target the same slot) nor wrapped the statements in
 * do { } while (0), which breaks use as a single statement after "if".
 */
#define emit_insn(ctx, func, ...)						\
do {										\
	if (ctx->image != NULL) {						\
		union loongarch_instruction *insn = &ctx->image[ctx->idx];	\
										\
		emit_##func(insn, ##__VA_ARGS__);				\
	}									\
	ctx->idx++;								\
} while (0)
/*
 * Immediate-range predicates: is_signed_immN(val) is true when val fits a
 * N-bit signed immediate field, is_unsigned_imm12(val) when it fits a
 * 12-bit unsigned field. Both defer to the generic *_imm_check() helpers.
 */
#define is_signed_imm12(val) signed_imm_check(val, 12)
#define is_signed_imm14(val) signed_imm_check(val, 14)
#define is_signed_imm16(val) signed_imm_check(val, 16)
#define is_signed_imm26(val) signed_imm_check(val, 26)
#define is_signed_imm32(val) signed_imm_check(val, 32)
#define is_signed_imm52(val) signed_imm_check(val, 52)
#define is_unsigned_imm12(val) unsigned_imm_check(val, 12)
45 static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx)
47 /* BPF JMP offset is relative to the next instruction */
50 * Whereas LoongArch branch instructions encode the offset
51 * from the branch itself, so we must subtract 1 from the
54 return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1));
57 static inline int epilogue_offset(const struct jit_ctx *ctx)
60 int to = ctx->epilogue_offset;
65 /* Zero-extend 32 bits into 64 bits */
66 static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
71 emit_insn(ctx, lu32id, reg, 0);
74 /* Signed-extend 32 bits into 64 bits */
75 static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
80 emit_insn(ctx, addiw, reg, reg, 0);
83 static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
85 u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;
87 /* lu12iw rd, imm_31_12 */
88 imm_31_12 = (addr >> 12) & 0xfffff;
89 emit_insn(ctx, lu12iw, rd, imm_31_12);
91 /* ori rd, rd, imm_11_0 */
92 imm_11_0 = addr & 0xfff;
93 emit_insn(ctx, ori, rd, rd, imm_11_0);
95 /* lu32id rd, imm_51_32 */
96 imm_51_32 = (addr >> 32) & 0xfffff;
97 emit_insn(ctx, lu32id, rd, imm_51_32);
99 /* lu52id rd, rd, imm_63_52 */
100 imm_63_52 = (addr >> 52) & 0xfff;
101 emit_insn(ctx, lu52id, rd, rd, imm_63_52);
104 static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
106 long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;
108 /* or rd, $zero, $zero */
110 emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO);
114 /* addiw rd, $zero, imm_11_0 */
115 if (is_signed_imm12(imm)) {
116 emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm);
120 /* ori rd, $zero, imm_11_0 */
121 if (is_unsigned_imm12(imm)) {
122 emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm);
126 /* lu52id rd, $zero, imm_63_52 */
127 imm_63_52 = (imm >> 52) & 0xfff;
128 imm_51_0 = imm & 0xfffffffffffff;
129 if (imm_63_52 != 0 && imm_51_0 == 0) {
130 emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52);
134 /* lu12iw rd, imm_31_12 */
135 imm_31_12 = (imm >> 12) & 0xfffff;
136 emit_insn(ctx, lu12iw, rd, imm_31_12);
138 /* ori rd, rd, imm_11_0 */
139 imm_11_0 = imm & 0xfff;
141 emit_insn(ctx, ori, rd, rd, imm_11_0);
143 if (!is_signed_imm32(imm)) {
146 * If bit[51:31] is all 0 or all 1,
147 * it means bit[51:32] is sign extended by lu12iw,
148 * no need to call lu32id to do a new filled operation.
150 imm_51_31 = (imm >> 31) & 0x1fffff;
151 if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
152 /* lu32id rd, imm_51_32 */
153 imm_51_32 = (imm >> 32) & 0xfffff;
154 emit_insn(ctx, lu32id, rd, imm_51_32);
158 /* lu52id rd, rd, imm_63_52 */
159 if (!is_signed_imm52(imm))
160 emit_insn(ctx, lu52id, rd, rd, imm_63_52);
164 emit_zext_32(ctx, rd, is32);
167 static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd,
168 enum loongarch_gpr rj)
170 emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO);
173 static inline int invert_jmp_cond(u8 cond)
201 static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
202 enum loongarch_gpr rd, int jmp_offset)
206 /* PC += jmp_offset if rj == rd */
207 emit_insn(ctx, beq, rj, rd, jmp_offset);
211 /* PC += jmp_offset if rj != rd */
212 emit_insn(ctx, bne, rj, rd, jmp_offset);
215 /* PC += jmp_offset if rj > rd (unsigned) */
216 emit_insn(ctx, bltu, rd, rj, jmp_offset);
219 /* PC += jmp_offset if rj < rd (unsigned) */
220 emit_insn(ctx, bltu, rj, rd, jmp_offset);
223 /* PC += jmp_offset if rj >= rd (unsigned) */
224 emit_insn(ctx, bgeu, rj, rd, jmp_offset);
227 /* PC += jmp_offset if rj <= rd (unsigned) */
228 emit_insn(ctx, bgeu, rd, rj, jmp_offset);
231 /* PC += jmp_offset if rj > rd (signed) */
232 emit_insn(ctx, blt, rd, rj, jmp_offset);
235 /* PC += jmp_offset if rj < rd (signed) */
236 emit_insn(ctx, blt, rj, rd, jmp_offset);
239 /* PC += jmp_offset if rj >= rd (signed) */
240 emit_insn(ctx, bge, rj, rd, jmp_offset);
243 /* PC += jmp_offset if rj <= rd (signed) */
244 emit_insn(ctx, bge, rd, rj, jmp_offset);
249 static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
250 enum loongarch_gpr rd, int jmp_offset)
252 cond = invert_jmp_cond(cond);
253 cond_jmp_offset(ctx, cond, rj, rd, 2);
254 emit_insn(ctx, b, jmp_offset);
257 static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset)
259 emit_insn(ctx, b, jmp_offset);
262 static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
263 enum loongarch_gpr rd, int jmp_offset)
266 * A large PC-relative jump offset may overflow the immediate field of
267 * the native conditional branch instruction, triggering a conversion
268 * to use an absolute jump instead, this jump sequence is particularly
269 * nasty. For now, use cond_jmp_offs26() directly to keep it simple.
270 * In the future, maybe we can add support for far branching, the branch
271 * relaxation requires more than two passes to converge, the code seems
272 * too complex to understand, not quite sure whether it is necessary and
273 * worth the extra pain. Anyway, just leave it as it is to enhance code
276 if (is_signed_imm26(jmp_offset)) {
277 cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset);
/*
 * Emit an unconditional jump of up to 26 bits of reach. Returns 0 on
 * success, -EINVAL when the offset does not fit (the visible original
 * had no return statements at all in this non-void function).
 */
static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset)
{
	if (is_signed_imm26(jmp_offset)) {
		uncond_jmp_offs26(ctx, jmp_offset);
		return 0;
	}

	return -EINVAL;
}
294 static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
295 enum loongarch_gpr rd, int jmp_offset)
297 if (is_signed_imm16(jmp_offset)) {
298 cond_jmp_offset(ctx, cond, rj, rd, jmp_offset);