// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler for PPC32
 *
 * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
 *
 * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"
/*
 * Stack layout:
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 16 * 4	|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 16		|
 * sp (r1) --->	[    stack pointer	] --------------
 */
/* for gpr non volatile registers r17 to r31 (15) + tail call */
#define BPF_PPC_STACK_SAVE	(15 * 4 + 4)
/* stack frame, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME(ctx)	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)

#define PPC_EX32(r, i)		EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))

/* PPC NVR range -- update this if we ever use NVRs below r17 */
#define BPF_PPC_NVR_MIN		_R17
#define BPF_PPC_TC		_R16
/* BPF register usage */
#define TMP_REG	(MAX_BPF_JIT_REG + 0)

/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R12;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R4;
	ctx->b2p[BPF_REG_2] = _R6;
	ctx->b2p[BPF_REG_3] = _R8;
	ctx->b2p[BPF_REG_4] = _R10;
	ctx->b2p[BPF_REG_5] = _R22;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R24;
	ctx->b2p[BPF_REG_7] = _R26;
	ctx->b2p[BPF_REG_8] = _R28;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R18;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R20;
	ctx->b2p[TMP_REG] = _R31;		/* 32 bits */
}
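/*
 * Note: every 64-bit BPF register is held in a pair of GPRs, with
 * b2p[reg] holding the low word and b2p[reg] - 1 the high word, which
 * is why the 64-bit registers above all map to even GPRs (odd r31 is
 * reserved for the 32-bit-only TMP_REG). BPF_REG_1 for instance is the
 * r3/r4 pair: the prologue moves the 32-bit ctx pointer from r3 into
 * r4 (low word) and zeroes r3 (high word).
 */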
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
		return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);

	WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
	/* Use the hole we have left for alignment */
	return BPF_PPC_STACKFRAME(ctx) - 4;
}
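/*
 * Worked example, assuming stack_size == 0 and the ppc32
 * STACK_FRAME_MIN_SIZE of 16: the frame is 80 bytes, r31 is saved at
 * offset 76, r17 at offset 20, and the tail call counter slot
 * (BPF_PPC_TC) at offset 16, directly above the frame header.
 */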
#define SEEN_VREG_MASK		0x1ff80000 /* Volatile registers r3-r12 */
#define SEEN_NVREG_FULL_MASK	0x0003ffff /* Non volatile registers r14-r31 */
#define SEEN_NVREG_TEMP_MASK	0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - we use non volatile registers, or
	 * - we use the tail call counter, or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP.
	 */
	return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
	unsigned int nvreg_mask;

	if (ctx->seen & SEEN_FUNC)
		nvreg_mask = SEEN_NVREG_TEMP_MASK;
	else
		nvreg_mask = SEEN_NVREG_FULL_MASK;

	while (ctx->seen & nvreg_mask &&
	      (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
		int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab));
		int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
		int i;

		for (i = BPF_REG_0; i <= TMP_REG; i++) {
			if (ctx->b2p[i] != old)
				continue;

			ctx->b2p[i] = new;
			bpf_set_seen_register(ctx, new);
			bpf_clear_seen_register(ctx, old);
			if (i != TMP_REG) {
				bpf_set_seen_register(ctx, new - 1);
				bpf_clear_seen_register(ctx, old - 1);
			}
			break;
		}
	}
}
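/*
 * Example of what the loop above achieves (a sketch, not an exhaustive
 * trace): if the program never calls a helper, a BPF register living in
 * the non-volatile pair r30/r31 can be remapped to a free volatile pair
 * such as r10/r11, removing its save/restore from the prologue and
 * epilogue. The 0xaaaaaaaa masks keep 'old' and 'new' pointing at the
 * even (low-word) register of a pair; the extra low bit in 0xaaaaaaab
 * also lets r31, the 32-bit-only TMP_REG, be remapped on its own.
 */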
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Initialize tail_call_cnt, to be skipped if we do tail calls. */
	EMIT(PPC_RAW_LI(_R4, 0));

#define BPF_TAILCALL_PROLOGUE_SIZE	4

	if (bpf_has_stack_frame(ctx))
		EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));

	/* First arg comes in as a 32-bit pointer. */
	EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
	EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));

	/*
	 * We need a stack frame, but we don't necessarily need to
	 * save/restore LR unless we call other functions.
	 */
	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MFLR(_R0));

	/*
	 * Back up non-volatile regs -- registers r17-r31
	 */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	/* If needed retrieve arguments 9 and 10, i.e. the 5th 64-bit arg. */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
		EMIT(PPC_RAW_LWZ(bpf_to_ppc(BPF_REG_5) - 1, _R1, BPF_PPC_STACKFRAME(ctx) + 8));
		EMIT(PPC_RAW_LWZ(bpf_to_ppc(BPF_REG_5), _R1, BPF_PPC_STACKFRAME(ctx) + 12));
	}

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0));
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
	}

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}
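/*
 * For illustration, a program that calls a helper and uses r24 gets a
 * prologue along these lines (offsets assume stack_size == 0, a 16-byte
 * STACK_FRAME_MIN_SIZE and PPC_LR_STKOFF of 4, so an 80-byte frame):
 *
 *	li	r4, 0		; tail call counter
 *	stwu	r1, -80(r1)
 *	mr	r4, r3		; BPF_REG_1 low word = ctx
 *	li	r3, 0		; BPF_REG_1 high word
 *	mflr	r0
 *	stw	r24, 48(r1)
 *	stw	r0, 84(r1)	; LR in the caller's frame
 */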
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx))
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));
}
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BLR());
}
/* Relative offset needs to be calculated based on the final image location */
int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	s32 rel = (s32)func - (s32)(image + ctx->idx);

	if (image && rel < 0x2000000 && rel >= -0x2000000) {
		EMIT(PPC_RAW_BL(rel));
	} else {
		/* Load function address into r0 */
		EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
		EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
		EMIT(PPC_RAW_MTCTR(_R0));
		EMIT(PPC_RAW_BCTRL());
	}

	return 0;
}
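/*
 * For illustration, with func == 0xc0123456 and the target out of
 * direct "bl" range, the fallback above assembles to:
 *
 *	lis	r0, 0xc012	; IMM_H
 *	ori	r0, r0, 0x3456	; IMM_L
 *	mtctr	r0
 *	bctrl
 */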
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3-r6
	 * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r5-r6/BPF_REG_2 - pointer to bpf_array
	 * r7-r8/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
	EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
	/* tail_call_cnt++; */
	EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
	PPC_BCC_SHORT(COND_GE, out);

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
	EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R3, 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
	EMIT(PPC_RAW_MTCTR(_R3));

	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));

	/* Put tail_call_cnt in r4 */
	EMIT(PPC_RAW_MR(_R4, _R0));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
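/*
 * Note: bpf_jit_emit_tail_call() enters the next program
 * BPF_TAILCALL_PROLOGUE_SIZE bytes past bpf_func, skipping the
 * "li r4, 0" at the top of the prologue, so the incremented tail call
 * counter carried in r4 survives across the whole chain of tail calls.
 */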
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 dst_reg_h = dst_reg - 1;
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 src_reg_h = src_reg - 1;
		u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
		u32 tmp_reg = bpf_to_ppc(TMP_REG);
		u32 size = BPF_SIZE(code);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= 3 && dst_reg < 32) {
			bpf_set_seen_register(ctx, dst_reg);
			bpf_set_seen_register(ctx, dst_reg_h);
		}

		if (src_reg >= 3 && src_reg < 32) {
			bpf_set_seen_register(ctx, src_reg);
			bpf_set_seen_register(ctx, src_reg_h);
		}

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADDE(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, dst_reg));
			EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, dst_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
			if (IMM_HA(imm) & 0xffff)
				EMIT(PPC_RAW_ADDIS(dst_reg, dst_reg, IMM_HA(imm)));
			if (IMM_L(imm))
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm)
				break;
			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDIC(dst_reg, dst_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
			}
			if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
				EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
			else
				EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
			break;
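		/*
		 * The 64-bit cases above split into a carrying low-word op
		 * plus a carry-consuming high-word op: addic/addc compute the
		 * low word and set CA, then addze (adds 0 + CA) or addme
		 * (adds -1 + CA) fixes up the high word. E.g. for
		 * 0x00000000ffffffff + 1, addic yields low word 0 with
		 * CA = 1 and addze bumps the high word to 1.
		 */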
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_MULW(_R0, dst_reg, src_reg_h));
			EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_MULHWU(tmp_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
			break;
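		/*
		 * The sequence above keeps the low 64 bits of the product:
		 * (h1*2^32 + l1) * (h2*2^32 + l2) mod 2^64
		 *   = l1*l2 + 2^32 * (l1*h2 + h1*l2 + mulhwu(l1, l2))
		 * hence one mulhwu and three mulw are enough.
		 */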
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (!imm) {
				PPC_LI32(dst_reg, 0);
				PPC_LI32(dst_reg_h, 0);
				break;
			}
			if (imm == 1)
				break;
			if (imm == -1) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
				break;
			}
			bpf_set_seen_register(ctx, tmp_reg);
			PPC_LI32(tmp_reg, imm);
			EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, tmp_reg));
			if (imm < 0)
				EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, dst_reg));
			EMIT(PPC_RAW_MULHWU(_R0, dst_reg, tmp_reg));
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			break;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
			EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			EMIT(PPC_RAW_DIVWU(_R0, dst_reg, src_reg));
			EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
			return -EOPNOTSUPP;
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			return -EOPNOTSUPP;
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
			if (!imm)
				return -EINVAL;
			if (imm == 1)
				break;
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, _R0));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
			if (!imm)
				return -EINVAL;
			if (!is_power_of_2((u32)imm)) {
				bpf_set_seen_register(ctx, tmp_reg);
				PPC_LI32(tmp_reg, imm);
				EMIT(PPC_RAW_DIVWU(_R0, dst_reg, tmp_reg));
				EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
				break;
			}
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2((u32)imm), 31));
			break;
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
			if (!imm)
				return -EINVAL;
			if (imm < 0)
				imm = -imm;
			if (!is_power_of_2(imm))
				return -EOPNOTSUPP;
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (!imm)
				return -EINVAL;
			if (!is_power_of_2(abs(imm)))
				return -EOPNOTSUPP;
			if (imm < 0) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
				imm = -imm;
			}
			if (imm == 1)
				break;
			imm = ilog2(imm);
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
			EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
			EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
			break;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			break;
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
			EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
			break;
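		/*
		 * 64-bit negate above is two's complement via the carry
		 * chain: subfic computes 0 - low and sets CA only when the
		 * low word is 0, then subfze computes ~high + CA for the
		 * high word.
		 */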
		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_AND(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (imm >= 0)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			fallthrough;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
			if (!IMM_H(imm)) {
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			} else if (!IMM_L(imm)) {
				EMIT(PPC_RAW_ANDIS(dst_reg, dst_reg, IMM_H(imm)));
			} else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(imm) - 1)) - 1))) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0,
						    32 - fls(imm), 32 - ffs(imm)));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			/* Sign-extended */
			if (imm < 0)
				EMIT(PPC_RAW_LI(dst_reg_h, -1));
			fallthrough;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
			if (IMM_L(imm))
				EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
			if (IMM_H(imm))
				EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			if (dst_reg == src_reg) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_XOR(dst_reg_h, dst_reg_h, src_reg_h));
			}
			break;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
			if (dst_reg == src_reg)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			/* Sign-extended */
			if (imm < 0)
				EMIT(PPC_RAW_NOR(dst_reg_h, dst_reg_h, dst_reg_h));
			fallthrough;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
			if (IMM_L(imm))
				EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
			if (IMM_H(imm))
				EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			break;
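		/*
		 * ori/oris and xori/xoris take 16-bit immediates, so a full
		 * 32-bit constant costs at most two instructions and either
		 * half is skipped entirely when it is zero.
		 */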
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SLW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SRW(_R0, dst_reg, _R0));
			EMIT(PPC_RAW_SLW(tmp_reg, dst_reg, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
			break;
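		/*
		 * The branch-free 64-bit shift above exploits slw/srw
		 * returning 0 whenever the 0x20 bit of the shift amount is
		 * set: for dst <<= n the high word is built as
		 *	(high << n) | (low >> (32 - n)) | (low << (n - 32))
		 * and at most one of the two correction terms survives, so
		 * n == 0, n < 32 and n >= 32 all fall out without a test.
		 */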
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm)
				break;
			if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, imm, 0, 31 - imm));
				EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, imm, 0, 31 - imm));
				break;
			}
			if (imm < 64)
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg, imm, 0, 31 - imm));
			else
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			EMIT(PPC_RAW_LI(dst_reg, 0));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
			EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_SRW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm)
				break;
			if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, 32 - imm, imm, 31));
				break;
			}
			if (imm < 64)
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg_h, 64 - imm, imm - 32, 31));
			else
				EMIT(PPC_RAW_LI(dst_reg, 0));
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
			EMIT(PPC_RAW_SRAW(tmp_reg, dst_reg_h, tmp_reg));
			EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm)
				break;
			if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
				break;
			}
			if (imm < 64)
				EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, imm - 32));
			else
				EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, 31));
			EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, 31));
			break;
		/*
		 * MOV
		 */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (dst_reg == src_reg)
				break;
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
			/* special mov32 for zext */
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			else if (dst_reg != src_reg)
				EMIT(PPC_RAW_MR(dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			PPC_EX32(dst_reg_h, imm);
			break;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
			PPC_LI32(dst_reg, imm);
			break;
		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm) {
			case 16:
				/* Copy 16 bits to upper part */
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg, 16, 0, 15));
				/* Rotate 8 bits right & mask */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(_R0, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				break;
			case 64:
				bpf_set_seen_register(ctx, tmp_reg);
				EMIT(PPC_RAW_RLWINM(tmp_reg, dst_reg, 8, 0, 31));
				EMIT(PPC_RAW_RLWINM(_R0, dst_reg_h, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 0, 7));
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
				break;
			}
			break;
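		/*
		 * Worked 32-bit example for dst = 0x11223344: rotating left
		 * by 8 gives r0 = 0x22334411, i.e. bytes 2 and 4 already in
		 * place; the two rlwimi then overwrite byte 1 with 0x44 and
		 * byte 3 with 0x22, giving 0x44332211. The 64-bit case does
		 * the same per word and swaps the two words at the end.
		 */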
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 32 bits */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 16, 31));
				break;
			case 32:
			case 64:
				/* nop */
				break;
			}
			break;
		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STB(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STH(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
			break;
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
			PPC_EX32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;
		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			save_reg = _R0;
			ret_reg = src_reg;

			bpf_set_seen_register(ctx, tmp_reg);
			bpf_set_seen_register(ctx, ax_reg);

			/* Get offset into TMP_REG */
			EMIT(PPC_RAW_LI(tmp_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into r0 */
			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));

			/* Save old value in BPF_REG_AX */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(ax_reg, _R0));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);
				/* Compare with old value in BPF_REG_0 */
				EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				/* Put new value in r0 */
				EMIT(PPC_RAW_MR(_R0, src_reg));
				break;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
						   code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			/* For the BPF_FETCH variant, get old data into src_reg */
			if (imm & BPF_FETCH) {
				EMIT(PPC_RAW_MR(ret_reg, ax_reg));
				if (!fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
			}
			break;
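		/*
		 * The lwarx/stwcx. pair above forms the classic ll/sc retry
		 * loop: stwcx. leaves CR0.EQ clear if the reservation was
		 * lost, and the conditional branch back to tmp_idx re-reads
		 * the word and redoes the operation until it commits
		 * atomically.
		 */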
		case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
			return -EOPNOTSUPP;
		/*
		 * BPF_LDX
		 */
		case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				PPC_LI32(_R0, TASK_SIZE - off);
				EMIT(PPC_RAW_CMPLW(src_reg, _R0));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * For BPF_DW case, "li reg_h,0" would be needed when
				 * !fp->aux->verifier_zext. Emit NOP otherwise.
				 *
				 * Note that "li reg_h,0" is emitted for BPF_B/H/W case,
				 * if necessary. So, jump there instead of emitting an
				 * additional "li reg_h,0" instruction.
				 */
				if (size == BPF_DW && !fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(dst_reg_h, 0));
				else
					EMIT(PPC_RAW_NOP());
				/*
				 * Need to jump two instructions instead of one for BPF_DW case
				 * as there are two load instructions for dst_reg_h & dst_reg
				 * respectively.
				 */
				if (size == BPF_DW)
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
				break;
			}

			if (size != BPF_DW && !fp->aux->verifier_zext)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				int insn_idx = ctx->idx - 1;
				int jmp_off = 4;

				/*
				 * In case of BPF_DW, two lwz instructions are emitted, one
				 * for higher 32-bit and another for lower 32-bit. So, set
				 * ex->insn to the first of the two and jump over both
				 * instructions in fixup.
				 *
				 * Similarly, with !verifier_zext, two instructions are
				 * emitted for BPF_B/H/W case. So, set ex->insn to the
				 * instruction that could fault and skip over both
				 * instructions.
				 */
				if (size == BPF_DW || !fp->aux->verifier_zext) {
					insn_idx -= 1;
					jmp_off += 4;
				}

				ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx,
							    jmp_off, dst_reg);
				if (ret)
					return ret;
			}
			break;
		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			tmp_idx = ctx->idx;
			PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
			PPC_LI32(dst_reg, (u32)insn[i].imm);
			/* padding to allow full 4 instructions for later patching */
			for (j = ctx->idx - tmp_idx; j < 4; j++)
				EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;
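		/*
		 * Note on the padding above: PPC_LI32 emits one or two
		 * instructions per 32-bit half depending on the constant, so
		 * the nops keep every LD_IMM64 exactly 4 words. That way
		 * addrs[] stays stable between passes and the constant can
		 * be patched in place later.
		 */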
		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1) {
				ret = bpf_jit_emit_exit_insn(image, ctx, _R0, exit_addr);
				if (ret)
					return ret;
			}
			/* else fall through to the epilogue */
			break;
		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
				EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5) - 1, _R1, 8));
				EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
			}
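			/*
			 * The stores above put the 5th 64-bit argument in the
			 * caller's parameter save area (r1+8/r1+12), which is
			 * where a JITed callee's prologue reloads it from
			 * (its own BPF_PPC_STACKFRAME + 8/12).
			 */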
			ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
			if (ret)
				return ret;

			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0) - 1, _R3));
			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R4));
			break;
		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmplwi
				 */
				if (imm >= 0 && imm < 32768) {
					EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load ... but unsigned comparison */
					PPC_EX32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpwi
				 */
				if (imm >= -32768 && imm < 32768) {
					EMIT(PPC_RAW_CMPWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					if (imm < 0) {
						EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
						PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					}
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;
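		/*
		 * For the 64-bit conditionals above, the high words are
		 * compared first and the short branch skips the low-word
		 * compare only when they differ, so CR0 always holds the
		 * deciding comparison when PPC_BCC finally tests true_cond.
		 */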
		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
			return -EOPNOTSUPP;
		}
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
		    !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}