/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit64.h"
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size/4);
}
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}
/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}
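/*
 * Editorial worked example of the offsets above, assuming
 * BPF_PPC_STACK_SAVE = 5*8 = 40 (the five NVRs r27-r31, per bpf_jit64.h):
 * without a stack frame, bpf_jit_stack_local() returns -(40 + 24) = -64(r1)
 * and bpf_jit_stack_tailcallcnt() returns -64 + 16 = -48(r1), matching the
 * -(BPF_PPC_STACK_SAVE + 8) slot that bpf_jit_build_prologue() below uses
 * to initialize tail_call_cnt.
 */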
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}
static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		PPC_LI(b2p[TMP_REG_1], 0);
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		PPC_NOP();
		PPC_NOP();
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
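	/*
	 * Editorial note: the tail_call_cnt setup above is two instructions
	 * (2 * 4 = 8 bytes), which is what BPF_TAILCALL_PROLOGUE_SIZE
	 * accounts for -- bpf_jit_emit_tail_call() branches to bpf_func + 8
	 * so that a tail call skips re-initializing tail_call_cnt.
	 */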
	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}
	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size);
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}
}
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);
	PPC_BLR();
}
static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		PPC_NOP();
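	/*
	 * Editorial sketch (not emitted verbatim): for an arbitrary 64-bit
	 * address, PPC_LI64(12, func) expands to up to five instructions,
	 * e.g.:
	 *
	 *	lis	r12,addr@highest
	 *	ori	r12,r12,addr@higher
	 *	sldi	r12,r12,32
	 *	oris	r12,r12,addr@h
	 *	ori	r12,r12,addr@l
	 *
	 * Shorter expansions are padded out with nops by the loop above.
	 */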
#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	PPC_MTLR(12);
	PPC_BLRL();
}
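/*
 * Editorial note: under ELF ABI v1, a function "address" points at a
 * descriptor of roughly { entry, toc, env } -- three doublewords, which is
 * why FUNCTION_DESCR_SIZE is 24 under ABI v1 and 0 under ABI v2 in
 * bpf_jit.h -- hence the loads above fetch the TOC from offset 8 and the
 * real entry point from offset 0.
 */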
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	/* prog = array->ptrs[index]; */
	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	PPC_CMPLDI(b2p[TMP_REG_1], 0);
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
	PPC_MTCTR(b2p[TMP_REG_1]);

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	PPC_BCTR();

	/* out: */
	return 0;
}
/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);
/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs, bool extra_pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		u64 imm64;
		u8 *func;
		u32 true_cond;
		u32 tmp_idx;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;
		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);
		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				PPC_ADDI(dst_reg, dst_reg, IMM_L(-imm));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
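			/*
			 * Editorial note on the asymmetric range check above:
			 * subtraction is emitted as addi with the negated
			 * immediate, so imm == -32768 is excluded (its
			 * negation does not fit the signed 16-bit field)
			 * while imm == 32768 is allowed (-32768 does fit).
			 */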
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVDU(dst_reg, dst_reg, src_reg);
			break;
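			/*
			 * Editorial note: MOD is open-coded above as
			 * dst - (dst / src) * src; the hardware modulo
			 * instructions (moduw/modud) only exist from
			 * ISA 3.0 (POWER9) onwards, so they are not used.
			 */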
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;

			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					PPC_LI(dst_reg, 0);
					break;
				}
			}

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVDU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;
		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;
		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			goto bpf_alu32_trunc;
bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;
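		/*
		 * Editorial note: rlwinm rd,rs,0,0,31 above rotates by 0 and
		 * masks bits 0-31 of the low word; as a word operation it
		 * clears the upper 32 bits, i.e. dst &= 0xffffffff.
		 */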
		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
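				/*
				 * Editorial worked example for the 16-bit
				 * swap above: with the low halfword of
				 * dst_reg = 0xABCD, rlwinm(tmp, dst, 8, 16, 23)
				 * gives 0x0000CD00 and
				 * rlwimi(tmp, dst, 24, 24, 31) merges in
				 * 0x000000AB, yielding the swapped 0x0000CDAB.
				 */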
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;
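			/*
			 * Editorial note: the value is bounced through the
			 * local stack slot because ldbrx is a byte-reversed
			 * load; a GPR-to-GPR byte-reverse instruction (brd)
			 * only appears in ISA 3.1, so store + byte-reversed
			 * load is the simplest portable sequence here.
			 */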
emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
				break;
			case 32:
				/* zero-extend 32 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
				break;
			case 64:
				/* nop */
				break;
			}
			break;
		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
			    (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) &&
			     (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) || !cpu_has_feature(CPU_FTR_HVMODE))))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(0x7c0006ac | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_INST_SYNC);
				PPC_LD(b2p[TMP_REG_1], 13, 0);
				PPC_ORI(31, 31, 0);
				break;
			case STF_BARRIER_FALLBACK:
				EMIT(PPC_INST_MFLR | ___PPC_RT(b2p[TMP_REG_1]));
				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				PPC_MTCTR(12);
				EMIT(PPC_INST_BCTR | 0x1);
				PPC_MTLR(b2p[TMP_REG_1]);
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;
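			/*
			 * Editorial note: the three non-trivial cases mirror
			 * the stf_barrier flavor chosen at boot (see
			 * stf_barrier_type_get()): a special eieio form, a
			 * sync plus a dummy load from the paca (r13), or a
			 * branch-and-link to the bpf_stf_barrier() fallback
			 * above, preserving LR in TMP_REG_1 around the call.
			 */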
		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_BPF_STL(src_reg, dst_reg, off);
			break;
		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
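			/*
			 * Editorial note: lwarx/stwcx. form a load-reserve /
			 * store-conditional loop -- if the reservation is
			 * lost before stwcx. completes, CR0[EQ] is clear and
			 * the bne emitted by PPC_BCC_SHORT(COND_NE, tmp_idx)
			 * retries from the lwarx.
			 */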
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_BPF_LL(dst_reg, src_reg, off);
			break;
		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;
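		/*
		 * Editorial example: BPF_LD_IMM64(R1, 0x1122334455667788)
		 * occupies two bpf_insn slots -- insn[i].imm holds the low
		 * word 0x55667788 and insn[i+1].imm the high word
		 * 0x11223344; addrs[++i] above keeps branch targets
		 * consistent across both slots.
		 */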
		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;
		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			/* bpf function call */
			if (insn[i].src_reg == BPF_PSEUDO_CALL)
				if (!extra_pass)
					func = NULL;
				else if (fp->aux->func && off < fp->aux->func_cnt)
					/* use the subprog id from the off
					 * field to lookup the callee address
					 */
					func = (u8 *) fp->aux->func[off]->bpf_func;
				else
					return -EINVAL;
			/* kernel helper call */
			else
				func = (u8 *) __bpf_call_base + imm;

			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);
			break;
		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;
		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
struct powerpc64_jit_data {
	struct bpf_binary_header *header;
	u32 *addrs;
	u8 *image;
	u32 proglen;
	struct codegen_context ctx;
};
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc64_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;

	if (!fp->jit_requested)
		return org_fp;
	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;
	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
	if (addrs) {
		cgctx = jit_data->ctx;
		image = jit_data->image;
		bpf_hdr = jit_data->header;
		proglen = jit_data->proglen;
		alloclen = proglen + FUNCTION_DESCR_SIZE;
		extra_pass = true;
		goto skip_init_ctx;
	}
	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * If we have seen a tail call, we need a second pass.
	 * This is because bpf_jit_emit_common_epilogue() is called
	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
	 */
	if (cgctx.seen & SEEN_TAILCALL) {
		cgctx.idx = 0;
		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
			fp = org_fp;
			goto out_addrs;
		}
	}
	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
			bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out_addrs;
	}
skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}
	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the base address of code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);
#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;
	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
	if (!fp->is_func || extra_pass) {
out_addrs:
		kfree(addrs);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->image = image;
		jit_data->header = bpf_hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}
/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;
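	/*
	 * Editorial note: bpf_func points just past the function descriptor
	 * inside the first page of the allocation made by
	 * bpf_jit_binary_alloc(), so masking with PAGE_MASK recovers the
	 * bpf_binary_header at the start of that allocation.
	 */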
	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}