/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
    int *p = area;

    /* Fill whole space with trap instructions */
    while (p < (int *)((char *)area + size))
        *p++ = BREAKPOINT_INSTRUCTION;
}
static inline void bpf_flush_icache(void *start, void *end)
{
    smp_wmb();
    flush_icache_range((unsigned long)start, (unsigned long)end);
}
static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
    return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
    ctx->seen |= (1 << (31 - b2p[i]));
}
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
    /*
     * We only need a stack frame if:
     * - we call other functions (kernel helpers), or
     * - the bpf program uses its stack area
     * The latter condition is deduced from the usage of BPF_REG_FP
     */
    return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}
/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *              [   prev sp           ] <-------------
 *              [     ...             ]              |
 * sp (r1) ---> [   stack pointer     ] --------------
 *              [   nv gpr save area  ] 8*8
 *              [   tail_call_cnt     ] 8
 *              [   local_tmp_var     ] 8
 *              [   unused red zone   ] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
    if (bpf_has_stack_frame(ctx))
        return STACK_FRAME_MIN_SIZE + MAX_BPF_STACK;
    else
        return -(BPF_PPC_STACK_SAVE + 16);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
    return bpf_jit_stack_local(ctx) + 8;
}
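
/*
 * Example: with BPF_PPC_STACK_SAVE of 8*8 bytes (the "nv gpr save area" in
 * the layout above) and no stack frame of our own, bpf_jit_stack_local()
 * resolves to -80(r1) and bpf_jit_stack_tailcallcnt() to -72(r1), i.e. the
 * two 8-byte slots just below the 64-byte register save area in the red zone.
 */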

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
    if (reg >= BPF_PPC_NVR_MIN && reg < 32)
        return (bpf_has_stack_frame(ctx) ? BPF_PPC_STACKFRAME : 0) -
                (8 * (32 - reg));

    pr_err("BPF JIT is asking about unknown registers");
    BUG();
}
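
/*
 * Example: for the highest non-volatile GPR, r31, the expression above works
 * out to BPF_PPC_STACKFRAME - 8 when we have our own stack frame, and to
 * -8(r1) in the red zone below the caller's frame when we do not.
 */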

static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
{
    /*
     * Load skb->len and skb->data_len
     * r3 points to skb
     */
    PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
    PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
    /* header_len = len - data_len */
    PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);

    /* skb->data pointer */
    PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
}

static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
    int i;

    /*
     * Initialize tail_call_cnt if we do tail calls.
     * Otherwise, put in NOPs so that it can be skipped when we are
     * invoked through a tail call.
     */
    if (ctx->seen & SEEN_TAILCALL) {
        PPC_LI(b2p[TMP_REG_1], 0);
        /* this goes in the redzone */
        PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
    } else {
        PPC_NOP();
        PPC_NOP();
    }

#define BPF_TAILCALL_PROLOGUE_SIZE  8
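    /*
     * BPF_TAILCALL_PROLOGUE_SIZE is 8 bytes, i.e. the two instructions
     * emitted just above (li+std, or the two nops). A tail call enters at
     * bpf_func + BPF_TAILCALL_PROLOGUE_SIZE, skipping this pair so that the
     * caller's tail_call_cnt is preserved rather than re-initialized.
     */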

    if (bpf_has_stack_frame(ctx)) {
        /*
         * We need a stack frame, but we don't necessarily need to
         * save/restore LR unless we call other functions
         */
        if (ctx->seen & SEEN_FUNC) {
            EMIT(PPC_INST_MFLR | __PPC_RT(R0));
            PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
        }

        PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
    }

    /*
     * Back up non-volatile regs -- BPF registers 6-10
     * If we haven't created our own stack frame, we save these
     * in the protected zone below the previous stack frame
     */
    for (i = BPF_REG_6; i <= BPF_REG_10; i++)
        if (bpf_is_seen_register(ctx, i))
            PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

    /*
     * Save additional non-volatile regs if we cache skb
     * Also, setup skb data
     */
    if (ctx->seen & SEEN_SKB) {
        PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
                bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
        PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
                bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
        bpf_jit_emit_skb_loads(image, ctx);
    }

    /* Setup frame pointer to point to the bpf stack area */
    if (bpf_is_seen_register(ctx, BPF_REG_FP))
        PPC_ADDI(b2p[BPF_REG_FP], 1,
                STACK_FRAME_MIN_SIZE + MAX_BPF_STACK);
}
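
/*
 * Note that for a leaf program which makes no helper calls and never touches
 * its BPF stack, bpf_has_stack_frame() is false and the prologue above is
 * just the two tail-call-count instructions (or nops): no stack frame is
 * created and no non-volatile registers need saving.
 */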

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
    int i;

    /* Restore NVRs */
    for (i = BPF_REG_6; i <= BPF_REG_10; i++)
        if (bpf_is_seen_register(ctx, i))
            PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

    /* Restore non-volatile registers used for skb cache */
    if (ctx->seen & SEEN_SKB) {
        PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
                bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
        PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
                bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
    }

    /* Tear down our stack frame */
    if (bpf_has_stack_frame(ctx)) {
        PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
        if (ctx->seen & SEEN_FUNC) {
            PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
            PPC_MTLR(0);
        }
    }
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
    bpf_jit_emit_common_epilogue(image, ctx);

    /* Move result to r3 */
    PPC_MR(3, b2p[BPF_REG_0]);
    PPC_BLR();
}

static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
    unsigned int i, ctx_idx = ctx->idx;

    /* Load function address into r12 */
    PPC_LI64(12, func);

    /* For bpf-to-bpf function calls, the callee's address is unknown
     * until the last extra pass. As seen above, we use PPC_LI64() to
     * load the callee's address, but this may optimize the number of
     * instructions required based on the nature of the address.
     *
     * Since we don't want the number of instructions emitted to change,
     * we pad the optimized PPC_LI64() call with NOPs to guarantee that
     * we always have a five-instruction sequence, which is the maximum
     * that PPC_LI64() can emit.
     */
    for (i = ctx->idx - ctx_idx; i < 5; i++)
        PPC_NOP();

#ifdef PPC64_ELF_ABI_v1
    /*
     * Load TOC from function descriptor at offset 8.
     * We can clobber r2 since we get called through a
     * function pointer (so caller will save/restore r2)
     * and since we don't use a TOC ourself.
     */
    PPC_BPF_LL(2, 12, 8);
    /* Load actual entry point from function descriptor */
    PPC_BPF_LL(12, 12, 0);
#endif

    PPC_MTCTR(12);
    PPC_BCTRL();
}
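
/*
 * Background: an ELFv1 function descriptor is three doublewords -- the entry
 * point at offset 0, the TOC pointer at offset 8 and an (unused here)
 * environment pointer at offset 16 -- which is why the two loads above read
 * offsets 0 and 8 from the descriptor address held in r12.
 */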

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
    /*
     * By now, the eBPF program has already setup parameters in r3, r4 and r5
     * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
     * r4/BPF_REG_2 - pointer to bpf_array
     * r5/BPF_REG_3 - index in bpf_array
     */
    int b2p_bpf_array = b2p[BPF_REG_2];
    int b2p_index = b2p[BPF_REG_3];

    /*
     * if (index >= array->map.max_entries)
     *   goto out;
     */
    PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
    PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
    PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
    PPC_BCC(COND_GE, out);

    /*
     * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
     *   goto out;
     */
    PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
    PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
    PPC_BCC(COND_GT, out);

    /*
     * tail_call_cnt++;
     */
    PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
    PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

    /* prog = array->ptrs[index]; */
    PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
    PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
    PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

    /*
     * if (prog == NULL)
     *   goto out;
     */
    PPC_CMPLDI(b2p[TMP_REG_1], 0);
    PPC_BCC(COND_EQ, out);

    /* goto *(prog->bpf_func + prologue_size); */
    PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
    /* skip past the function descriptor */
    PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
            FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
    PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
    PPC_MTCTR(b2p[TMP_REG_1]);

    /* tear down stack, restore NVRs, ... */
    bpf_jit_emit_common_epilogue(image, ctx);

    PPC_BCTR();

    /* out: */
    return 0;
}
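
/*
 * Roughly, the sequence emitted by bpf_jit_emit_tail_call() corresponds to:
 *
 *      if (index >= array->map.max_entries)
 *              goto out;
 *      if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 *              goto out;
 *      tail_call_cnt++;
 *      prog = array->ptrs[index];
 *      if (prog == NULL)
 *              goto out;
 *      goto *(prog->bpf_func + BPF_TAILCALL_PROLOGUE_SIZE);
 *
 * where "out" is the instruction following the tail call in the current
 * program, and the indirect branch happens only after the current stack
 * frame has been torn down.
 */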

/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                  struct codegen_context *ctx,
                  u32 *addrs)
{
    const struct bpf_insn *insn = fp->insnsi;
    int flen = fp->len;
    int i, ret;

    /* Start of epilogue code - will only be valid 2nd pass onwards */
    u32 exit_addr = addrs[flen];

    for (i = 0; i < flen; i++) {
        u32 code = insn[i].code;
        u32 dst_reg = b2p[insn[i].dst_reg];
        u32 src_reg = b2p[insn[i].src_reg];
        s16 off = insn[i].off;
        s32 imm = insn[i].imm;
        u64 imm64;
        u8 *func;
        u32 true_cond;
        u32 tmp_idx;

        /*
         * addrs[] maps a BPF bytecode address into a real offset from
         * the start of the body code.
         */
        addrs[i] = ctx->idx * 4;

        /*
         * As an optimization, we note down which non-volatile registers
         * are used so that we can only save/restore those in our
         * prologue and epilogue. We do this here regardless of whether
         * the actual BPF instruction uses src/dst registers or not
         * (for instance, BPF_CALL does not use them). The expectation
         * is that those instructions will have src_reg/dst_reg set to
         * 0. Even otherwise, we just lose some prologue/epilogue
         * optimization but everything else should work without
         * any issues.
         */
        if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
            bpf_set_seen_register(ctx, insn[i].dst_reg);
        if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
            bpf_set_seen_register(ctx, insn[i].src_reg);

        switch (code) {
        /*
         * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
         */
        case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
        case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
            PPC_ADD(dst_reg, dst_reg, src_reg);
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
        case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
            PPC_SUB(dst_reg, dst_reg, src_reg);
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
        case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
            if (!imm) {
                goto bpf_alu32_trunc;
            } else if (imm >= -32768 && imm < 32768) {
                PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
            } else {
                PPC_LI32(b2p[TMP_REG_1], imm);
                PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
        case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
            if (!imm) {
                goto bpf_alu32_trunc;
            } else if (imm > -32768 && imm <= 32768) {
                PPC_ADDI(dst_reg, dst_reg, IMM_L(-imm));
            } else {
                PPC_LI32(b2p[TMP_REG_1], imm);
                PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
        case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
            if (BPF_CLASS(code) == BPF_ALU)
                PPC_MULW(dst_reg, dst_reg, src_reg);
            else
                PPC_MULD(dst_reg, dst_reg, src_reg);
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
        case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
            if (imm >= -32768 && imm < 32768)
                PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
            else {
                PPC_LI32(b2p[TMP_REG_1], imm);
                if (BPF_CLASS(code) == BPF_ALU)
                    PPC_MULW(dst_reg, dst_reg,
                            b2p[TMP_REG_1]);
                else
                    PPC_MULD(dst_reg, dst_reg,
                            b2p[TMP_REG_1]);
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
        case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
            PPC_CMPWI(src_reg, 0);
            PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
            PPC_LI(b2p[BPF_REG_0], 0);
            PPC_JMP(exit_addr);
            if (BPF_OP(code) == BPF_MOD) {
                PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
                PPC_MULW(b2p[TMP_REG_1], src_reg,
                        b2p[TMP_REG_1]);
                PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
            } else
                PPC_DIVWU(dst_reg, dst_reg, src_reg);
            goto bpf_alu32_trunc;
        case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
        case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
            PPC_CMPDI(src_reg, 0);
            PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
            PPC_LI(b2p[BPF_REG_0], 0);
            PPC_JMP(exit_addr);
            if (BPF_OP(code) == BPF_MOD) {
                PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
                PPC_MULD(b2p[TMP_REG_1], src_reg,
                        b2p[TMP_REG_1]);
                PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
            } else
                PPC_DIVDU(dst_reg, dst_reg, src_reg);
            break;
        case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
        case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
        case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
        case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
            if (imm == 0)
                return -EINVAL;
            if (imm == 1) {
                if (BPF_OP(code) == BPF_DIV) {
                    goto bpf_alu32_trunc;
                } else {
                    /* dst %= 1 is always 0 */
                    PPC_LI(dst_reg, 0);
                    break;
                }
            }

            PPC_LI32(b2p[TMP_REG_1], imm);
            switch (BPF_CLASS(code)) {
            case BPF_ALU:
                if (BPF_OP(code) == BPF_MOD) {
                    PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
                            b2p[TMP_REG_1]);
                    PPC_MULW(b2p[TMP_REG_1],
                            b2p[TMP_REG_2],
                            b2p[TMP_REG_1]);
                    PPC_SUB(dst_reg, dst_reg,
                            b2p[TMP_REG_1]);
                } else
                    PPC_DIVWU(dst_reg, dst_reg,
                            b2p[TMP_REG_1]);
                break;
            case BPF_ALU64:
                if (BPF_OP(code) == BPF_MOD) {
                    PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
                            b2p[TMP_REG_1]);
                    PPC_MULD(b2p[TMP_REG_1],
                            b2p[TMP_REG_2],
                            b2p[TMP_REG_1]);
                    PPC_SUB(dst_reg, dst_reg,
                            b2p[TMP_REG_1]);
                } else
                    PPC_DIVDU(dst_reg, dst_reg,
                            b2p[TMP_REG_1]);
                break;
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
        case BPF_ALU64 | BPF_NEG: /* dst = -dst */
            PPC_NEG(dst_reg, dst_reg);
            goto bpf_alu32_trunc;
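
        /*
         * For reference, the runtime divide-by-zero guard in the BPF_DIV/MOD
         * cases above turns e.g. a 64-bit divide by src into (register
         * numbers assume the b2p mapping from bpf_jit64.h, with BPF_REG_0
         * in r8):
         *
         *      cmpdi   src, 0
         *      bne     +12             <- skip the next two instructions
         *      li      r8, 0           <- BPF_REG_0 = 0
         *      b       exit_addr       <- branch to the epilogue
         *      divdu   dst, dst, src
         */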

        /*
         * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
         */
        case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
        case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
            PPC_AND(dst_reg, dst_reg, src_reg);
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
        case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
            if (!IMM_H(imm))
                PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
            else {
                /* Sign-extended */
                PPC_LI32(b2p[TMP_REG_1], imm);
                PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
        case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
            PPC_OR(dst_reg, dst_reg, src_reg);
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
        case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
            if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
                /* Sign-extended */
                PPC_LI32(b2p[TMP_REG_1], imm);
                PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
            } else {
                if (IMM_L(imm))
                    PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
                if (IMM_H(imm))
                    PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
        case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
            PPC_XOR(dst_reg, dst_reg, src_reg);
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
        case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
            if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
                /* Sign-extended */
                PPC_LI32(b2p[TMP_REG_1], imm);
                PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
            } else {
                if (IMM_L(imm))
                    PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
                if (IMM_H(imm))
                    PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
            /* slw clears top 32 bits */
            PPC_SLW(dst_reg, dst_reg, src_reg);
            break;
        case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
            PPC_SLD(dst_reg, dst_reg, src_reg);
            break;
        case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
            /* with imm 0, we still need to clear top 32 bits */
            PPC_SLWI(dst_reg, dst_reg, imm);
            break;
        case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
            if (imm != 0)
                PPC_SLDI(dst_reg, dst_reg, imm);
            break;
        case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
            PPC_SRW(dst_reg, dst_reg, src_reg);
            break;
        case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
            PPC_SRD(dst_reg, dst_reg, src_reg);
            break;
        case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
            PPC_SRWI(dst_reg, dst_reg, imm);
            break;
        case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
            if (imm != 0)
                PPC_SRDI(dst_reg, dst_reg, imm);
            break;
        case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
            PPC_SRAD(dst_reg, dst_reg, src_reg);
            break;
        case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
            if (imm != 0)
                PPC_SRADI(dst_reg, dst_reg, imm);
            break;

        /*
         * MOV
         */
        case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
        case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
            PPC_MR(dst_reg, src_reg);
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
        case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
            PPC_LI32(dst_reg, imm);
            goto bpf_alu32_trunc;

bpf_alu32_trunc:
        /* Truncate to 32-bits */
        if (BPF_CLASS(code) == BPF_ALU)
            PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
        break;
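
        /*
         * Note: on 64-bit Power, rlwinm with a 0..31 mask always clears the
         * upper 32 bits of the destination, so the single instruction above
         * is enough to implement the 32-bit wrap-around semantics of BPF_ALU.
         */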

        /*
         * BPF_FROM_BE/LE
         */
        case BPF_ALU | BPF_END | BPF_FROM_LE:
        case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
            if (BPF_SRC(code) == BPF_FROM_BE)
                goto emit_clear;
#else /* !__BIG_ENDIAN__ */
            if (BPF_SRC(code) == BPF_FROM_LE)
                goto emit_clear;
#endif
            switch (imm) {
            case 16:
                /* Rotate 8 bits left & mask with 0x0000ff00 */
                PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
                /* Rotate 8 bits right & insert LSB to reg */
                PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
                /* Move result back to dst_reg */
                PPC_MR(dst_reg, b2p[TMP_REG_1]);
                break;
            case 32:
                /*
                 * Rotate word left by 8 bits:
                 * 2 bytes are already in their final position
                 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
                 */
                PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
                /* Rotate 24 bits and insert byte 1 */
                PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
                /* Rotate 24 bits and insert byte 3 */
                PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
                PPC_MR(dst_reg, b2p[TMP_REG_1]);
                break;
            case 64:
                /*
                 * Way easier and faster(?) to store the value
                 * into stack and then use ldbrx
                 *
                 * ctx->seen will be reliable in pass2, but
                 * the instructions generated will remain the
                 * same across all passes
                 */
                PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
                PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
                PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
                break;
            }
            break;

emit_clear:
            switch (imm) {
            case 16:
                /* zero-extend 16 bits into 64 bits */
                PPC_RLDICL(dst_reg, dst_reg, 0, 48);
                break;
            case 32:
                /* zero-extend 32 bits into 64 bits */
                PPC_RLDICL(dst_reg, dst_reg, 0, 32);
                break;
            case 64:
                /* nop */
                break;
            }
            break;
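
        /*
         * Worked example for the imm == 32 byte swap above, with
         * dst = 0x11223344:
         *
         *      rlwinm  tmp, dst, 8, 0, 31      -> tmp = 0x22334411
         *      rlwimi  tmp, dst, 24, 0, 7      -> tmp = 0x44334411
         *      rlwimi  tmp, dst, 24, 16, 23    -> tmp = 0x44332211
         *      mr      dst, tmp
         */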

        /*
         * BPF_ST(X)
         */
        case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
        case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
            if (BPF_CLASS(code) == BPF_ST) {
                PPC_LI(b2p[TMP_REG_1], imm);
                src_reg = b2p[TMP_REG_1];
            }
            PPC_STB(src_reg, dst_reg, off);
            break;
        case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
        case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
            if (BPF_CLASS(code) == BPF_ST) {
                PPC_LI(b2p[TMP_REG_1], imm);
                src_reg = b2p[TMP_REG_1];
            }
            PPC_STH(src_reg, dst_reg, off);
            break;
        case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
        case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
            if (BPF_CLASS(code) == BPF_ST) {
                PPC_LI32(b2p[TMP_REG_1], imm);
                src_reg = b2p[TMP_REG_1];
            }
            PPC_STW(src_reg, dst_reg, off);
            break;
        case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
        case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
            if (BPF_CLASS(code) == BPF_ST) {
                PPC_LI32(b2p[TMP_REG_1], imm);
                src_reg = b2p[TMP_REG_1];
            }
            PPC_BPF_STL(src_reg, dst_reg, off);
            break;

        /*
         * BPF_STX XADD (atomic_add)
         */
        /* *(u32 *)(dst + off) += src */
        case BPF_STX | BPF_XADD | BPF_W:
            /* Get EA into TMP_REG_1 */
            PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
            tmp_idx = ctx->idx * 4;
            /* load value from memory into TMP_REG_2 */
            PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
            /* add value from src_reg into this */
            PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
            /* store result back */
            PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
            /* we're done if this succeeded */
            PPC_BCC_SHORT(COND_NE, tmp_idx);
            break;
        /* *(u64 *)(dst + off) += src */
        case BPF_STX | BPF_XADD | BPF_DW:
            PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
            tmp_idx = ctx->idx * 4;
            PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
            PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
            PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
            PPC_BCC_SHORT(COND_NE, tmp_idx);
            break;
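
        /*
         * The lwarx/stwcx. (or ldarx/stdcx.) pair above is the usual powerpc
         * atomic read-modify-write loop: if the store-conditional loses the
         * reservation because another CPU touched the location, the bne-
         * branches back to tmp_idx (the load-and-reserve) and the add is
         * retried.
         */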

        /*
         * BPF_LDX
         */
        /* dst = *(u8 *)(ul) (src + off) */
        case BPF_LDX | BPF_MEM | BPF_B:
            PPC_LBZ(dst_reg, src_reg, off);
            break;
        /* dst = *(u16 *)(ul) (src + off) */
        case BPF_LDX | BPF_MEM | BPF_H:
            PPC_LHZ(dst_reg, src_reg, off);
            break;
        /* dst = *(u32 *)(ul) (src + off) */
        case BPF_LDX | BPF_MEM | BPF_W:
            PPC_LWZ(dst_reg, src_reg, off);
            break;
        /* dst = *(u64 *)(ul) (src + off) */
        case BPF_LDX | BPF_MEM | BPF_DW:
            PPC_BPF_LL(dst_reg, src_reg, off);
            break;

        /*
         * Doubleword load
         * 16 byte instruction that uses two 'struct bpf_insn'
         */
        case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
            imm64 = ((u64)(u32) insn[i].imm) |
                    (((u64)(u32) insn[i+1].imm) << 32);
            /* Adjust for two bpf instructions */
            addrs[++i] = ctx->idx * 4;
            PPC_LI64(dst_reg, imm64);
            break;

        /*
         * Return/Exit
         */
        case BPF_JMP | BPF_EXIT:
            /*
             * If this isn't the very last instruction, branch to
             * the epilogue. If we _are_ the last instruction,
             * we'll just fall through to the epilogue.
             */
            if (i != flen - 1)
                PPC_JMP(exit_addr);
            /* else fall through to the epilogue */
            break;

        /*
         * Call kernel helper
         */
        case BPF_JMP | BPF_CALL:
            ctx->seen |= SEEN_FUNC;
            func = (u8 *) __bpf_call_base + imm;

            /* Save skb pointer if we need to re-cache skb data */
            if (bpf_helper_changes_skb_data(func))
                PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));

            bpf_jit_emit_func_call(image, ctx, (u64)func);

            /* move return value from r3 to BPF_REG_0 */
            PPC_MR(b2p[BPF_REG_0], 3);

            /* refresh skb cache */
            if (bpf_helper_changes_skb_data(func)) {
                /* reload skb pointer to r3 */
                PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
                bpf_jit_emit_skb_loads(image, ctx);
            }
            break;

        /*
         * Jumps and branches
         */
        case BPF_JMP | BPF_JA:
            PPC_JMP(addrs[i + 1 + off]);
            break;

        case BPF_JMP | BPF_JGT | BPF_K:
        case BPF_JMP | BPF_JGT | BPF_X:
        case BPF_JMP | BPF_JSGT | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_X:
            true_cond = COND_GT;
            goto cond_branch;
        case BPF_JMP | BPF_JGE | BPF_K:
        case BPF_JMP | BPF_JGE | BPF_X:
        case BPF_JMP | BPF_JSGE | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_X:
            true_cond = COND_GE;
            goto cond_branch;
        case BPF_JMP | BPF_JEQ | BPF_K:
        case BPF_JMP | BPF_JEQ | BPF_X:
            true_cond = COND_EQ;
            goto cond_branch;
        case BPF_JMP | BPF_JNE | BPF_K:
        case BPF_JMP | BPF_JNE | BPF_X:
            true_cond = COND_NE;
            goto cond_branch;
        case BPF_JMP | BPF_JSET | BPF_K:
        case BPF_JMP | BPF_JSET | BPF_X:
            true_cond = COND_NE;
            /* Fall through */

cond_branch:
            switch (code) {
            case BPF_JMP | BPF_JGT | BPF_X:
            case BPF_JMP | BPF_JGE | BPF_X:
            case BPF_JMP | BPF_JEQ | BPF_X:
            case BPF_JMP | BPF_JNE | BPF_X:
                /* unsigned comparison */
                PPC_CMPLD(dst_reg, src_reg);
                break;
            case BPF_JMP | BPF_JSGT | BPF_X:
            case BPF_JMP | BPF_JSGE | BPF_X:
                /* signed comparison */
                PPC_CMPD(dst_reg, src_reg);
                break;
            case BPF_JMP | BPF_JSET | BPF_X:
                PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
                break;
            case BPF_JMP | BPF_JNE | BPF_K:
            case BPF_JMP | BPF_JEQ | BPF_K:
            case BPF_JMP | BPF_JGT | BPF_K:
            case BPF_JMP | BPF_JGE | BPF_K:
                /*
                 * Need sign-extended load, so only positive
                 * values can be used as imm in cmpldi
                 */
                if (imm >= 0 && imm < 32768)
                    PPC_CMPLDI(dst_reg, imm);
                else {
                    /* sign-extending load */
                    PPC_LI32(b2p[TMP_REG_1], imm);
                    /* ... but unsigned comparison */
                    PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
                }
                break;
            case BPF_JMP | BPF_JSGT | BPF_K:
            case BPF_JMP | BPF_JSGE | BPF_K:
                /*
                 * signed comparison, so any 16-bit value
                 * can be used in cmpdi
                 */
                if (imm >= -32768 && imm < 32768)
                    PPC_CMPDI(dst_reg, imm);
                else {
                    PPC_LI32(b2p[TMP_REG_1], imm);
                    PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
                }
                break;
            case BPF_JMP | BPF_JSET | BPF_K:
                /* andi does not sign-extend the immediate */
                if (imm >= 0 && imm < 32768)
                    /* PPC_ANDI is _only/always_ dot-form */
                    PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
                else {
                    PPC_LI32(b2p[TMP_REG_1], imm);
                    PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
                            b2p[TMP_REG_1]);
                }
                break;
            }
            PPC_BCC(true_cond, addrs[i + 1 + off]);
            break;
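
        /*
         * Example: for BPF_JMP | BPF_JGE | BPF_K with a small non-negative
         * immediate, the sequence generated above is simply
         *
         *      cmpldi  dst, imm
         *      bge     addrs[i + 1 + off]
         *
         * i.e. one compare selected by the inner switch plus one conditional
         * branch on the true_cond chosen earlier.
         */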

        /*
         * Loads from packet header/data
         * Assume 32-bit input value in imm and X (src_reg)
         */

        /* Absolute loads */
        case BPF_LD | BPF_W | BPF_ABS:
            func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
            goto common_load_abs;
        case BPF_LD | BPF_H | BPF_ABS:
            func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
            goto common_load_abs;
        case BPF_LD | BPF_B | BPF_ABS:
            func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
common_load_abs:
            /*
             * Load into r4, which can just be passed onto
             * skb load helpers as the second parameter
             */
            PPC_LI32(4, imm);
            goto common_load;

        /* Indirect loads */
        case BPF_LD | BPF_W | BPF_IND:
            func = (u8 *)sk_load_word;
            goto common_load_ind;
        case BPF_LD | BPF_H | BPF_IND:
            func = (u8 *)sk_load_half;
            goto common_load_ind;
        case BPF_LD | BPF_B | BPF_IND:
            func = (u8 *)sk_load_byte;
common_load_ind:
            /*
             * Load from [src_reg + imm]
             * Treat src_reg as a 32-bit value
             */
            PPC_EXTSW(4, src_reg);
            if (imm) {
                if (imm >= -32768 && imm < 32768)
                    PPC_ADDI(4, 4, IMM_L(imm));
                else {
                    PPC_LI32(b2p[TMP_REG_1], imm);
                    PPC_ADD(4, 4, b2p[TMP_REG_1]);
                }
            }

common_load:
            ctx->seen |= SEEN_SKB;
            ctx->seen |= SEEN_FUNC;
            bpf_jit_emit_func_call(image, ctx, (u64)func);

            /*
             * Helper returns 'lt' condition on error, and an
             * appropriate return value in BPF_REG_0
             */
            PPC_BCC(COND_LT, exit_addr);
            break;
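
        /*
         * The sk_load_* helpers use a custom calling convention: the offset
         * is passed in r4 (computed above), while the skb header length and
         * data pointer come from the registers cached via SEEN_SKB in the
         * prologue. On failure they set the 'lt' condition in CR0 and leave
         * an appropriate return value in BPF_REG_0, so a single blt to the
         * epilogue is all that is needed here.
         */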
        /*
         * Tail call
         */
        case BPF_JMP | BPF_CALL | BPF_X:
            ctx->seen |= SEEN_TAILCALL;
            ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
            if (ret < 0)
                return ret;
            break;

        default:
            /*
             * The filter contains something cruel & unusual.
             * We don't handle it, but also there shouldn't be
             * anything missing from our list.
             */
            pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
                    code, i);
            return -ENOTSUPP;
        }
    }

    /* Set end-of-body-code address for exit. */
    addrs[i] = ctx->idx * 4;

    return 0;
}

void bpf_jit_compile(struct bpf_prog *fp) { }

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
    u32 proglen, alloclen;
    u8 *image = NULL;
    u32 *code_base;
    u32 *addrs;
    struct codegen_context cgctx;
    int pass, flen;
    struct bpf_binary_header *bpf_hdr;
    struct bpf_prog *org_fp = fp;
    struct bpf_prog *tmp_fp;
    bool bpf_blinded = false;

    tmp_fp = bpf_jit_blind_constants(org_fp);
    if (IS_ERR(tmp_fp))
        return org_fp;
    if (tmp_fp != org_fp) {
        bpf_blinded = true;
        fp = tmp_fp;
    }

    flen = fp->len;
    addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
    if (addrs == NULL) {
        fp = org_fp;
        goto out;
    }

    memset(&cgctx, 0, sizeof(struct codegen_context));

    /* Scouting faux-generate pass 0 */
    if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
        /* We hit something illegal or unsupported. */
        fp = org_fp;
        goto out;
    }

    /*
     * Pretend to build prologue, given the features we've seen. This will
     * update cgctx.idx as it pretends to output instructions, then we can
     * calculate total size from idx.
     */
    bpf_jit_build_prologue(0, &cgctx);
    bpf_jit_build_epilogue(0, &cgctx);

    proglen = cgctx.idx * 4;
    alloclen = proglen + FUNCTION_DESCR_SIZE;

    bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
            bpf_jit_fill_ill_insns);
    if (!bpf_hdr) {
        fp = org_fp;
        goto out;
    }

    code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

    /* Code generation passes 1-2 */
    for (pass = 1; pass < 3; pass++) {
        /* Now build the prologue, body code & epilogue for real. */
        cgctx.idx = 0;
        bpf_jit_build_prologue(code_base, &cgctx);
        bpf_jit_build_body(fp, code_base, &cgctx, addrs);
        bpf_jit_build_epilogue(code_base, &cgctx);

        if (bpf_jit_enable > 1)
            pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
                proglen - (cgctx.idx * 4), cgctx.seen);
    }

    if (bpf_jit_enable > 1)
        /*
         * Note that we output the base address of the code_base
         * rather than image, since opcodes are in code_base.
         */
        bpf_jit_dump(flen, proglen, pass, code_base);
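
    /*
     * Pass 0 above ran with a NULL image, so the emit macros only advanced
     * cgctx.idx to size the program; passes 1 and 2 emit into the real
     * buffer, and the second pass exists so that branch targets recorded in
     * addrs[] (including exit_addr) are resolved with their final values.
     */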

    bpf_flush_icache(bpf_hdr, image + alloclen);
#ifdef PPC64_ELF_ABI_v1
    /* Function descriptor nastiness: Address + TOC */
    ((u64 *)image)[0] = (u64)code_base;
    ((u64 *)image)[1] = local_paca->kernel_toc;
#endif
    fp->bpf_func = (void *)image;
    fp->jited = 1;

out:
    kfree(addrs);

    if (bpf_blinded)
        bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

    return fp;
}

void bpf_jit_free(struct bpf_prog *fp)
{
    unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
    struct bpf_binary_header *bpf_hdr = (void *)addr;

    if (fp->jited)
        bpf_jit_binary_free(bpf_hdr);

    bpf_prog_unlock_free(fp);
}