/*
 * Just-In-Time compiler for BPF filters on MIPS
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/kconfig.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

#include "bpf_jit.h"
/*
 * ABI:
 * r_skb_hl	SKB header length
 * r_data	SKB data pointer
 * r_off	(X | K)
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb	*skb
 * r_M		*scratch memory
 * r_skb_len	SKB length
 *
 * On entry (*bpf_func)(*skb, *filter):
 * a0 = MIPS_R_A0 = skb;
 * a1 = MIPS_R_A1 = filter;
 *
 * Stack (grows downwards; see save_bpf_jit_regs() for the full layout):
 * ...
 * saved reg 0	<-- r_sp
 *
 * Packet layout (len = r_skb_hl + skb->data_len):
 * <--------------------- len ------------------------>
 * <--skb-len(r_skb_hl)--><----- skb->data_len ------->
 */
#define ptr typeof(unsigned long)

#define SCRATCH_OFF(k)		(4 * (k))
#define SEEN_CALL		(1 << BPF_MEMWORDS)
#define SEEN_SREG_SFT		(BPF_MEMWORDS + 1)
#define SEEN_SREG_BASE		(1 << SEEN_SREG_SFT)
#define SEEN_SREG(x)		(SEEN_SREG_BASE << (x))
#define SEEN_OFF		SEEN_SREG(2)
#define SEEN_A			SEEN_SREG(3)
#define SEEN_X			SEEN_SREG(4)
#define SEEN_SKB		SEEN_SREG(5)
#define SEEN_MEM		SEEN_SREG(6)
/* SEEN_SKB_DATA also implies skb_hl and skb_len */
#define SEEN_SKB_DATA		(SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))
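/*
 * Worked example (per the register map in bpf_jit.h): SEEN_SREG(3) marks
 * r_A in $s3 and SEEN_SREG(4) marks r_X in $s4, so a filter that uses
 * both sets bits 20 and 21 of ctx->flags (SEEN_SREG_SFT is 17 because
 * BPF_MEMWORDS is 16).  save_bpf_jit_regs() later shifts flags right by
 * SEEN_SREG_SFT and saves exactly the $s-registers whose bits are set.
 * SEEN_SKB_DATA is three bits wide because r_skb_hl, r_skb_data and
 * r_skb_len live in $s0, $s1 and $s7 respectively.
 */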
/* Arguments used by JIT */
#define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */

#define SBIT(x)			(1 << (x)) /* Signed version of BIT() */
/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @prologue_bytes:	Number of bytes for prologue
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned int prologue_bytes;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
};
static inline int optimize_div(u32 *k)
{
	/* power of 2 divides can be implemented with right shift */
	if (!(*k & (*k-1))) {
		*k = ilog2(*k);
		return 1;
	}

	return 0;
}
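/*
 * Example: for "A /= 8" the constant has a single bit set, so
 * optimize_div() rewrites k to ilog2(8) == 3 and returns 1; the
 * BPF_ALU | BPF_DIV | BPF_K case below then emits one srl-by-3 instead
 * of a divu/mflo pair.
 */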
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

/*
 * Similar to emit_instr but it must be used when we need to emit
 * 32-bit or 64-bit instructions
 */
#define emit_long_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		UASM_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
/* Determine if immediate is within the 16-bit signed range */
static inline bool is_range16(s32 imm)
{
	return !(imm >= SBIT(15) || imm < -SBIT(15));
}
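/*
 * Example: is_range16(0x7fff) and is_range16(-0x8000) are true while
 * is_range16(0x8000) is false, matching the [-32768, 32767] range of
 * the 16-bit signed immediate field in addiu, slti and friends.
 */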
static inline void emit_addu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, addu, dst, src1, src2);
}

static inline void emit_nop(struct jit_ctx *ctx)
{
	emit_instr(ctx, nop);
}
/* Load a u32 immediate to a register */
static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		/* addiu can only handle s16 */
		if (!is_range16(imm)) {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
			p = &ctx->target[ctx->idx + 1];
			uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
		} else {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_addiu(&p, dst, r_zero, imm);
		}
	}
	ctx->idx++;

	if (!is_range16(imm))
		ctx->idx++;
}
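/*
 * Example: emit_load_imm(r_A, 0xdeadbeef, ctx) does not fit in s16 and
 * expands to two instructions:
 *	lui	r_tmp_imm, 0xdead
 *	ori	r_A, r_tmp_imm, 0xbeef
 * which is why ctx->idx is bumped a second time above.
 */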
static inline void emit_or(unsigned int dst, unsigned int src1,
			   unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, or, dst, src1, src2);
}

static inline void emit_ori(unsigned int dst, unsigned int src, u32 imm,
			    struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_or(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, ori, dst, src, imm);
	}
}
static inline void emit_daddiu(unsigned int dst, unsigned int src,
			       int imm, struct jit_ctx *ctx)
{
	/*
	 * Only used for stack, so the imm is relatively small
	 * and it fits in the 16-bit signed immediate
	 */
	emit_instr(ctx, daddiu, dst, src, imm);
}

static inline void emit_addiu(unsigned int dst, unsigned int src,
			      u32 imm, struct jit_ctx *ctx)
{
	if (!is_range16(imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_addu(dst, r_tmp, src, ctx);
	} else {
		emit_instr(ctx, addiu, dst, src, imm);
	}
}
static inline void emit_and(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, and, dst, src1, src2);
}

static inline void emit_andi(unsigned int dst, unsigned int src,
			     u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_and(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, andi, dst, src, imm);
	}
}

static inline void emit_xor(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, xor, dst, src1, src2);
}
static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_xor(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, xori, dst, src, imm);
	}
}
static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
}

static inline void emit_subu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, subu, dst, src1, src2);
}

static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
{
	emit_subu(reg, r_zero, reg, ctx);
}
static inline void emit_sllv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, sllv, dst, src, sa);
}

static inline void emit_sll(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5-bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, sll, dst, src, sa);
}
static inline void emit_srlv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, srlv, dst, src, sa);
}

static inline void emit_srl(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5-bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, srl, dst, src, sa);
}
static inline void emit_slt(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, slt, dst, src1, src2);
}

static inline void emit_sltu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, sltu, dst, src1, src2);
}

static inline void emit_sltiu(unsigned int dst, unsigned int src,
			      unsigned int imm, struct jit_ctx *ctx)
{
	/* 16 bit immediate */
	if (!is_range16((s32)imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_sltu(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, sltiu, dst, src, imm);
	}
}
/* Store register on the stack */
static inline void emit_store_stack_reg(ptr reg, ptr base,
					unsigned int offset,
					struct jit_ctx *ctx)
{
	emit_long_instr(ctx, SW, reg, offset, base);
}

static inline void emit_store(ptr reg, ptr base, unsigned int offset,
			      struct jit_ctx *ctx)
{
	emit_instr(ctx, sw, reg, offset, base);
}

/* Load register from the stack */
static inline void emit_load_stack_reg(ptr reg, ptr base,
				       unsigned int offset,
				       struct jit_ctx *ctx)
{
	emit_long_instr(ctx, LW, reg, offset, base);
}

static inline void emit_load(unsigned int reg, unsigned int base,
			     unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lw, reg, offset, base);
}

static inline void emit_load_byte(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lb, reg, offset, base);
}

static inline void emit_half_load(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lh, reg, offset, base);
}
static inline void emit_mul(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, mul, dst, src1, src2);
}
static inline void emit_div(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mflo(&p, dst);
	}
	ctx->idx += 2; /* 2 insts */
}
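/*
 * divu writes the quotient to LO and the remainder to HI, hence the
 * mflo here and the mfhi in emit_mod() below.  Neither helper checks
 * for a zero divisor: the BPF_DIV/BPF_MOD | BPF_X cases in build_body()
 * branch to the epilogue with a 0 return value first, and a constant
 * zero divisor is rejected by the BPF checker before JITing.
 */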
static inline void emit_mod(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mfhi(&p, dst);
	}
	ctx->idx += 2; /* 2 insts */
}
static inline void emit_dsll(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsll, dst, src, sa);
}

static inline void emit_dsrl32(unsigned int dst, unsigned int src,
			       unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsrl32, dst, src, sa);
}

static inline void emit_wsbh(unsigned int dst, unsigned int src,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, wsbh, dst, src);
}
/* load pointer to register */
static inline void emit_load_ptr(unsigned int dst, unsigned int src,
				 int imm, struct jit_ctx *ctx)
{
	/* src contains the base addr of the 32/64-bit pointer */
	emit_long_instr(ctx, LW, dst, imm, src);
}
/* load a function pointer to register */
static inline void emit_load_func(unsigned int reg, ptr imm,
				  struct jit_ctx *ctx)
{
	if (config_enabled(CONFIG_64BIT)) {
		/* At this point imm is always 64-bit */
		emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
	} else {
		emit_load_imm(reg, imm, ctx);
	}
}
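/*
 * Illustrative expansion for imm == 0xffffffff80100000 (a typical
 * kernel address) on 64-bit:
 *	addiu	r_tmp, $zero, -1	# upper 32 bits, sign-extended
 *	dsll	r_tmp_imm, r_tmp, 16
 *	ori	r_tmp, r_tmp_imm, 0x8010
 *	dsll	r_tmp_imm, r_tmp, 16
 *	ori	reg, r_tmp_imm, 0x0000
 * i.e. the 64-bit address is built 16 bits at a time from the top.
 */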
/* Move to real MIPS register */
static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDU, dst, src, r_zero);
}

/* Move to JIT (32-bit) register */
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_addu(dst, src, r_zero, ctx);
}
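/*
 * The distinction matters on MIPS64: emit_reg_move() expands to daddu
 * and preserves all 64 bits (needed for pointers such as r_skb), while
 * emit_jit_reg_move() always uses the 32-bit addu, which keeps the BPF
 * A and X values as properly sign-extended 32-bit quantities.
 */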
/* Compute the immediate value for PC-relative branches. */
static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (ctx->target == NULL)
		return 0;

	/*
	 * We want a pc-relative branch. We only do forward branches
	 * so tgt is always after pc. tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return ctx->offsets[tgt] -
		(ctx->idx * 4 - ctx->prologue_bytes) - 4;
}
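/*
 * Example: if the branch sits at body offset 16 (ctx->idx * 4 minus the
 * prologue) and ctx->offsets[tgt] is 40, b_imm() returns
 * 40 - 16 - 4 = 20, i.e. the target lies 20 bytes past the delay slot.
 * offsets[] are body-relative because the first pass runs build_body()
 * before the prologue exists.
 */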
static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
			      unsigned int imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];

		switch (cond) {
		case MIPS_COND_EQ:
			uasm_i_beq(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_NE:
			uasm_i_bne(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_ALL:
			uasm_i_b(&p, imm);
			break;
		default:
			pr_warn("%s: Unhandled branch conditional: %d\n",
				__func__, cond);
		}
	}
	ctx->idx++;
}
static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
{
	emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
}

static inline void emit_jalr(unsigned int link, unsigned int reg,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, jalr, link, reg);
}

static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
{
	emit_instr(ctx, jr, reg);
}
static inline u16 align_sp(unsigned int num)
{
	/* Double word alignment for 32-bit, quadword for 64-bit */
	unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8;

	num = (num + (align - 1)) & -align;
	return num;
}
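/* Example: align_sp(20) is 24 on 32-bit kernels and 32 on 64-bit. */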
static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned int offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	/* Adjust the stack pointer */
	if (offset)
		emit_stack_offset(-align_sp(offset), ctx);

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is essentially a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					     ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* save return address */
	if (ctx->flags & SEEN_CALL) {
		emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
		real_off += SZREG;
	}

	/* Setup r_M leaving the alignment gap if necessary */
	if (ctx->flags & SEEN_MEM) {
		if (real_off % (SZREG * 2))
			real_off += SZREG;
		emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
	}
}
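/*
 * Worked example on a 32-bit kernel (SZREG == 4) with
 * SEEN_A | SEEN_X | SEEN_CALL | SEEN_MEM:
 *	sp + 0:  $s3 (r_A)	sp + 4:  $s4 (r_X)
 *	sp + 8:  $ra		sp + 12: alignment gap
 *	sp + 16: r_M scratch words (4 * BPF_MEMWORDS bytes)
 * The gap keeps r_M 8-byte aligned (real_off % (SZREG * 2)).
 */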
static void restore_bpf_jit_regs(struct jit_ctx *ctx,
				 unsigned int offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					    ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* restore return address */
	if (ctx->flags & SEEN_CALL)
		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);

	/* Restore the sp and discard the scratch memory */
	if (offset)
		emit_stack_offset(align_sp(offset), ctx);
}
static unsigned int get_stack_depth(struct jit_ctx *ctx)
{
	int sp_off = 0;

	/* How many s* regs do we need to preserve? */
	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;

	if (ctx->flags & SEEN_MEM)
		sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */

	if (ctx->flags & SEEN_CALL)
		sp_off += SZREG; /* Space for our ra register */

	return sp_off;
}
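/*
 * For the example flags above this returns 2 * 4 + 64 + 4 = 76 bytes,
 * which align_sp() rounds up to 80 when the stack is adjusted.
 */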
static void build_prologue(struct jit_ctx *ctx)
{
	int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	save_bpf_jit_regs(ctx, sp_off);

	if (ctx->flags & SEEN_SKB)
		emit_reg_move(r_skb, MIPS_R_A0, ctx);

	if (ctx->flags & SEEN_SKB_DATA) {
		/* Load packet length */
		emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
			  ctx);
		emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
			  ctx);
		/* Load the data pointer */
		emit_load_ptr(r_skb_data, r_skb,
			      offsetof(struct sk_buff, data), ctx);
		/* Load the header length */
		emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
	}

	if (ctx->flags & SEEN_X)
		emit_jit_reg_move(r_X, r_zero, ctx);

	/*
	 * Do not leak kernel data to userspace, we only need to clear
	 * r_A if it is ever used. In fact if it is never used, we
	 * will not save/restore it, so clearing it in this case would
	 * corrupt the state of the caller.
	 */
	if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
	    (ctx->flags & SEEN_A))
		emit_jit_reg_move(r_A, r_zero, ctx);
}
static void build_epilogue(struct jit_ctx *ctx)
{
	unsigned int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	restore_bpf_jit_regs(ctx, sp_off);

	/* Return */
	emit_jr(r_ra, ctx);
	emit_nop(ctx); /* delay slot */
}
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
	 func##_positive)
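/*
 * Three offset classes: a non-negative K uses the _positive helper (no
 * sign check needed at runtime), a negative K no smaller than
 * SKF_LL_OFF uses the _negative helper that understands the special
 * negative offset ranges, and anything below SKF_LL_OFF falls back to
 * the generic helper.
 */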
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned int i, off, condt;
	u32 k, b_off __maybe_unused;
	u8 (*sk_load_func)(unsigned long *skb, int offset);

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
			 __func__, inst->code, inst->jt, inst->jf, inst->k);

		k = inst->k;
		code = bpf_anc_helper(inst);

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			/* A <- k ==> li r_A, k */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			/* A <- len ==> lw r_A, offset(skb) */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, len);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A <- M[k] ==> lw r_A, offset(M) */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			/* A <- P[k:4] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			/* A <- P[k:2] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			/* A <- P[k:1] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
load:
			emit_load_imm(r_off, k, ctx);
load_common:
			ctx->flags |= SEEN_CALL | SEEN_OFF |
				SEEN_SKB | SEEN_A | SEEN_SKB_DATA;

			emit_load_func(r_s0, (ptr)sk_load_func, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			/* Load second argument to delay slot */
			emit_reg_move(MIPS_R_A1, r_off, ctx);
			/* Check the error value */
			emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
				   ctx);
			/* Load return register on DS for failures */
			emit_reg_move(r_ret, r_zero, ctx);
			/* Return with error */
			emit_b(b_imm(prog->len, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_LD | BPF_W | BPF_IND:
			/* A <- P[X + k:4] */
			sk_load_func = sk_load_word;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			/* A <- P[X + k:2] */
			sk_load_func = sk_load_half;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			/* A <- P[X + k:1] */
			sk_load_func = sk_load_byte;
load_ind:
			ctx->flags |= SEEN_OFF | SEEN_X;
			emit_addiu(r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			/* X <- k */
			ctx->flags |= SEEN_X;
			emit_load_imm(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_MEM:
			/* X <- M[k] */
			ctx->flags |= SEEN_X | SEEN_MEM;
			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			/* X <- len */
			ctx->flags |= SEEN_X | SEEN_SKB;
			off = offsetof(struct sk_buff, len);
			emit_load(r_X, r_skb, off, ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* X <- 4 * (P[k:1] & 0xf) */
			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
			/* Load offset to a1 */
			emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
			/*
			 * This may emit two instructions so it may not fit
			 * in the delay slot. So use a0 in the delay slot.
			 */
			emit_load_imm(MIPS_R_A1, k, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
			/* Check the error value */
			emit_bcond(MIPS_COND_NE, r_ret, 0,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			/* X <- P[k:1] & 0xf */
			emit_andi(r_X, r_A, 0xf, ctx);
			/* X << 2 */
			emit_b(b_imm(i + 1, ctx), ctx);
			emit_sll(r_X, r_X, 2, ctx); /* delay slot */
			break;
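		/*
		 * Classic use of BPF_LDX | BPF_B | BPF_MSH: with k
		 * pointing at the IP version/IHL byte, X ends up holding
		 * the IP header length in bytes, ready for a following
		 * BPF_LD | BPF_IND load past the IP header.
		 */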
		case BPF_ST:
			/* M[k] <- A */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_STX:
			/* M[k] <- X */
			ctx->flags |= SEEN_MEM | SEEN_X;
			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			/* A += X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_addu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, -k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			/* A -= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_subu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			/* Load K to scratch register before MUL */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_mul(r_A, r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			/* A *= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_mul(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			/* A /= k */
			if (k == 1)
				break;
			if (optimize_div(&k)) {
				ctx->flags |= SEEN_A;
				emit_srl(r_A, r_A, k, ctx);
				break;
			}
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_div(r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			/* A %= k */
			if (k == 1) {
				ctx->flags |= SEEN_A;
				emit_jit_reg_move(r_A, r_zero, ctx);
			} else {
				ctx->flags |= SEEN_A;
				emit_load_imm(r_s0, k, ctx);
				emit_mod(r_A, r_s0, ctx);
			}
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			/* A /= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			emit_div(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			/* A %= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			emit_mod(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			ctx->flags |= SEEN_A;
			emit_ori(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			/* A |= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_or(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= k */
			ctx->flags |= SEEN_A;
			emit_xori(r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_xor(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			ctx->flags |= SEEN_A;
			emit_andi(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			/* A &= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_and(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			/* A <<= K */
			ctx->flags |= SEEN_A;
			emit_sll(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			/* A <<= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_sllv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			/* A >>= K */
			ctx->flags |= SEEN_A;
			emit_srl(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			/* A >>= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_srlv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			ctx->flags |= SEEN_A;
			emit_neg(r_A, ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit_b(b_imm(i + k + 1, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += ( A == K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JEQ | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A == X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += ( A >= K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A >= X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += ( A > K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A > X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_X;
jmp_cmp:
			/* Greater or Equal */
			if ((condt & MIPS_COND_GE) ||
			    (condt & MIPS_COND_GT)) {
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_sltiu(r_s0, r_A, k, ctx);
				} else { /* X */
					ctx->flags |= SEEN_A |
						SEEN_X;
					emit_sltu(r_s0, r_A, r_X, ctx);
				}
				/* A < (K|X) ? r_scratch = 1 */
				b_off = b_imm(i + inst->jf + 1, ctx);
				emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
					   ctx);
				emit_nop(ctx);
				/* A > (K|X) ? scratch = 0 */
				if (condt & MIPS_COND_GT) {
					/* Checking for equality */
					ctx->flags |= SEEN_A | SEEN_X;
					if (condt & MIPS_COND_K)
						emit_load_imm(r_s0, k, ctx);
					else
						emit_jit_reg_move(r_s0, r_X,
								  ctx);
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* Finally, A > K|X */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				} else {
					/* A >= (K|X) so jump */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				}
			} else {
				/* A == K|X */
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_load_imm(r_s0, k, ctx);
					/* jump true */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1,
						      ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
				} else { /* X */
					/* jump true */
					ctx->flags |= SEEN_A | SEEN_X;
					b_off = b_imm(i + inst->jt + 1,
						      ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
				}
			}
			break;
		case BPF_JMP | BPF_JSET | BPF_K:
			ctx->flags |= SEEN_A;
			/* pc += (A & K) ? pc -> jt : pc -> jf */
			emit_load_imm(r_s1, k, ctx);
			emit_and(r_s0, r_A, r_s1, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JSET | BPF_X:
			ctx->flags |= SEEN_X | SEEN_A;
			/* pc += (A & X) ? pc -> jt : pc -> jf */
			emit_and(r_s0, r_A, r_X, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_RET | BPF_A:
			ctx->flags |= SEEN_A;
			if (i != prog->len - 1)
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
			break;
		case BPF_RET | BPF_K:
			/*
			 * It can emit two instructions so it does not fit on
			 * the delay slot.
			 */
			emit_load_imm(r_ret, k, ctx);
			if (i != prog->len - 1) {
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
				emit_nop(ctx);
			}
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->flags |= SEEN_X | SEEN_A;
			emit_jit_reg_move(r_X, r_A, ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_jit_reg_move(r_A, r_X, ctx);
			break;
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit_half_load(r_A, r_skb, off, ctx);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
			/* This needs little endian fixup */
			if (cpu_has_wsbh) {
				/* R2 and later have the wsbh instruction */
				emit_wsbh(r_A, r_A, ctx);
			} else {
				/* Get first byte */
				emit_andi(r_tmp_imm, r_A, 0xff, ctx);
				/* Left shift it by 8 */
				emit_sll(r_tmp, r_tmp_imm, 8, ctx);
				/* Get second byte */
				emit_srl(r_tmp_imm, r_A, 8, ctx);
				emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
				/* Put everything together in r_A */
				emit_or(r_A, r_tmp, r_tmp_imm, ctx);
			}
#endif
			break;
		case BPF_ANC | SKF_AD_CPU:
			ctx->flags |= SEEN_A | SEEN_OFF;
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
						  cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			/* $28/gp points to the thread_info struct */
			emit_load(r_A, 28, off, ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			/* A = skb->dev->ifindex */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, dev);
			/* Load *dev pointer */
			emit_load_ptr(r_s0, r_skb, off, ctx);
			/* error (0) in the delay slot */
			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			off = offsetof(struct net_device, ifindex);
			emit_load(r_A, r_s0, off, ctx);
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit_half_load(r_s0, r_skb, off, ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
			} else {
				emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
				/* return 1 if present */
				emit_sltu(r_A, r_zero, r_A, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->flags |= SEEN_SKB;

			emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
			/* Keep only the last 3 bits */
			emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			/* Get the actual packet type to the lower 3 bits */
			emit_srl(r_A, r_A, 5, ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit_half_load(r_A, r_skb, off, ctx);
			break;
		default:
			pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
				 inst->code);
			return -1;
		}
	}
	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
int bpf_jit_enable __read_mostly;
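/*
 * The JIT runs in two passes: the first pass calls build_body() with
 * ctx.target == NULL so the emit helpers only advance ctx.idx and
 * record per-instruction offsets, which sizes the allocation and
 * resolves forward branches; the second pass re-runs prologue, body
 * and epilogue over the allocated buffer to emit the real code.
 */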
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct jit_ctx ctx;
	unsigned int alloc_size, tmp_idx;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));

	ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	ctx.skf = fp;

	if (build_body(&ctx))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
	/* just to complete the ctx.idx count */
	build_epilogue(&ctx);

	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(alloc_size);
	if (ctx.target == NULL)
		goto out;

	/* Clean it */
	memset(ctx.target, 0, alloc_size);

	ctx.idx = 0;

	/* Generate the actual JIT code */
	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	/* Update the icache */
	flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;

out:
	kfree(ctx.offsets);
}
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}