/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */

#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))

#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))

#define FLAG_NEED_X_RESET	(1 << 0)
#define FLAG_IMM_OVERFLOW	(1 << 1)
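
/*
 * For illustration: a filter that stores to scratch word 2 and reads X
 * finishes its first pass with ctx->seen == SEEN_MEM_WORD(2) | SEEN_X,
 * i.e. bit 2 of the low BPF_MEMWORDS bits plus the X bit above them.
 * saved_regs() and build_prologue()/build_epilogue() below key off
 * these bits to decide which registers to save and how much stack to
 * reserve.
 */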

struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
				  unsigned int size)
{
	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);

	if (!ptr)
		return -EFAULT;
	memcpy(ret, ptr, size);

	return 0;
}

static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
	u8 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 1);
	else
		err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{
	u16 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 2);
	else
		err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
	u32 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 4);
	else
		err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}
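
/*
 * For illustration: the jit_get_skb_*() helpers pack the error code
 * into the upper 32 bits of the return value and the (byte-swapped)
 * datum into the lower 32 bits.  On a little-endian EABI target a u64
 * comes back in r0/r1 with r0 holding the low word, which is why the
 * emitted slowpath does "cmp r1, #0", bails out on a non-zero error
 * and only then moves r0 into r_A.
 */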

/*
 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static u32 jit_mod(u32 dividend, u32 divisor)
{
	return dividend % divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif
	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if (bpf_needs_clear_a(&ctx->skf->insns[0]))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}
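
/*
 * For illustration only (not emitted verbatim): with
 * CONFIG_FRAME_POINTER and a filter that touches packet data, the
 * prologue comes out roughly as
 *
 *	mov	ip, sp
 *	push	{r4, r6, r7, r8, fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *	mov	r6, r0				@ r_skb
 *	ldr	r7, [r6, #offsetof(sk_buff, data)]
 *	ldr	r8, [r6, #offsetof(sk_buff, len)]
 *	ldr	r0, [r6, #offsetof(sk_buff, data_len)]
 *	sub	r8, r8, r0			@ r_skb_hl = skb_headlen()
 */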

static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}

static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}
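
/*
 * Worked example: imm8m(0x00ff0000) finds rot == 8, because
 * ror32(0xff, 16) == 0x00ff0000, and returns
 * rol32(0x00ff0000, 16) | (8 << 8) == 0x8ff -- the same encoded
 * immediate that emit_swap16() below uses for BIC.  A constant like
 * 0x12345678 fits no single rotated byte, so imm8m() returns -1 and
 * callers fall back to emit_mov_i_no8m().
 */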

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * The literal pool is too far away; signal this in the
		 * flags. We can only detect it on the second pass,
		 * unfortunately.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */
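
/*
 * For illustration: on ARMv<7 the generated image is laid out as
 *
 *	[ prologue | body | epilogue | literal pool ]
 *
 * and constants are fetched with "ldr rd, [pc, #imm]".  Since that
 * immediate field is only 12 bits wide, the pool must sit within 4095
 * bytes of the load; FLAG_IMM_OVERFLOW above is the escape hatch for
 * filters too large for that to hold.
 */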

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}
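
/*
 * For illustration: emit_mov_i(r_A, 0x00ff0000) becomes a single
 * "mov r4, #0x00ff0000" because imm8m() succeeds, while
 * emit_mov_i(r_A, 0x12345678) becomes a movw/movt pair on ARMv7 or a
 * PC-relative literal pool load via imm_offset() on older cores.
 */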

#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * we need to mask out the bits set in r_dst[23:16] due to
	 * the first shift instruction.
	 *
	 * note that 0x8ff is the encoded immediate 0x00ff0000.
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}

#else  /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */

/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}
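
/*
 * Worked example: if the branch target lives at byte 96 of the image
 * (ctx->offsets[tgt] + ctx->prologue_bytes == 96) and the branch is
 * emitted at instruction index 10 (byte 40), then
 * imm == 96 - (40 + 8) == 48, and b_imm() returns 12: twelve words
 * ahead of the pipelined PC, exactly the word-granular immediate the
 * ARM B instruction encodes.
 */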

#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)
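
/*
 * Note that OP_IMM3() expects a local "imm12" variable in the caller's
 * scope (build_body() declares one).  For illustration,
 * OP_IMM3(ARM_ADD, r_A, r_A, k, ctx) expands to a single
 * "add r4, r4, #<imm12>" when k is imm8m-encodable, and otherwise to a
 * constant load into r_scratch followed by "add r4, r4, r0".
 */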

static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
				int bpf_op)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (bpf_op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
	 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
	 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
	 * before using it as a source for ARM_R1.
	 *
	 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
	 * ARM_R5 (r_X) so there is no particular register overlap
	 * issue.
	 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
		   ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}
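
/*
 * For illustration: on an ARMv7 core with HWCAP_IDIVA, A %= X comes
 * out as
 *
 *	udiv	r3, r4, r5		@ r3 = A / X
 *	mls	r4, r5, r3, r4		@ A  = A - X * (A / X)
 *
 * while older cores shuffle the operands into r0/r1 and call
 * jit_udiv()/jit_mod() through blx, which is why this helper sets
 * SEEN_CALL.
 */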

static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}

static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;
		code = bpf_anc_helper(inst);

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			load_order = 2;
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			load_order = 1;
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			load_order = 0;
load:
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_GE;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}

			/*
			 * test for negative offset, only if we are
			 * currently scheduled to take the fast
			 * path. this will update the flags so that
			 * the slowpath instructions are ignored if the
			 * offset is negative.
			 *
			 * for load_order == 0 the HI condition will
			 * make loads at offset 0 take the slow path too.
			 */
			_emit(condt, ARM_CMP_I(r_off, 0), ctx);

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
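
		/*
		 * For illustration only (not emitted verbatim): for the
		 * absolute load above, "ldh [12]" -- the EtherType word
		 * of an Ethernet frame -- comes out roughly as
		 *
		 *	mov	r1, #12
		 *	sub	r0, r8, #2	@ headlen - sizeof(u16)
		 *	cmp	r0, r1
		 *	cmpge	r1, #0		@ reject negative offsets
		 *	addge	r0, r1, r7	@ r7 = skb->data
		 *	...			@ conditional be16 load to r4
		 *	bge	<next instruction>
		 *	...			@ slowpath: call jit_get_skb_h
		 */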

		case BPF_LD | BPF_W | BPF_IND:
			load_order = 2;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			load_order = 1;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			load_order = 0;
load_ind:
			update_on_xread(ctx);
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LDX | BPF_MEM:
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -1;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      ARM_R1), ctx);
			/*
			 * emit_mov_i() might generate one or two instructions,
			 * the same holds for emit_blx_r()
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* r_off is r1 */
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);

			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
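
		/*
		 * For illustration: the MSH load above is classically
		 * emitted for "ldxb 4*([14]&0xf)" in a tcpdump filter,
		 * which takes the first byte of the IPv4 header (offset
		 * 14, right after the Ethernet header) and turns its
		 * 4-bit IHL field into a byte count:
		 * X = (pkt[14] & 0xf) << 2.
		 */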

		case BPF_ST:
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_STX:
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			if (k == 1)
				break;
			emit_mov_i(r_scratch, k, ctx);
			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			if (k == 1) {
				emit_mov_i(r_A, 0, ctx);
				break;
			}
			emit_mov_i(r_scratch, k, ctx);
			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= K */
			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			if (k)
				emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_imm;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
						   ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
							     ctx)), ctx);
			break;
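
		/*
		 * Note on condt ^ 1 above: ARM condition codes come in
		 * true/false pairs that differ only in bit 0 (EQ/NE,
		 * HS/LO, HI/LS, ...), so XOR-ing with 1 yields the
		 * inverse condition.  For "pc += (A > K) ? jt : jf" the
		 * jt branch is emitted as BHI and the jf branch as BLS.
		 */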

		case BPF_JMP | BPF_JEQ | BPF_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_x;
		case BPF_JMP | BPF_JGT | BPF_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_x;
		case BPF_JMP | BPF_JGE | BPF_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt = ARM_COND_CS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */

			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_RET | BPF_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_RET | BPF_K:
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_ANC | SKF_AD_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			/* A = skb->dev->ifindex */
			/* A = skb->dev->type */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);

			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				off = offsetof(struct net_device, ifindex);
				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			} else {
				/*
				 * offset of field "type" in "struct
				 * net_device" is above what can be
				 * used in the ldrh rd, [rn, #imm]
				 * instruction, so load the offset in
				 * a register and use ldrh rd, [rn, rm]
				 */
				off = offsetof(struct net_device, type);
				emit_mov_i(ARM_R3, off, ctx);
				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
			else {
				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
			}
			break;
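
		/*
		 * For illustration: in this kernel VLAN_TAG_PRESENT is
		 * bit 12 (0x1000) of vlan_tci, so SKF_AD_VLAN_TAG above
		 * masks that bit off to return the tag itself, while
		 * SKF_AD_VLAN_TAG_PRESENT shifts and masks it down to a
		 * plain 0/1 flag.
		 */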

		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  __pkt_type_offset[0]) != 1);
			off = PKT_TYPE_OFFSET();
			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_PAY_OFFSET:
			ctx->seen |= SEEN_SKB | SEEN_CALL;

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
			emit_blx_r(ARM_R3, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_ABS:
			/*
			 * load a 32bit word from struct seccomp_data.
			 * seccomp_check_filter() will already have checked
			 * that k is 32bit aligned and lies within the
			 * struct seccomp_data.
			 */
			ctx->seen |= SEEN_SKB;
			emit(ARM_LDR_I(r_A, r_skb, k), ctx);
			break;
		default:
			return -1;
		}

		if (ctx->flags & FLAG_IMM_OVERFLOW)
			/*
			 * this instruction generated an overflow when
			 * trying to access the literal pool, so
			 * delegate this filter to the kernel interpreter.
			 */
			return -1;
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
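
/*
 * For illustration: build_body() is deliberately deterministic so that
 * it can run twice.  The first ("fake") pass, with ctx->target == NULL,
 * only advances ctx->idx, records the per-instruction offsets and
 * collects the SEEN_* bits; bpf_jit_compile() below then sizes and
 * allocates the image from those counts and runs the very same code
 * again to write the instructions for real.
 */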

void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;
	u8 *target_ptr;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf		= fp;
	ctx.ret0_fp_idx = -1;

	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif
	alloc_size = 4 * ctx.idx;
	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
				      4, jit_fill_hole);
	if (header == NULL)
		goto out;

	ctx.target = (u32 *) target_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);
	if (build_body(&ctx) < 0) {
#if __LINUX_ARM_ARCH__ < 7
		if (ctx.imm_count)
			kfree(ctx.imms);
#endif
		bpf_jit_binary_free(header);
		goto out;
	}
	build_epilogue(&ctx);

	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;
out:
	kfree(ctx.offsets);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}