/* bpf_jit.S: Packet/header access helper functions
 * for PPC64 BPF compiler.
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <asm/ppc_asm.h>
#include <asm/asm-compat.h>
#include "bpf_jit32.h"

/*
 * All of these routines are called directly from generated code,
 * whose register usage is:
 *
 * r3		skb
 * r4,r5	A,X
 * r6		*** address parameter to helper ***
 * r7-r10	scratch
 * r14		skb->data
 * r15		skb headlen
 * r16-31	M[]
 */
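
/*
 * For example (illustrative only; see bpf_jit_comp.c for the real
 * emitter), a generated word load at constant offset K is roughly:
 *	li	r_addr, K
 *	bl	sk_load_word
 */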

/*
 * To consider: These helpers are so small it could be better to just
 * generate them inline.  Inline code can do the simple headlen check
 * then branch directly to slow_path_XXX if required.  (In fact, could
 * load a spare GPR with the address of slow_path_generic and pass size
 * as an argument, making the call site a mtlr, li and blrl.)
 */
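
/*
 * A minimal sketch of that idea (hypothetical -- no slow_path_generic
 * exists in this file, and the JIT does not emit this today):
 *	mtlr	r_scratch2	(preloaded with &slow_path_generic)
 *	li	r_scratch1, 4	(access size)
 *	blrl
 */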

	.globl	sk_load_word
sk_load_word:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_word_neg
	.globl	sk_load_word_positive_offset
sk_load_word_positive_offset:
	/* Are we accessing past headlen? */
	subi	r_scratch1, r_HL, 4
	PPC_LCMP	r_scratch1, r_addr
	blt	bpf_slow_path_word
	/* Nope, just hitting the header.  cr0 here is eq or gt! */
#ifdef __LITTLE_ENDIAN__
	lwbrx	r_A, r_D, r_addr
#else
	lwzx	r_A, r_D, r_addr
#endif
	blr	/* Return success, cr0 != LT */
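
/*
 * Note on the loads above: packet data is big-endian in memory, so the
 * byte-reversing lwbrx on little-endian hosts and the plain lwzx on
 * big-endian hosts leave the same (host-order) value in r_A.
 */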

	.globl	sk_load_half
sk_load_half:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_half_neg
	.globl	sk_load_half_positive_offset
sk_load_half_positive_offset:
	subi	r_scratch1, r_HL, 2
	PPC_LCMP	r_scratch1, r_addr
	blt	bpf_slow_path_half
#ifdef __LITTLE_ENDIAN__
	lhbrx	r_A, r_D, r_addr
#else
	lhzx	r_A, r_D, r_addr
#endif
	blr

	.globl	sk_load_byte
sk_load_byte:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_byte_neg
	.globl	sk_load_byte_positive_offset
sk_load_byte_positive_offset:
	PPC_LCMP	r_HL, r_addr	/* r_addr is offset */
	ble	bpf_slow_path_byte
	lbzx	r_A, r_D, r_addr
	blr

/*
 * BPF_LDX | BPF_B | BPF_MSH: ldxb  4*([offset]&0xf)
 * r_addr is the offset value
 */
	.globl sk_load_byte_msh
sk_load_byte_msh:
	PPC_LCMPI	r_addr, 0
	blt	bpf_slow_path_byte_msh_neg
	.globl sk_load_byte_msh_positive_offset
sk_load_byte_msh_positive_offset:
	PPC_LCMP	r_HL, r_addr
	ble	bpf_slow_path_byte_msh
	lbzx	r_X, r_D, r_addr
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
	blr
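
/*
 * The rlwinm above computes r_X = 4 * (byte & 0xf): the classic
 * "IP header length" idiom -- IHL is the low nibble, in 32-bit words.
 */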

/* Call out to skb_copy_bits:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 */
#define bpf_slow_path_common(SIZE)				\
	mflr	r0;						\
	PPC_STL	r0, PPC_LR_STKOFF(r1);				\
	/* R3 goes in parameter space of caller's frame */	\
	PPC_STL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
	PPC_STL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_STL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	addi	r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ);		\
	PPC_STLU	r1, -BPF_PPC_SLOWPATH_FRAME(r1);	\
	/* R3 = r_skb, as passed */				\
	mr	r4, r_addr;					\
	li	r6, SIZE;					\
	bl	skb_copy_bits;					\
	nop;							\
	/* R3 = 0 on success */					\
	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
	PPC_LL	r0, PPC_LR_STKOFF(r1);				\
	PPC_LL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_LL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	mtlr	r0;						\
	PPC_LCMPI	r3, 0;					\
	blt	bpf_error;	/* cr0 = LT */			\
	PPC_LL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
	/* Great success! */
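
/*
 * The macro above is roughly the C call
 *	err = skb_copy_bits(skb, offset, buf, SIZE);
 * with buf being the scratch slot at r1+BPF_PPC_STACK_BASIC+2*REG_SZ.
 * skb_copy_bits() returns 0 on success and a negative errno on failure,
 * hence the blt to bpf_error.
 */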

bpf_slow_path_word:
	bpf_slow_path_common(4)
	/* Data value is on stack, and cr0 != LT */
	lwz	r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	blr

bpf_slow_path_half:
	bpf_slow_path_common(2)
	lhz	r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	blr

bpf_slow_path_byte:
	bpf_slow_path_common(1)
	lbz	r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	blr

bpf_slow_path_byte_msh:
	bpf_slow_path_common(1)
	lbz	r_X, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
	blr

/* Call out to bpf_internal_load_pointer_neg_helper:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 */
#define sk_negative_common(SIZE)				\
	mflr	r0;						\
	PPC_STL	r0, PPC_LR_STKOFF(r1);				\
	/* R3 goes in parameter space of caller's frame */	\
	PPC_STL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
	PPC_STL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_STL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	PPC_STLU	r1, -BPF_PPC_SLOWPATH_FRAME(r1);	\
	/* R3 = r_skb, as passed */				\
	mr	r4, r_addr;					\
	li	r5, SIZE;					\
	bl	bpf_internal_load_pointer_neg_helper;		\
	nop;							\
	/* R3 != 0 on success */				\
	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
	PPC_LL	r0, PPC_LR_STKOFF(r1);				\
	PPC_LL	r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);	\
	PPC_LL	r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);	\
	mtlr	r0;						\
	PPC_LCMPLI	r3, 0;					\
	beq	bpf_error_slow;	/* cr0 = EQ */			\
	mr	r_addr, r3;					\
	PPC_LL	r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
	/* Great success! */
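
/*
 * bpf_internal_load_pointer_neg_helper() (net/core/filter.c) returns a
 * pointer to the requested bytes, or NULL on failure, hence the beq on
 * a zero r3.  The SKF_LL_OFF bound checked below is -0x200000, which is
 * exactly what "lis r_scratch1, -32" (-32 << 16) constructs.
 */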

bpf_slow_path_word_neg:
	lis	r_scratch1, -32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_word_negative_offset
sk_load_word_negative_offset:
	sk_negative_common(4)
	lwz	r_A, 0(r_addr)
	blr

bpf_slow_path_half_neg:
	lis	r_scratch1, -32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_half_negative_offset
sk_load_half_negative_offset:
	sk_negative_common(2)
	lhz	r_A, 0(r_addr)
	blr

bpf_slow_path_byte_neg:
	lis	r_scratch1, -32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_byte_negative_offset
sk_load_byte_negative_offset:
	sk_negative_common(1)
	lbz	r_A, 0(r_addr)
	blr

bpf_slow_path_byte_msh_neg:
	lis	r_scratch1, -32	/* SKF_LL_OFF */
	PPC_LCMP	r_addr, r_scratch1	/* addr < SKF_* */
	blt	bpf_error	/* cr0 = LT */
	.globl	sk_load_byte_msh_negative_offset
sk_load_byte_msh_negative_offset:
	sk_negative_common(1)
	lbz	r_X, 0(r_addr)
	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
	blr

bpf_error_slow:
	/* fabricate a cr0 = lt */
	li	r_scratch1, -1
	PPC_LCMPI	r_scratch1, 0
bpf_error:
	/* Entered with cr0 = lt */
	li	r3, 0
	/* Generated code will 'blt epilogue', returning 0. */
	blr