/* bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

static inline void bpf_flush_icache(void *start, void *end)
{
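        /*
         * Make sure the stores of the freshly written opcodes are
         * visible before the icache flush, so no CPU can fetch stale
         * instructions once fp->bpf_func is published.
         */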
        smp_wmb();
        flush_icache_range((unsigned long)start, (unsigned long)end);
}

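/*
 * Rough sketch of the stack layout the prologue builds: LR is saved in
 * the caller's frame at PPC_LR_STKOFF, each non-volatile register reg
 * that we use is stashed at -(REG_SZ * (32 - reg)) below the entry
 * stack pointer, and stwu/stdu then opens a BPF_PPC_STACKFRAME-sized
 * frame beneath that save area.
 */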
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
                                   struct codegen_context *ctx)
{
        int i;
        const struct sock_filter *filter = fp->insns;

        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                /* Make stackframe */
                if (ctx->seen & SEEN_DATAREF) {
                        /* If we call any helpers (for loads), save LR */
                        EMIT(PPC_INST_MFLR | __PPC_RT(R0));
                        PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

                        /* Back up non-volatile regs. */
                        PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
                        PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /*
                         * Conditionally save regs r_M..r_M+15 as some will
                         * be used for M[] data.
                         */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
                                        PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
                        }
                }
                PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
        }

        if (ctx->seen & SEEN_DATAREF) {
                /*
                 * If this filter needs to access skb data,
                 * prepare r_D and r_HL:
                 *  r_HL = skb->len - skb->data_len
                 *  r_D  = skb->data
                 */
                PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                         data_len));
                PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
                PPC_SUB(r_HL, r_HL, r_scratch1);
                PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
        }

        if (ctx->seen & SEEN_XREG) {
                /*
                 * TODO: Could also detect whether first instr. sets X and
                 * avoid this (as below, with A).
                 */
                PPC_LI(r_X, 0);
        }

        /* make sure we don't leak kernel information to the user */
        if (bpf_needs_clear_a(&filter[0]))
                PPC_LI(r_A, 0);
}

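/*
 * The epilogue mirrors the prologue: pop the frame first, then restore
 * LR and any saved non-volatiles from the same offsets, relative to
 * the now-restored stack pointer.
 */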
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
        int i;

        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
                if (ctx->seen & SEEN_DATAREF) {
                        PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
                        PPC_MTLR(0);
                        PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
                        PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /* Restore any saved non-vol registers */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
                                        PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
                        }
                }
        }
        /* The RETs have left a return value in R3. */

        PPC_BLR();
}

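/*
 * Pick the skb-load helper variant from the sign of K: K >= 0 takes
 * the positive-offset fast path, SKF_LL_OFF <= K < 0 takes the
 * negative-offset path, and anything below SKF_LL_OFF falls back to
 * the generic helper.
 */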
#define CHOOSE_LOAD_FUNC(K, func) \
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                              struct codegen_context *ctx,
                              unsigned int *addrs)
{
        const struct sock_filter *filter = fp->insns;
        int flen = fp->len;
        u8 *func;
        unsigned int true_cond;
        int i;

        /* Start of epilogue code */
        unsigned int exit_addr = addrs[flen];

        for (i = 0; i < flen; i++) {
                unsigned int K = filter[i].k;
                u16 code = bpf_anc_helper(&filter[i]);

                /*
                 * addrs[] maps a BPF bytecode address into a real offset from
                 * the start of the body code.
                 */
                addrs[i] = ctx->idx * 4;

                switch (code) {
                        /*** ALU ops ***/
                case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_ADD(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
                        if (!K)
                                break;
                        PPC_ADDI(r_A, r_A, IMM_L(K));
                        if (K >= 32768)
                                PPC_ADDIS(r_A, r_A, IMM_HA(K));
                        break;
                case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SUB(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
                        if (!K)
                                break;
                        PPC_ADDI(r_A, r_A, IMM_L(-K));
                        if (K >= 32768)
                                PPC_ADDIS(r_A, r_A, IMM_HA(-K));
                        break;
                case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_MUL(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
                        if (K < 32768)
                                PPC_MULI(r_A, r_A, K);
                        else {
                                PPC_LI32(r_scratch1, K);
                                PPC_MUL(r_A, r_A, r_scratch1);
                        }
                        break;
                case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
                case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_CMPWI(r_X, 0);
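                        /*
                         * Division by zero makes the filter return 0
                         * (BPF semantics): branch to an already-emitted
                         * "ret 0" if we have one, otherwise build the
                         * exit inline.
                         */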
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
                                PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
                                PPC_DIVWU(r_scratch1, r_A, r_X);
                                PPC_MUL(r_scratch1, r_X, r_scratch1);
                                PPC_SUB(r_A, r_A, r_scratch1);
                        } else {
                                PPC_DIVWU(r_A, r_A, r_X);
                        }
                        break;
                case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
                        PPC_LI32(r_scratch2, K);
                        PPC_DIVWU(r_scratch1, r_A, r_scratch2);
                        PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
                        PPC_SUB(r_A, r_A, r_scratch1);
                        break;
                case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
                        if (K == 1)
                                break;
                        PPC_LI32(r_scratch1, K);
                        PPC_DIVWU(r_A, r_A, r_scratch1);
                        break;
                case BPF_ALU | BPF_AND | BPF_X:
                        ctx->seen |= SEEN_XREG;
                        PPC_AND(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_AND | BPF_K:
                        if (!IMM_H(K))
                                PPC_ANDI(r_A, r_A, K);
                        else {
                                PPC_LI32(r_scratch1, K);
                                PPC_AND(r_A, r_A, r_scratch1);
                        }
                        break;
                case BPF_ALU | BPF_OR | BPF_X:
                        ctx->seen |= SEEN_XREG;
                        PPC_OR(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_OR | BPF_K:
                        if (IMM_L(K))
                                PPC_ORI(r_A, r_A, IMM_L(K));
                        if (K >= 65536)
                                PPC_ORIS(r_A, r_A, IMM_H(K));
                        break;
                case BPF_ANC | SKF_AD_ALU_XOR_X:
                case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
                        ctx->seen |= SEEN_XREG;
                        PPC_XOR(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
                        if (IMM_L(K))
                                PPC_XORI(r_A, r_A, IMM_L(K));
                        if (K >= 65536)
                                PPC_XORIS(r_A, r_A, IMM_H(K));
                        break;
                case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SLW(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_LSH | BPF_K:
                        if (K == 0)
                                break;
                        else
                                PPC_SLWI(r_A, r_A, K);
                        break;
                case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SRW(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
                        if (K == 0)
                                break;
                        else
                                PPC_SRWI(r_A, r_A, K);
                        break;
                case BPF_ALU | BPF_NEG:
                        PPC_NEG(r_A, r_A);
                        break;
                case BPF_RET | BPF_K:
                        PPC_LI32(r_ret, K);
                        if (!K) {
                                if (ctx->pc_ret0 == -1)
                                        ctx->pc_ret0 = i;
                        }
                        /*
                         * If this isn't the very last instruction, branch to
                         * the epilogue if we have anything to clean up.
                         * Otherwise, if there's nothing to tidy, just return.
                         * If we /are/ the last instruction, we're about to
                         * fall through to the epilogue to return.
                         */
                        if (i != flen - 1) {
                                /*
                                 * Note: 'seen' is properly valid only on pass
                                 * #2.  Both parts of this conditional are the
                                 * same instruction size though, meaning the
                                 * first pass will still correctly determine the
                                 * code size/addresses.
                                 */
                                if (ctx->seen)
                                        PPC_JMP(exit_addr);
                                else
                                        PPC_BLR();
                        }
                        break;
                case BPF_RET | BPF_A:
                        PPC_MR(r_ret, r_A);
                        if (i != flen - 1) {
                                if (ctx->seen)
                                        PPC_JMP(exit_addr);
                                else
                                        PPC_BLR();
                        }
                        break;
                case BPF_MISC | BPF_TAX: /* X = A */
                        PPC_MR(r_X, r_A);
                        break;
                case BPF_MISC | BPF_TXA: /* A = X */
                        ctx->seen |= SEEN_XREG;
                        PPC_MR(r_A, r_X);
                        break;

                        /*** Constant loads/M[] access ***/
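                        /*
                         * M[] slots live in registers r_M..r_M+15; the
                         * low bits of ctx->seen record which slots are
                         * touched, so the prologue/epilogue save and
                         * restore only those registers.
                         */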
                case BPF_LD | BPF_IMM: /* A = K */
                        PPC_LI32(r_A, K);
                        break;
                case BPF_LDX | BPF_IMM: /* X = K */
                        PPC_LI32(r_X, K);
                        break;
                case BPF_LD | BPF_MEM: /* A = mem[K] */
                        PPC_MR(r_A, r_M + (K & 0xf));
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_LDX | BPF_MEM: /* X = mem[K] */
                        PPC_MR(r_X, r_M + (K & 0xf));
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_ST: /* mem[K] = A */
                        PPC_MR(r_M + (K & 0xf), r_A);
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_STX: /* mem[K] = X */
                        PPC_MR(r_M + (K & 0xf), r_X);
                        ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_LD | BPF_W | BPF_LEN: /*       A = skb->len; */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
                        break;
                case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
                        PPC_LWZ_OFFS(r_A, r_skb, K);
                        break;
                case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
                        PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
                        break;

                        /*** Ancillary info loads ***/
                case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  protocol) != 2);
                        PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                            protocol));
                        break;
                case BPF_ANC | SKF_AD_IFINDEX:
                case BPF_ANC | SKF_AD_HATYPE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                ifindex) != 4);
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                type) != 2);
                        PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                                dev));
                        PPC_CMPDI(r_scratch1, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
                                /* Exit, returning 0; first pass hits here. */
                                PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
                                PPC_LWZ_OFFS(r_A, r_scratch1,
                                     offsetof(struct net_device, ifindex));
                        } else {
                                PPC_LHZ_OFFS(r_A, r_scratch1,
                                     offsetof(struct net_device, type));
                        }

                        break;
                case BPF_ANC | SKF_AD_MARK:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          mark));
                        break;
                case BPF_ANC | SKF_AD_RXHASH:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          hash));
                        break;
                case BPF_ANC | SKF_AD_VLAN_TAG:
                case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
                        BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

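                        /*
                         * vlan_tci carries the 12-bit tag plus the
                         * VLAN_TAG_PRESENT flag in bit 12: mask the
                         * flag off for SKF_AD_VLAN_TAG, or isolate it
                         * and shift it down to 0/1 for
                         * SKF_AD_VLAN_TAG_PRESENT.
                         */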
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          vlan_tci));
                        if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
                                PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
                        } else {
                                PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
                                PPC_SRWI(r_A, r_A, 12);
                        }
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  queue_mapping) != 2);
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          queue_mapping));
                        break;
                case BPF_ANC | SKF_AD_PKTTYPE:
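                        /*
                         * pkt_type is a bitfield; PKT_TYPE_OFFSET() and
                         * PKT_TYPE_MAX (from bpf_jit.h) are assumed to
                         * locate the field and its mask in an
                         * endian-aware way, with the srwi moving the
                         * value down to bit 0.
                         */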
                        PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
                        PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
                        PPC_SRWI(r_A, r_A, 5);
                        break;
                case BPF_ANC | SKF_AD_CPU:
                        PPC_BPF_LOAD_CPU(r_A);
                        break;
                        /*** Absolute loads from packet header/data ***/
                case BPF_LD | BPF_W | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_word);
                        goto common_load;
                case BPF_LD | BPF_H | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_half);
                        goto common_load;
                case BPF_LD | BPF_B | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
                common_load:
                        /* Load from [K]. */
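                        /*
                         * The sk_load_* helpers use a private calling
                         * convention (see bpf_jit_asm.S): skb stays in
                         * r_skb, the offset is passed in r_addr, and
                         * the result lands in the destination register
                         * (r_A here, r_X for the msh variant).
                         */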
                        ctx->seen |= SEEN_DATAREF;
                        PPC_FUNC_ADDR(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_LI32(r_addr, K);
                        PPC_BLRL();
                        /*
                         * Helper returns 'lt' condition on error, and an
                         * appropriate return value in r3
                         */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

                        /*** Indirect loads from packet header/data ***/
                case BPF_LD | BPF_W | BPF_IND:
                        func = sk_load_word;
                        goto common_load_ind;
                case BPF_LD | BPF_H | BPF_IND:
                        func = sk_load_half;
                        goto common_load_ind;
                case BPF_LD | BPF_B | BPF_IND:
                        func = sk_load_byte;
                common_load_ind:
                        /*
                         * Load from [X + K].  Negative offsets are tested for
                         * in the helper functions.
                         */
                        ctx->seen |= SEEN_DATAREF | SEEN_XREG;
                        PPC_FUNC_ADDR(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_ADDI(r_addr, r_X, IMM_L(K));
                        if (K >= 32768)
                                PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
                        PPC_BLRL();
                        /* If error, cr0.LT set */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

                case BPF_LDX | BPF_B | BPF_MSH:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
                        goto common_load;

                        /*** Jump and branches ***/
                case BPF_JMP | BPF_JA:
                        if (K != 0)
                                PPC_JMP(addrs[i + 1 + K]);
                        break;

                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                        true_cond = COND_GT;
                        goto cond_branch;
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                        true_cond = COND_GE;
                        goto cond_branch;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                        true_cond = COND_EQ;
                        goto cond_branch;
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        true_cond = COND_NE;
                        /* Fall through */
                cond_branch:
                        /* same targets, can avoid doing the test :) */
                        if (filter[i].jt == filter[i].jf) {
                                if (filter[i].jt > 0)
                                        PPC_JMP(addrs[i + 1 + filter[i].jt]);
                                break;
                        }

                        switch (code) {
                        case BPF_JMP | BPF_JGT | BPF_X:
                        case BPF_JMP | BPF_JGE | BPF_X:
                        case BPF_JMP | BPF_JEQ | BPF_X:
                                ctx->seen |= SEEN_XREG;
                                PPC_CMPLW(r_A, r_X);
                                break;
                        case BPF_JMP | BPF_JSET | BPF_X:
                                ctx->seen |= SEEN_XREG;
                                PPC_AND_DOT(r_scratch1, r_A, r_X);
                                break;
                        case BPF_JMP | BPF_JEQ | BPF_K:
                        case BPF_JMP | BPF_JGT | BPF_K:
                        case BPF_JMP | BPF_JGE | BPF_K:
                                if (K < 32768)
                                        PPC_CMPLWI(r_A, K);
                                else {
                                        PPC_LI32(r_scratch1, K);
                                        PPC_CMPLW(r_A, r_scratch1);
                                }
                                break;
                        case BPF_JMP | BPF_JSET | BPF_K:
                                if (K < 32768)
                                        /* PPC_ANDI is /only/ dot-form */
                                        PPC_ANDI(r_scratch1, r_A, K);
                                else {
                                        PPC_LI32(r_scratch1, K);
                                        PPC_AND_DOT(r_scratch1, r_A,
                                                    r_scratch1);
                                }
                                break;
                        }
                        /* Sometimes branches are constructed "backward", with
                         * the false path being the branch and true path being
                         * a fallthrough to the next instruction.
                         */
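                        /*
                         * E.g. with jt == 0 and jf == 2 on a JGT test,
                         * we emit a single "ble" to addrs[i + 3] and
                         * fall through on the true path.
                         */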
                        if (filter[i].jt == 0)
                                /* Swap the sense of the branch */
                                PPC_BCC(true_cond ^ COND_CMP_TRUE,
                                        addrs[i + 1 + filter[i].jf]);
                        else {
                                PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
                                if (filter[i].jf != 0)
                                        PPC_JMP(addrs[i + 1 + filter[i].jf]);
                        }
                        break;
                default:
                        /* The filter contains something cruel & unusual.
                         * We don't handle it, but also there shouldn't be
                         * anything missing from our list.
                         */
                        if (printk_ratelimit())
                                pr_err("BPF filter opcode %04x (@%d) unsupported\n",
                                       filter[i].code, i);
                        return -ENOTSUPP;
                }

        }
        /* Set end-of-body-code address for exit. */
        addrs[i] = ctx->idx * 4;

        return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
        unsigned int proglen;
        unsigned int alloclen;
        u32 *image = NULL;
        u32 *code_base;
        unsigned int *addrs;
        struct codegen_context cgctx;
        int pass;
        int flen = fp->len;

        if (!bpf_jit_enable)
                return;

        addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
        if (addrs == NULL)
                return;

        /*
         * There are multiple assembly passes as the generated code will change
         * size as it settles down, figuring out the max branch offsets/exit
         * paths required.
         *
         * The range of standard conditional branches is +/- 32Kbytes.  Since
         * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
         * finish with 8 bytes/instruction.  Not feasible, so long jumps are
         * used, distinct from short branches.
         *
         * Current:
         *
         * For now, both branch types assemble to 2 words (short branches padded
         * with a NOP); this is less efficient, but assembly will always complete
         * after exactly 3 passes:
         *
         * First pass: No code buffer; Program is "faux-generated" -- no code
         * emitted but maximum size of output determined (and addrs[] filled
         * in).  Also, we note whether we use M[], whether we use skb data, etc.
         * All generation choices assumed to be 'worst-case', e.g. branches all
         * far (2 instructions), return path code reduction not available, etc.
         *
         * Second pass: Code buffer allocated with size determined previously.
         * Prologue generated to support features we have seen used.  Exit paths
         * determined and addrs[] is filled in again, as code may be slightly
         * smaller as a result.
         *
         * Third pass: Code generated 'for real', and branch destinations
         * determined from now-accurate addrs[] map.
         *
         * Ideal:
         *
         * If we optimise this, near branches will be shorter.  On the
         * first assembly pass, we should err on the side of caution and
         * generate the biggest code.  On subsequent passes, branches will be
         * generated short or long and code size will reduce.  With smaller
         * code, more branches may fall into the short category, and code will
         * reduce more.
         *
         * Finally, if we see one pass generate code the same size as the
         * previous pass we have converged and should now generate code for
         * real.  Allocating at the end will also save the memory that would
         * otherwise be wasted by the (small) current code shrinkage.
         * Preferably, we should do a small number of passes (e.g. 5) and if we
         * haven't converged by then, get impatient and force code to generate
         * as-is, even if the odd branch would be left long.  The chances of a
         * long jump are tiny with all but the most enormous of BPF filter
         * inputs, so we should usually converge on the third pass.
         */

        cgctx.idx = 0;
        cgctx.seen = 0;
        cgctx.pc_ret0 = -1;
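        /* pc_ret0 records the index of the first "ret 0" instruction,
         * if any, so other return-0 paths can branch to its code. */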
        /* Scouting faux-generate pass 0 */
        if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
                /* We hit something illegal or unsupported. */
                goto out;

        /*
         * Pretend to build prologue, given the features we've seen.  This will
         * update cgctx.idx as it pretends to output instructions, then we can
         * calculate total size from idx.
         */
        bpf_jit_build_prologue(fp, 0, &cgctx);
        bpf_jit_build_epilogue(0, &cgctx);

        proglen = cgctx.idx * 4;
        alloclen = proglen + FUNCTION_DESCR_SIZE;
        image = module_alloc(alloclen);
        if (!image)
                goto out;

        code_base = image + (FUNCTION_DESCR_SIZE/4);

        /* Code generation passes 1-2 */
        for (pass = 1; pass < 3; pass++) {
                /* Now build the prologue, body code & epilogue for real. */
                cgctx.idx = 0;
                bpf_jit_build_prologue(fp, code_base, &cgctx);
                bpf_jit_build_body(fp, code_base, &cgctx, addrs);
                bpf_jit_build_epilogue(code_base, &cgctx);

                if (bpf_jit_enable > 1)
                        pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
                                proglen - (cgctx.idx * 4), cgctx.seen);
        }

        if (bpf_jit_enable > 1)
                /* Note that we output the base address of code_base
                 * rather than image, since opcodes are in code_base.
                 */
                bpf_jit_dump(flen, proglen, pass, code_base);

        if (image) {
                bpf_flush_icache(code_base, code_base + (proglen/4));
#ifdef CONFIG_PPC64
                /* Function descriptor nastiness: Address + TOC */
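                /*
                 * ELFv1 calls indirect through a function descriptor:
                 * the first doubleword is the entry point and the
                 * second is the TOC pointer the code expects in r2.
                 */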
                ((u64 *)image)[0] = (u64)code_base;
                ((u64 *)image)[1] = local_paca->kernel_toc;
#endif
                fp->bpf_func = (void *)image;
                fp->jited = 1;
        }
out:
        kfree(addrs);
        return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
        if (fp->jited)
                module_memfree(fp->bpf_func);

        bpf_prog_unlock_free(fp);
}