/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "../nfp_app.h"
#include "../nfp_main.h"
#include "fw.h"
#include "main.h"

#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
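
/* Walk the meta list from the most recently visited instruction (or from
 * either end of the program, whichever is closer) to the meta for @insn_idx.
 */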
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns)
{
	unsigned int forward, backward, i;

	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

	if (min(forward, backward) > n_insns - insn_idx - 1) {
		backward = n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}
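
/* Record the location of the single constant bpf_xdp_adjust_head() call
 * the translator can optimize; any pattern we cannot optimize resets the
 * recorded location to UINT_MAX.
 */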
static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		       struct nfp_insn_meta *meta,
		       const struct bpf_reg_state *reg2)
{
	unsigned int location = UINT_MAX;
	int imm;

	/* Datapath usually can give us guarantees on how much adjust head
	 * can be done without the need for any checks.  Optimize the simple
	 * case where there is only one adjust head by a constant.
	 */
	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
		goto exit_set_location;
	imm = reg2->var_off.value;
	/* Translator will skip all checks, we need to guarantee min pkt len */
	if (imm > ETH_ZLEN - ETH_HLEN)
		goto exit_set_location;
	if (imm > (int)bpf->adjust_head.guaranteed_add ||
	    imm < -bpf->adjust_head.guaranteed_sub)
		goto exit_set_location;

	if (nfp_prog->adjust_head_location) {
		/* Only one call per program allowed */
		if (nfp_prog->adjust_head_location != meta->n)
			goto exit_set_location;

		if (meta->arg2.reg.var_off.value != imm)
			goto exit_set_location;
	}

	location = meta->n;
exit_set_location:
	nfp_prog->adjust_head_location = location;
}
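
/* Offloaded atomic counters must start out as zero - reject the update if
 * a potentially non-zero stack word would initialize a word already used
 * as an atomic counter, otherwise record the non-zero update.
 */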
static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
	struct bpf_offloaded_map *offmap;
	struct bpf_func_state *state;
	struct nfp_bpf_map *nfp_map;
	int off, i;

	state = env->cur_state->frame[reg3->frameno];

	/* We need to record each time update happens with non-zero words,
	 * in case such word is used in atomic operations.
	 * Implicitly depend on nfp_bpf_stack_arg_ok(reg3) being run before.
	 */

	offmap = map_to_offmap(reg1->map_ptr);
	nfp_map = offmap->dev_priv;
	off = reg3->off + reg3->var_off.value;

	for (i = 0; i < offmap->map.value_size; i++) {
		struct bpf_stack_state *stack_entry;
		unsigned int soff;

		soff = -(off + i) - 1;
		stack_entry = &state->stack[soff / BPF_REG_SIZE];
		if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO)
			continue;

		if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
			pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
				i, soff);
			return false;
		}

		nfp_map->use_map[i / 4].non_zero_update = 1;
	}

	return true;
}
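
/* Helper stack arguments must be constant, word-aligned stack pointers.
 * On re-parse of the same instruction also note whether the offset moved.
 */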
static bool
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
		     const struct bpf_reg_state *reg,
		     struct nfp_bpf_reg_state *old_arg)
{
	s64 off, old_off;

	if (reg->type != PTR_TO_STACK) {
		pr_vlog(env, "%s: unsupported ptr type %d\n",
			fname, reg->type);
		return false;
	}
	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "%s: variable pointer\n", fname);
		return false;
	}

	off = reg->var_off.value + reg->off;
	if (-off % 4) {
		pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
		return false;
	}

	/* Rest of the checks is only if we re-parse the same insn */
	if (!old_arg)
		return true;

	old_off = old_arg->reg.var_off.value + old_arg->reg.off;
	old_arg->var_off |= off != old_off;

	return true;
}
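
/* Map helpers can only be offloaded if FW advertises a call address for
 * the helper in question.
 */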
static bool
nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
		    struct nfp_insn_meta *meta,
		    u32 helper_tgt, const struct bpf_reg_state *reg1)
{
	if (!helper_tgt) {
		pr_vlog(env, "%s: not supported by FW\n", fname);
		return false;
	}

	return true;
}
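
/* Validate a helper call against FW capabilities and remember its
 * arguments so the translator can re-check them on later passes.
 */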
static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
		   struct nfp_insn_meta *meta)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
	const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
	struct nfp_app_bpf *bpf = nfp_prog->bpf;
	u32 func_id = meta->insn.imm;

	switch (func_id) {
	case BPF_FUNC_xdp_adjust_head:
		if (!bpf->adjust_head.off_max) {
			pr_vlog(env, "adjust_head not supported by FW\n");
			return -EOPNOTSUPP;
		}
		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
			return -EOPNOTSUPP;
		}

		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
		break;

	case BPF_FUNC_xdp_adjust_tail:
		if (!bpf->adjust_tail) {
			pr_vlog(env, "adjust_tail not supported by FW\n");
			return -EOPNOTSUPP;
		}
		break;

	case BPF_FUNC_map_lookup_elem:
		if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
					 bpf->helpers.map_lookup, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_update_elem:
		if (!nfp_bpf_map_call_ok("map_update", env, meta,
					 bpf->helpers.map_update, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) ||
		    !nfp_bpf_map_update_value_ok(env))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_delete_elem:
		if (!nfp_bpf_map_call_ok("map_delete", env, meta,
					 bpf->helpers.map_delete, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_delete", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_get_prandom_u32:
		if (bpf->pseudo_random)
			break;
		pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
		return -EOPNOTSUPP;

	case BPF_FUNC_perf_event_output:
		BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
			     NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
			     NFP_BPF_STACK != PTR_TO_STACK ||
			     NFP_BPF_PACKET_DATA != PTR_TO_PACKET);

		if (!bpf->helpers.perf_event_output) {
			pr_vlog(env, "event_output: not supported by FW\n");
			return -EOPNOTSUPP;
		}

		/* Force current CPU to make sure we can report the event
		 * wherever we get the control message from FW.
		 */
		if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
		    (reg3->var_off.value & BPF_F_INDEX_MASK) !=
		    BPF_F_CURRENT_CPU) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
			pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
				tn_buf);
			return -EOPNOTSUPP;
		}

		/* Save space in meta, we don't care about arguments other
		 * than the 4th, shove it into arg1.
		 */
		reg1 = cur_regs(env) + BPF_REG_4;

		if (reg1->type != SCALAR_VALUE /* NULL ptr */ &&
		    reg1->type != PTR_TO_STACK &&
		    reg1->type != PTR_TO_MAP_VALUE &&
		    reg1->type != PTR_TO_PACKET) {
			pr_vlog(env, "event_output: unsupported ptr type: %d\n",
				reg1->type);
			return -EOPNOTSUPP;
		}

		if (reg1->type == PTR_TO_STACK &&
		    !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
			return -EOPNOTSUPP;

		/* Warn user that on offload NFP may return success even if map
		 * is not going to accept the event, since the event output is
		 * fully async and device won't know the state of the map.
		 * There is also FW limitation on the event length.
		 *
		 * Lost events will not show up on the perf ring, driver
		 * won't see them at all.  Events may also get reordered.
		 */
		dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
			      "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n");
		pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");

		if (!meta->func_id)
			break;

		if (reg1->type != meta->arg1.type) {
			pr_vlog(env, "event_output: ptr type changed: %d %d\n",
				meta->arg1.type, reg1->type);
			return -EINVAL;
		}
		break;

	default:
		pr_vlog(env, "unsupported function id: %d\n", func_id);
		return -EOPNOTSUPP;
	}

	meta->func_id = func_id;
	meta->arg1 = *reg1;
	meta->arg2.reg = *reg2;

	return 0;
}
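
/* TC programs must exit with a constant verdict the datapath can encode;
 * XDP return codes need no checking here.
 */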
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
	u64 imm;

	if (nfp_prog->type == BPF_PROG_TYPE_XDP)
		return 0;

	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
			reg0->type, tn_buf);
		return -EINVAL;
	}

	imm = reg0->var_off.value;
	if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
	    imm <= TC_ACT_REDIRECT &&
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED && imm != TC_ACT_OK) {
		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	return 0;
}
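
/* Stack pointers have to be constant at a given instruction, or at least
 * keep the same alignment within the 4B word the datapath addresses.
 */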
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg,
			   struct bpf_verifier_env *env)
{
	s32 old_off, new_off;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "variable ptr stack access\n");
		return -EINVAL;
	}

	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	if (old_off % 4 == new_off % 4)
		return 0;

	pr_vlog(env, "stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}

static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
	static const char * const names[] = {
		[NFP_MAP_UNUSED]	= "unused",
		[NFP_MAP_USE_READ]	= "read",
		[NFP_MAP_USE_WRITE]	= "write",
		[NFP_MAP_USE_ATOMIC_CNT] = "atomic",
	};

	if (use >= ARRAY_SIZE(names) || !names[use])
		return "unknown";
	return names[use];
}
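
/* Each 4B word of a map value has a single use type - mixing reads,
 * writes and atomic operations on the same word cannot be offloaded.
 */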
static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
			  struct nfp_bpf_map *nfp_map,
			  unsigned int off, enum nfp_bpf_map_use use)
{
	if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
	    nfp_map->use_map[off / 4].type != use) {
		pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
			nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
			nfp_bpf_map_use_name(use), off);
		return -EOPNOTSUPP;
	}

	if (nfp_map->use_map[off / 4].non_zero_update &&
	    use == NFP_MAP_USE_ATOMIC_CNT) {
		pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
			off);
		return -EOPNOTSUPP;
	}

	nfp_map->use_map[off / 4].type = use;

	return 0;
}
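
/* Record the use type for every map value word touched by a fixed-offset
 * access of a given size.
 */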
static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
		      const struct bpf_reg_state *reg,
		      enum nfp_bpf_map_use use)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	unsigned int size, off;
	int i, err;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "map value offset is variable\n");
		return -EOPNOTSUPP;
	}

	off = reg->var_off.value + meta->insn.off + reg->off;
	size = BPF_LDST_BYTES(&meta->insn);
	offmap = map_to_offmap(reg->map_ptr);
	nfp_map = offmap->dev_priv;

	if (off + size > offmap->map.value_size) {
		pr_vlog(env, "map value access out-of-bounds\n");
		return -EINVAL;
	}

	for (i = 0; i < size; i += 4 - (off + i) % 4) {
		err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
		if (err)
			return err;
	}

	return 0;
}
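
/* Pointer uses must have a supported type which, for a given instruction,
 * does not change between verifier passes.
 */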
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_MAP_VALUE &&
	    reg->type != PTR_TO_PACKET) {
		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
		if (err)
			return err;
	}

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (is_mbpf_load(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_READ);
			if (err)
				return err;
		}
		if (is_mbpf_store(meta)) {
			pr_vlog(env, "map writes not supported\n");
			return -EOPNOTSUPP;
		}
		if (is_mbpf_xadd(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_ATOMIC_CNT);
			if (err)
				return err;
		}
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}
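
/* The only supported store to the context is XDP queue selection (when FW
 * supports it); all other pointers get the generic checks.
 */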
static int
nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		    struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg;

	if (reg->type == PTR_TO_CTX) {
		if (nfp_prog->type == BPF_PROG_TYPE_XDP) {
			/* XDP ctx accesses must be 4B in size */
			switch (meta->insn.off) {
			case offsetof(struct xdp_md, rx_queue_index):
				if (nfp_prog->bpf->queue_select)
					goto exit_check_ptr;
				pr_vlog(env, "queue selection not supported by FW\n");
				return -EOPNOTSUPP;
			}
		}
		pr_vlog(env, "unsupported store to context field\n");
		return -EOPNOTSUPP;
	}
exit_check_ptr:
	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}
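
/* Atomic adds are only supported on map values, with scalar operands.
 * Record whether the addend may fit into 16 bits and whether it may
 * exceed them - the JIT handles the two cases differently.
 */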
static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

	if (dreg->type != PTR_TO_MAP_VALUE) {
		pr_vlog(env, "atomic add not to a map value pointer: %d\n",
			dreg->type);
		return -EOPNOTSUPP;
	}
	if (sreg->type != SCALAR_VALUE) {
		pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type);
		return -EOPNOTSUPP;
	}

	meta->xadd_over_16bit |=
		sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
	meta->xadd_maybe_16bit |=
		(sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;

	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}
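
/* Track operand value ranges and enforce the NFP multiplier and divider
 * constraints explained in the comments below.
 */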
static int
nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg =
		cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg =
		cur_regs(env) + meta->insn.dst_reg;

	meta->umin_src = min(meta->umin_src, sreg->umin_value);
	meta->umax_src = max(meta->umax_src, sreg->umax_value);
	meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
	meta->umax_dst = max(meta->umax_dst, dreg->umax_value);

	/* NFP supports u16 and u32 multiplication.
	 *
	 * For ALU64, if either operand is beyond u32's value range, we
	 * reject it.  One thing to note, if the source operand is BPF_K,
	 * then we need to check the "imm" field directly, and we reject it
	 * if it is negative, because for ALU64 "imm" (of type s32) is
	 * expected to be sign extended to s64, which NFP mul doesn't
	 * support.
	 *
	 * For ALU32 it is fine for "imm" to be negative, because the result
	 * is 32-bit and the low half of the result is the same for signed
	 * and unsigned mul, so we will get the correct result.
	 */
	if (is_mbpf_mul(meta)) {
		if (meta->umax_dst > U32_MAX) {
			pr_vlog(env, "multiplier is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
			pr_vlog(env, "multiplicand is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_class(meta) == BPF_ALU64 &&
		    mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
			pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
			return -EINVAL;
		}
	}

	/* NFP doesn't have divide instructions, we support divide by
	 * constant through reciprocal multiplication.  Given NFP supports
	 * multiplication no bigger than u32, we require the divisor and
	 * dividend to be no bigger than that as well.
	 *
	 * Also eBPF doesn't support signed divide and has enforced this at
	 * the C language level by failing compilation.  However the LLVM
	 * assembler hasn't enforced this, so it is possible for a negative
	 * constant to leak in as a BPF_K operand through assembly code; we
	 * reject such cases as well.
	 */
	if (is_mbpf_div(meta)) {
		if (meta->umax_dst > U32_MAX) {
			pr_vlog(env, "dividend is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_src(meta) == BPF_X) {
			if (meta->umin_src != meta->umax_src) {
				pr_vlog(env, "divisor is not constant\n");
				return -EINVAL;
			}
			if (meta->umax_src > U32_MAX) {
				pr_vlog(env, "divisor is not within u32 value range\n");
				return -EINVAL;
			}
		}
		if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
			pr_vlog(env, "divide by negative constant is not supported\n");
			return -EINVAL;
		}
	}

	return 0;
}
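
/* Per-instruction verification callback, invoked by the core verifier for
 * every instruction it walks.
 */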
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
	nfp_prog->verifier_meta = meta;

	if (!nfp_bpf_supported_opcode(meta->insn.code)) {
		pr_vlog(env, "instruction %#02x not supported\n",
			meta->insn.code);
		return -EINVAL;
	}

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
		pr_vlog(env, "program uses extended registers - jit hardening?\n");
		return -EINVAL;
	}

	if (meta->insn.code == (BPF_JMP | BPF_CALL))
		return nfp_bpf_check_call(nfp_prog, env, meta);
	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
		return nfp_bpf_check_exit(nfp_prog, env);

	if (is_mbpf_load(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.src_reg);
	if (is_mbpf_store(meta))
		return nfp_bpf_check_store(nfp_prog, meta, env);

	if (is_mbpf_xadd(meta))
		return nfp_bpf_check_xadd(nfp_prog, meta, env);

	if (is_mbpf_alu(meta))
		return nfp_bpf_check_alu(nfp_prog, meta, env);

	return 0;
}

const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
	.insn_hook	= nfp_verify_insn,
};