// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */
/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver-side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an RCU grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed
 * until this list is empty, indicating outstanding flush operations have
 * completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device and remove them. This is a two-step process:
 * (a) dereference the bpf_dtab_netdev object in netdev_map and (b) check
 * whether the ifindex is the same as that of the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as the key
 * to be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion is different.
 */
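/* For illustration, a minimal sketch (not part of this file) of the BPF side
 * of the scheme described above: an XDP program using a DEVMAP as the backend
 * for bpf_redirect_map(). The map name, key policy, and max_entries are
 * hypothetical; a BTF-defined map as supported by recent libbpf is assumed.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));	// ifindex-only values
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;	// slot chosen by whatever policy the program uses
 *
 *		// Returns XDP_REDIRECT on success; the low bits of the flags
 *		// argument (XDP_PASS here) are returned if the slot is empty.
 *		return bpf_redirect_map(&tx_ports, key, XDP_PASS);
 *	}
 */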
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
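/* A hedged user-space sketch (not part of this file) of creating a devmap with
 * one of the flags allowed by DEV_CREATE_FLAG_MASK. The raw bpf(2) syscall is
 * used so no particular libbpf version is assumed; the sizes and numa node are
 * illustrative values.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_DEVMAP,
 *		.key_size    = 4,		// u32 index
 *		.value_size  = 4,		// ifindex-only values
 *		.max_entries = 64,
 *		.map_flags   = BPF_F_NUMA_NODE,
 *		.numa_node   = 0,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */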
struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};
struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};
struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};
static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
	struct hlist_head *hash;

	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
	for (i = 0; i < entries; i++)
		INIT_HLIST_HEAD(&hash[i]);

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side.
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;
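	/* A hedged user-space sketch (not part of this file) of the two value
	 * layouts accepted above. Each requires a map created with the matching
	 * value_size; for the 8-byte layout, bpf_prog.fd refers to an XDP
	 * program loaded with expected_attach_type == BPF_XDP_DEVMAP. map_fd
	 * and devmap_prog_fd are hypothetical descriptors obtained elsewhere.
	 *
	 *	__u32 key = 0;
	 *	__u32 ifindex = 4;			// 4-byte value: ifindex only
	 *	bpf_map_update_elem(map_fd, &key, &ifindex, BPF_ANY);
	 *
	 *	struct bpf_devmap_val val = {
	 *		.ifindex = 4,			// 8-byte value: ifindex + prog fd
	 *		.bpf_prog.fd = devmap_prog_fd,
	 *	};
	 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
	 */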
	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
	}

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);
static void dev_map_free(struct bpf_map *map)
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. The following synchronize_rcu() guarantees
	 * that both rcu read critical sections have completed, and waits for
	 * preempt-disable regions (NAPI being the relevant context here), so we
	 * are certain there will be no further reads against the netdev_map and
	 * all flush operations are complete. Flush operations can only be done
	 * from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();
	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				bpf_prog_put(dev->xdp_prog);

		bpf_map_area_free(dtab->dev_index_head);

		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			bpf_prog_put(dev->xdp_prog);

		bpf_map_area_free(dtab->netdev_map);
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {

	if (index == dtab->map.max_entries - 1)
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;

	dev = __dev_map_hash_lookup_elem(map, idx);

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);
		*next = next_dev->idx;

	i = idx & (dtab->n_buckets - 1);

	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
			*next = next_dev->idx;
bool dev_map_can_have_prog(struct bpf_map *map)
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
	struct net_device *dev = bq->dev;
	int sent = 0, err = 0;

	if (unlikely(!bq->count))

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);

		/* If ndo_xdp_xmit fails with an errno, no frames have been
		 * transmitted.
		 */

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them.
	 */
	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, bq->count - sent, err);
	__list_del_clearprev(&bq->flush_node);
/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
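/* For illustration, a hedged sketch (not part of this file) of the driver
 * contract described above: a NAPI poll routine that runs its XDP program
 * (whose XDP_REDIRECT verdicts may queue frames into devmap bulk queues) and
 * then calls xdp_do_flush() before returning, so every bulk queue on this CPU
 * is drained. Names other than xdp_do_flush() and napi_complete_done() are
 * hypothetical.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done;
 *
 *		// receive packets; XDP redirects enqueue frames per-CPU
 *		work_done = example_clean_rx_ring(napi, budget);
 *
 *		// drain the per-CPU flush lists (devmap, cpumap, xskmap)
 *		xdp_do_flush();
 *
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;
 *	}
 */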
void __dev_flush(void)
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here, a dev_put won't happen until after reading
 * the ifindex.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)

	obj = READ_ONCE(dtab->netdev_map[key]);
/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx)
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frames in the bulk_queue,
	 * because the bq is stored per-CPU and must be flushed from the
	 * net_device driver's NAPI func end.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);

	xdpf = xdp_convert_buff_to_frame(xdp);

	bq_enqueue(dev, xdpf, dev_rx);
static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
	struct xdp_txq_info txq = { .dev = dev };

	xdp_set_data_meta_invalid(xdp);

	act = bpf_prog_run_xdp(xdp_prog, xdp);
		bpf_warn_invalid_xdp_action(act);
		trace_xdp_exception(dev, xdp_prog, act);

	xdp_return_buff(xdp);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
	return __xdp_enqueue(dev, xdp, dev_rx);

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
	struct net_device *dev = dst->dev;

		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);

	return __xdp_enqueue(dev, xdp, dev_rx);

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
	err = xdp_ok_fwd_dev(dst->dev, skb->len);

	generic_xdp_tx(skb, xdp_prog);
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);

	return obj ? &obj->val : NULL;
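/* For illustration, a hedged BPF-side sketch (not part of this file): since
 * lookups return a read-only pointer to the element's bpf_devmap_val, an XDP
 * program can inspect a slot before redirecting. The map name and key are
 * hypothetical; tx_ports is assumed to be the devmap from the earlier sketch.
 *
 *	__u32 key = 0;
 *	struct bpf_devmap_val *val;
 *
 *	val = bpf_map_lookup_elem(&tx_ports, &key);
 *	if (val && val->ifindex)
 *		return bpf_redirect_map(&tx_ports, key, XDP_PASS);
 *	return XDP_PASS;
 */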
static void __dev_map_entry_free(struct rcu_head *rcu)
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
static int dev_map_delete_elem(struct bpf_map *map, void *key)
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed as well as any flush operations, because call_rcu
	 * will wait for the preempt-disable region to complete, NAPI in
	 * this context. Additionally, the driver tear down ensures all
	 * soft irqs are complete before removing the net device in the
	 * case where the dev refcount drops to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	spin_unlock_irqrestore(&dtab->index_lock, flags);
static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_ATOMIC | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)

	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return ERR_PTR(-EINVAL);
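/* For illustration, a hedged sketch (not part of this file) of the program
 * type checked above: an XDP program loaded with expected_attach_type ==
 * BPF_XDP_DEVMAP, whose fd is stored in the map value (see the bpf_devmap_val
 * sketch earlier). It runs on the egress device after the redirect; the
 * section name convention depends on the libbpf version, so "xdp_devmap" here
 * is an assumption.
 *
 *	SEC("xdp_devmap")
 *	int xdp_devmap_prog(struct xdp_md *ctx)
 *	{
 *		// ctx->egress_ifindex identifies the device this frame is
 *		// about to be transmitted on (filled from xdp_txq_info).
 *		if (ctx->egress_ifindex == 0)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */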
static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

		/* cannot specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)

	dev = __dev_map_alloc_node(net, dtab, &val, i);

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember that the driver-side flush operation will happen before the
	 * net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))

	dev = __dev_map_alloc_node(net, dtab, &val, idx);

		hlist_del_rcu(&old_dev->index_hlist);

		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	spin_unlock_irqrestore(&dtab->index_lock, flags);

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
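/* A hedged user-space sketch (not part of this file) of the devmap_hash
 * variant handled above: because the key is interpreted as an ifindex, the
 * interface index itself can serve as both key and value, with no array slots
 * wasted on unused ifindexes. map_fd is assumed to refer to a
 * BPF_MAP_TYPE_DEVMAP_HASH map with 4-byte values; the device name is
 * hypothetical.
 *
 *	__u32 ifindex = if_nametoindex("eth0");
 *
 *	bpf_map_update_elem(map_fd, &ifindex, &ifindex, BPF_NOEXIST);
 *	bpf_map_delete_elem(map_fd, &ifindex);
 */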
static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
	return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_lookup_elem);

static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
	return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_hash_lookup_elem);
static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_btf_id,
	.map_redirect = dev_map_redirect,
};

static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_hash_map_btf_id,
	.map_redirect = dev_hash_map_redirect,
};
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
	spin_unlock_irqrestore(&dtab->index_lock, flags);
static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (odev == dev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Ensure the tracepoint's shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);