/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>

#define DRV_NAME        "veth"
#define DRV_VERSION     "1.0"

#define VETH_XDP_FLAG           BIT(0)
#define VETH_RING_SIZE          256
#define VETH_XDP_HEADROOM       (XDP_PACKET_HEADROOM + NET_IP_ALIGN)

/* Two types of XDP xmit that must be flushed separately in veth_poll() */
#define VETH_XDP_TX             BIT(0)
#define VETH_XDP_REDIR          BIT(1)

struct pcpu_vstats {
        u64                     packets;
        u64                     bytes;
        struct u64_stats_sync   syncp;
};

struct veth_rq {
        struct napi_struct      xdp_napi;
        struct net_device       *dev;
        struct bpf_prog __rcu   *xdp_prog;
        struct xdp_mem_info     xdp_mem;
        bool                    rx_notify_masked;
        struct ptr_ring         xdp_ring;
        struct xdp_rxq_info     xdp_rxq;
};

struct veth_priv {
        struct net_device __rcu *peer;
        atomic64_t              dropped;
        struct bpf_prog         *_xdp_prog;
        struct veth_rq          *rq;
        unsigned int            requested_headroom;
};

/*
 * ethtool interface
 */

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "peer_ifindex" },
};

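/* A veth pair has no physical link, so the link settings reported below
 * are nominal placeholders (10Gbit/s, full duplex) for tools such as
 * ethtool that query them; they say nothing about actual throughput.
 */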
static int veth_get_link_ksettings(struct net_device *dev,
                                   struct ethtool_link_ksettings *cmd)
{
        cmd->base.speed         = SPEED_10000;
        cmd->base.duplex        = DUPLEX_FULL;
        cmd->base.port          = PORT_TP;
        cmd->base.autoneg       = AUTONEG_DISABLE;
        return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        }
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ethtool_stats_keys);
        default:
                return -EOPNOTSUPP;
        }
}

static void veth_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *data)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);

        data[0] = peer ? peer->ifindex : 0;
}

static const struct ethtool_ops veth_ethtool_ops = {
        .get_drvinfo            = veth_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_strings            = veth_get_strings,
        .get_sset_count         = veth_get_sset_count,
        .get_ethtool_stats      = veth_get_ethtool_stats,
        .get_link_ksettings     = veth_get_link_ksettings,
};

/* general routines */

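/* The xdp_ring on a receive queue carries two kinds of pointers: sk_buffs
 * queued by veth_xmit() and xdp_frames queued by veth_xdp_xmit(). Both are
 * at least word-aligned, so bit 0 (VETH_XDP_FLAG) is free to tag
 * xdp_frames; the helpers below set, test, and strip that tag.
 */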
static bool veth_is_xdp_frame(void *ptr)
{
        return (unsigned long)ptr & VETH_XDP_FLAG;
}

static void *veth_ptr_to_xdp(void *ptr)
{
        return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(void *ptr)
{
        return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
        if (veth_is_xdp_frame(ptr))
                xdp_return_frame(veth_ptr_to_xdp(ptr));
        else
                kfree_skb(ptr);
}

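/* Kick NAPI on a receive queue unless a poll is already pending.
 * rx_notify_masked suppresses redundant schedules; the barrier below pairs
 * with smp_store_mb() in veth_poll() so a producer either observes the
 * mask or schedules NAPI itself.
 */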
static void __veth_xdp_flush(struct veth_rq *rq)
{
        /* Write ptr_ring before reading rx_notify_masked */
        smp_mb();
        if (!READ_ONCE(rq->rx_notify_masked) &&
            napi_schedule_prep(&rq->xdp_napi)) {
                WRITE_ONCE(rq->rx_notify_masked, true);
                __napi_schedule(&rq->xdp_napi);
        }
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
        if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
                            struct veth_rq *rq, bool xdp)
{
        return __dev_forward_skb(dev, skb) ?: xdp ?
                veth_xdp_rx(rq, skb) :
                netif_rx(skb);
}

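/* Transmit path. Look up the peer under RCU, pick the peer rq matching
 * this skb's queue mapping, and either feed the skb to that rq's XDP ring
 * (when a program is attached there) or hand it straight to netif_rx().
 * The device is LLTX, so the per-cpu stats rely on u64_stats_sync alone.
 */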
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct veth_rq *rq = NULL;
        struct net_device *rcv;
        int length = skb->len;
        bool rcv_xdp = false;
        int rxq;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
                kfree_skb(skb);
                goto drop;
        }

        rcv_priv = netdev_priv(rcv);
        rxq = skb_get_queue_mapping(skb);
        if (rxq < rcv->real_num_rx_queues) {
                rq = &rcv_priv->rq[rxq];
                rcv_xdp = rcu_access_pointer(rq->xdp_prog);
        }

        if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
                struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);

                u64_stats_update_begin(&stats->syncp);
                stats->bytes += length;
                stats->packets++;
                u64_stats_update_end(&stats->syncp);
        } else {
drop:
                atomic64_inc(&priv->dropped);
        }

        if (rcv_xdp)
                __veth_xdp_flush(rq);

        rcu_read_unlock();

        return NETDEV_TX_OK;
}

static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int cpu;

        result->packets = 0;
        result->bytes = 0;
        for_each_possible_cpu(cpu) {
                struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
                u64 packets, bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        packets = stats->packets;
                        bytes = stats->bytes;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
                result->packets += packets;
                result->bytes += bytes;
        }
        return atomic64_read(&priv->dropped);
}

static void veth_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *tot)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
        struct pcpu_vstats one;

        tot->tx_dropped = veth_stats_one(&one, dev);
        tot->tx_bytes = one.bytes;
        tot->tx_packets = one.packets;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (peer) {
                tot->rx_dropped = veth_stats_one(&one, peer);
                tot->rx_bytes = one.bytes;
                tot->rx_packets = one.packets;
        }
        rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
                                      int buflen)
{
        struct sk_buff *skb;

        if (!buflen) {
                buflen = SKB_DATA_ALIGN(headroom + len) +
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        }
        skb = build_skb(head, buflen);
        if (!skb)
                return NULL;

        skb_reserve(skb, headroom);
        skb_put(skb, len);

        return skb;
}

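/* Spread ndo_xdp_xmit() traffic across the peer's rx queues by the CPU
 * doing the transmit; redirects issued from one CPU stay on one queue.
 */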
static int veth_select_rxq(struct net_device *dev)
{
        return smp_processor_id() % dev->real_num_rx_queues;
}

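/* ndo_xdp_xmit(): queue up to @n xdp_frames on the peer's ring. This runs
 * in the XDP redirect path in NAPI context; the peer lookup depends on the
 * caller's RCU read-side section (note there is no rcu_read_lock() here).
 * Frames larger than the peer can accept, or that do not fit in the ring,
 * are freed and counted as drops; the return value is the number of
 * frames actually queued.
 */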
static int veth_xdp_xmit(struct net_device *dev, int n,
                         struct xdp_frame **frames, u32 flags)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct net_device *rcv;
        unsigned int max_len;
        struct veth_rq *rq;
        int i, drops = 0;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                return -ENXIO;

        rcv_priv = netdev_priv(rcv);
        rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
         * side. This means an XDP program is loaded on the peer and the peer
         * device is up.
         */
        if (!rcu_access_pointer(rq->xdp_prog))
                return -ENXIO;

        max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

        spin_lock(&rq->xdp_ring.producer_lock);
        for (i = 0; i < n; i++) {
                struct xdp_frame *frame = frames[i];
                void *ptr = veth_xdp_to_ptr(frame);

                if (unlikely(frame->len > max_len ||
                             __ptr_ring_produce(&rq->xdp_ring, ptr))) {
                        xdp_return_frame_rx_napi(frame);
                        drops++;
                }
        }
        spin_unlock(&rq->xdp_ring.producer_lock);

        if (flags & XDP_XMIT_FLUSH)
                __veth_xdp_flush(rq);

        return n - drops;
}

static void veth_xdp_flush(struct net_device *dev)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct net_device *rcv;
        struct veth_rq *rq;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                goto out;

        rcv_priv = netdev_priv(rcv);
        rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* A non-NULL xdp_prog means xdp_ring is initialized on the receive side */
        if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
                goto out;

        __veth_xdp_flush(rq);
out:
        rcu_read_unlock();
}

static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
        struct xdp_frame *frame = convert_to_xdp_frame(xdp);

        if (unlikely(!frame))
                return -EOVERFLOW;

        return veth_xdp_xmit(dev, 1, &frame, 0);
}

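/* Run the attached XDP program on one xdp_frame consumed from the ring
 * (i.e. a frame that arrived via ndo_xdp_xmit). On XDP_PASS the frame's
 * buffer becomes an skb via veth_build_skb(); XDP_TX bounces it back out
 * of this device and XDP_REDIRECT hands it to xdp_do_redirect(). The frame
 * metadata is copied to orig_frame first so the original frame can still
 * be returned if forwarding fails.
 */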
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
                                        struct xdp_frame *frame,
                                        unsigned int *xdp_xmit)
{
        void *hard_start = frame->data - frame->headroom;
        int len = frame->len, delta = 0;
        struct xdp_frame orig_frame;
        struct bpf_prog *xdp_prog;
        unsigned int headroom;
        struct sk_buff *skb;

        /* bpf_xdp_adjust_head() ensures BPF cannot access the xdp_frame area */
        hard_start -= sizeof(struct xdp_frame);

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (likely(xdp_prog)) {
                struct xdp_buff xdp;
                u32 act;

                xdp.data_hard_start = hard_start;
                xdp.data = frame->data;
                xdp.data_end = frame->data + frame->len;
                xdp.data_meta = frame->data - frame->metasize;
                xdp.rxq = &rq->xdp_rxq;

                act = bpf_prog_run_xdp(xdp_prog, &xdp);

                switch (act) {
                case XDP_PASS:
                        delta = frame->data - xdp.data;
                        len = xdp.data_end - xdp.data;
                        break;
                case XDP_TX:
                        orig_frame = *frame;
                        xdp.rxq->mem = frame->mem;
                        if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
                                trace_xdp_exception(rq->dev, xdp_prog, act);
                                frame = &orig_frame;
                                goto err_xdp;
                        }
                        *xdp_xmit |= VETH_XDP_TX;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        orig_frame = *frame;
                        xdp.rxq->mem = frame->mem;
                        if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
                                frame = &orig_frame;
                                goto err_xdp;
                        }
                        *xdp_xmit |= VETH_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(act);
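                        /* fall through */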
                case XDP_ABORTED:
                        trace_xdp_exception(rq->dev, xdp_prog, act);
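                        /* fall through */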
                case XDP_DROP:
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

        headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
        skb = veth_build_skb(hard_start, headroom, len, 0);
        if (!skb) {
                xdp_return_frame(frame);
                goto err;
        }

        xdp_scrub_frame(frame);
        skb->protocol = eth_type_trans(skb, rq->dev);
err:
        return skb;
err_xdp:
        rcu_read_unlock();
        xdp_return_frame(frame);
xdp_xmit:
        return NULL;
}

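/* Run the attached XDP program on an skb queued by the peer's veth_xmit().
 * If the skb is shared, has a locked head, is nonlinear, or lacks
 * XDP_PACKET_HEADROOM, the data is first copied into a fresh page so the
 * program sees a writable, linear buffer. After XDP_PASS the skb's head,
 * tail, and mac header are adjusted to mirror whatever the program did
 * with bpf_xdp_adjust_head()/bpf_xdp_adjust_meta().
 */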
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
                                        unsigned int *xdp_xmit)
{
        u32 pktlen, headroom, act, metalen;
        void *orig_data, *orig_data_end;
        struct bpf_prog *xdp_prog;
        int mac_len, delta, off;
        struct xdp_buff xdp;

        skb_orphan(skb);

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (unlikely(!xdp_prog)) {
                rcu_read_unlock();
                goto out;
        }

        mac_len = skb->data - skb_mac_header(skb);
        pktlen = skb->len + mac_len;
        headroom = skb_headroom(skb) - mac_len;

        if (skb_shared(skb) || skb_head_is_locked(skb) ||
            skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
                struct sk_buff *nskb;
                int size, head_off;
                void *head, *start;
                struct page *page;

                size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                if (size > PAGE_SIZE)
                        goto drop;

                page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
                if (!page)
                        goto drop;

                head = page_address(page);
                start = head + VETH_XDP_HEADROOM;
                if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
                        page_frag_free(head);
                        goto drop;
                }

                nskb = veth_build_skb(head,
                                      VETH_XDP_HEADROOM + mac_len, skb->len,
                                      PAGE_SIZE);
                if (!nskb) {
                        page_frag_free(head);
                        goto drop;
                }

                skb_copy_header(nskb, skb);
                head_off = skb_headroom(nskb) - skb_headroom(skb);
                skb_headers_offset_update(nskb, head_off);
                consume_skb(skb);
                skb = nskb;
        }

        xdp.data_hard_start = skb->head;
        xdp.data = skb_mac_header(skb);
        xdp.data_end = xdp.data + pktlen;
        xdp.data_meta = xdp.data;
        xdp.rxq = &rq->xdp_rxq;
        orig_data = xdp.data;
        orig_data_end = xdp.data_end;

        act = bpf_prog_run_xdp(xdp_prog, &xdp);

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                get_page(virt_to_page(xdp.data));
                consume_skb(skb);
                xdp.rxq->mem = rq->xdp_mem;
                if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
                        trace_xdp_exception(rq->dev, xdp_prog, act);
                        goto err_xdp;
                }
                *xdp_xmit |= VETH_XDP_TX;
                rcu_read_unlock();
                goto xdp_xmit;
        case XDP_REDIRECT:
                get_page(virt_to_page(xdp.data));
                consume_skb(skb);
                xdp.rxq->mem = rq->xdp_mem;
                if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
                        goto err_xdp;
                *xdp_xmit |= VETH_XDP_REDIR;
                rcu_read_unlock();
                goto xdp_xmit;
        default:
                bpf_warn_invalid_xdp_action(act);
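                /* fall through */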
        case XDP_ABORTED:
                trace_xdp_exception(rq->dev, xdp_prog, act);
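                /* fall through */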
        case XDP_DROP:
                goto drop;
        }
        rcu_read_unlock();

        delta = orig_data - xdp.data;
        off = mac_len + delta;
        if (off > 0)
                __skb_push(skb, off);
        else if (off < 0)
                __skb_pull(skb, -off);
        skb->mac_header -= delta;
        off = xdp.data_end - orig_data_end;
        if (off != 0)
                __skb_put(skb, off);
        skb->protocol = eth_type_trans(skb, rq->dev);

        metalen = xdp.data - xdp.data_meta;
        if (metalen)
                skb_metadata_set(skb, metalen);
out:
        return skb;
drop:
        rcu_read_unlock();
        kfree_skb(skb);
        return NULL;
err_xdp:
        rcu_read_unlock();
        page_frag_free(xdp.data);
xdp_xmit:
        return NULL;
}

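/* Consume up to @budget entries from the ring, dispatching each one to the
 * xdp_frame or skb handler according to its tag bit, and GRO-receive
 * whatever survives as an skb.
 */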
static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
{
        int i, done = 0;

        for (i = 0; i < budget; i++) {
                void *ptr = __ptr_ring_consume(&rq->xdp_ring);
                struct sk_buff *skb;

                if (!ptr)
                        break;

                if (veth_is_xdp_frame(ptr)) {
                        skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr),
                                               xdp_xmit);
                } else {
                        skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit);
                }

                if (skb)
                        napi_gro_receive(&rq->xdp_napi, skb);

                done++;
        }

        return done;
}

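/* NAPI handler. After napi_complete_done() the ring is checked again and
 * NAPI rescheduled if a producer slipped in meanwhile; the smp_store_mb()
 * on rx_notify_masked pairs with the barrier in __veth_xdp_flush(). Any
 * XDP_TX or XDP_REDIRECT work accumulated in xdp_xmit is flushed last.
 */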
static int veth_poll(struct napi_struct *napi, int budget)
{
        struct veth_rq *rq =
                container_of(napi, struct veth_rq, xdp_napi);
        unsigned int xdp_xmit = 0;
        int done;

        xdp_set_return_frame_no_direct();
        done = veth_xdp_rcv(rq, budget, &xdp_xmit);

        if (done < budget && napi_complete_done(napi, done)) {
                /* Write rx_notify_masked before reading ptr_ring */
                smp_store_mb(rq->rx_notify_masked, false);
                if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
                        if (napi_schedule_prep(&rq->xdp_napi)) {
                                WRITE_ONCE(rq->rx_notify_masked, true);
                                __napi_schedule(&rq->xdp_napi);
                        }
                }
        }

        if (xdp_xmit & VETH_XDP_TX)
                veth_xdp_flush(rq->dev);
        if (xdp_xmit & VETH_XDP_REDIR)
                xdp_do_flush_map();
        xdp_clear_return_frame_no_direct();

        return done;
}

static int veth_napi_add(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
                if (err)
                        goto err_xdp_ring;
        }

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
                napi_enable(&rq->xdp_napi);
        }

        return 0;
err_xdp_ring:
        for (i--; i >= 0; i--)
                ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

        return err;
}

static void veth_napi_del(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                napi_disable(&rq->xdp_napi);
                napi_hash_del(&rq->xdp_napi);
        }
        synchronize_net();

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                netif_napi_del(&rq->xdp_napi);
                rq->rx_notify_masked = false;
                ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
        }
}

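/* Attach the current program to every rx queue. The xdp_rxq_info
 * registration and NAPI setup happen only on the first call (detected via
 * rq[0]); subsequent calls on a live device just swap the per-queue
 * program pointers.
 */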
static int veth_enable_xdp(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
                for (i = 0; i < dev->real_num_rx_queues; i++) {
                        struct veth_rq *rq = &priv->rq[i];

                        err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
                        if (err < 0)
                                goto err_rxq_reg;

                        err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
                                                         MEM_TYPE_PAGE_SHARED,
                                                         NULL);
                        if (err < 0)
                                goto err_reg_mem;

                        /* Save original mem info as it can be overwritten */
                        rq->xdp_mem = rq->xdp_rxq.mem;
                }

                err = veth_napi_add(dev);
                if (err)
                        goto err_rxq_reg;
        }

        for (i = 0; i < dev->real_num_rx_queues; i++)
                rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

        return 0;
err_reg_mem:
        xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
        for (i--; i >= 0; i--)
                xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);

        return err;
}

static void veth_disable_xdp(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < dev->real_num_rx_queues; i++)
                rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
        veth_napi_del(dev);
        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                rq->xdp_rxq.mem = rq->xdp_mem;
                xdp_rxq_info_unreg(&rq->xdp_rxq);
        }
}

static int veth_open(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);
        int err;

        if (!peer)
                return -ENOTCONN;

        if (priv->_xdp_prog) {
                err = veth_enable_xdp(dev);
                if (err)
                        return err;
        }

        if (peer->flags & IFF_UP) {
                netif_carrier_on(dev);
                netif_carrier_on(peer);
        }

        return 0;
}

static int veth_close(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);

        netif_carrier_off(dev);
        if (peer)
                netif_carrier_off(peer);

        if (priv->_xdp_prog)
                veth_disable_xdp(dev);

        return 0;
}

static int is_valid_veth_mtu(int mtu)
{
        return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
        if (!priv->rq)
                return -ENOMEM;

        for (i = 0; i < dev->num_rx_queues; i++)
                priv->rq[i].dev = dev;

        return 0;
}

static void veth_free_queues(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);

        kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
        int err;

        dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
        if (!dev->vstats)
                return -ENOMEM;

        err = veth_alloc_queues(dev);
        if (err) {
                free_percpu(dev->vstats);
                return err;
        }

        return 0;
}

static void veth_dev_free(struct net_device *dev)
{
        veth_free_queues(dev);
        free_percpu(dev->vstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
        /* veth only receives frames when its peer sends one.
         * Since it has nothing to do with disabling irqs, we are guaranteed
         * never to have pending data when we poll for it, so there is
         * nothing to do here.
         *
         * We need this though, so netpoll recognizes us as an interface that
         * supports polling, which enables bridge devices in virt setups to
         * still use netconsole.
         */
}
#endif  /* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
        int iflink;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        iflink = peer ? peer->ifindex : 0;
        rcu_read_unlock();

        return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
                                           netdev_features_t features)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;

        peer = rtnl_dereference(priv->peer);
        if (peer) {
                struct veth_priv *peer_priv = netdev_priv(peer);

                if (peer_priv->_xdp_prog)
                        features &= ~NETIF_F_GSO_SOFTWARE;
        }

        return features;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
        struct veth_priv *peer_priv, *priv = netdev_priv(dev);
        struct net_device *peer;

        if (new_hr < 0)
                new_hr = 0;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (unlikely(!peer))
                goto out;

        peer_priv = netdev_priv(peer);
        priv->requested_headroom = new_hr;
        new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
        dev->needed_headroom = new_hr;
        peer->needed_headroom = new_hr;

out:
        rcu_read_unlock();
}

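/* Install or remove an XDP program. Installing one requires an attached
 * peer, a peer MTU small enough that a frame plus XDP headroom and
 * skb_shared_info fits in a single page, and at least as many rx queues
 * here as the peer has tx queues, so every peer tx queue maps onto an rq.
 * The first program also clears the peer's software GSO features and caps
 * its max_mtu; removing the last program restores both.
 */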
static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                        struct netlink_ext_ack *extack)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct bpf_prog *old_prog;
        struct net_device *peer;
        unsigned int max_mtu;
        int err;

        old_prog = priv->_xdp_prog;
        priv->_xdp_prog = prog;
        peer = rtnl_dereference(priv->peer);

        if (prog) {
                if (!peer) {
                        NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
                        err = -ENOTCONN;
                        goto err;
                }

                max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
                          peer->hard_header_len -
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                if (peer->mtu > max_mtu) {
                        NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
                        err = -ERANGE;
                        goto err;
                }

                if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
                        NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
                        err = -ENOSPC;
                        goto err;
                }

                if (dev->flags & IFF_UP) {
                        err = veth_enable_xdp(dev);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
                                goto err;
                        }
                }

                if (!old_prog) {
                        peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
                        peer->max_mtu = max_mtu;
                }
        }

        if (old_prog) {
                if (!prog) {
                        if (dev->flags & IFF_UP)
                                veth_disable_xdp(dev);

                        if (peer) {
                                peer->hw_features |= NETIF_F_GSO_SOFTWARE;
                                peer->max_mtu = ETH_MAX_MTU;
                        }
                }
                bpf_prog_put(old_prog);
        }

        if ((!!old_prog ^ !!prog) && peer)
                netdev_update_features(peer);

        return 0;
err:
        priv->_xdp_prog = old_prog;

        return err;
}

static u32 veth_xdp_query(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        const struct bpf_prog *xdp_prog;

        xdp_prog = priv->_xdp_prog;
        if (xdp_prog)
                return xdp_prog->aux->id;

        return 0;
}

static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return veth_xdp_set(dev, xdp->prog, xdp->extack);
        case XDP_QUERY_PROG:
                xdp->prog_id = veth_xdp_query(dev);
                return 0;
        default:
                return -EINVAL;
        }
}

static const struct net_device_ops veth_netdev_ops = {
        .ndo_init            = veth_dev_init,
        .ndo_open            = veth_open,
        .ndo_stop            = veth_close,
        .ndo_start_xmit      = veth_xmit,
        .ndo_get_stats64     = veth_get_stats64,
        .ndo_set_rx_mode     = veth_set_multicast_list,
        .ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = veth_poll_controller,
#endif
        .ndo_get_iflink         = veth_get_iflink,
        .ndo_fix_features       = veth_fix_features,
        .ndo_features_check     = passthru_features_check,
        .ndo_set_rx_headroom    = veth_set_rx_headroom,
        .ndo_bpf                = veth_xdp,
        .ndo_xdp_xmit           = veth_xdp_xmit,
};

#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
                       NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
                       NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
                       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
                       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)

static void veth_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->priv_flags |= IFF_PHONY_HEADROOM;

        dev->netdev_ops = &veth_netdev_ops;
        dev->ethtool_ops = &veth_ethtool_ops;
        dev->features |= NETIF_F_LLTX;
        dev->features |= VETH_FEATURES;
        dev->vlan_features = dev->features &
                             ~(NETIF_F_HW_VLAN_CTAG_TX |
                               NETIF_F_HW_VLAN_STAG_TX |
                               NETIF_F_HW_VLAN_CTAG_RX |
                               NETIF_F_HW_VLAN_STAG_RX);
        dev->needs_free_netdev = true;
        dev->priv_destructor = veth_dev_free;
        dev->max_mtu = ETH_MAX_MTU;

        dev->hw_features = VETH_FEATURES;
        dev->hw_enc_features = VETH_FEATURES;
        dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}

/*
 * netlink interface
 */

static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
                         struct netlink_ext_ack *extack)
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        if (tb[IFLA_MTU]) {
                if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
                        return -EINVAL;
        }
        return 0;
}

static struct rtnl_link_ops veth_link_ops;

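/* Create both ends of the pair. From userspace this is typically, e.g.:
 *
 *      ip link add veth0 type veth peer name veth1
 *
 * where the attributes nested in VETH_INFO_PEER describe the second
 * device. The peer is created and registered first, then dev itself, and
 * finally the two priv->peer pointers are tied together.
 */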
static int veth_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[],
                        struct netlink_ext_ack *extack)
{
        int err;
        struct net_device *peer;
        struct veth_priv *priv;
        char ifname[IFNAMSIZ];
        struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
        unsigned char name_assign_type;
        struct ifinfomsg *ifmp;
        struct net *net;

        /*
         * create and register peer first
         */
        if (data != NULL && data[VETH_INFO_PEER] != NULL) {
                struct nlattr *nla_peer;

                nla_peer = data[VETH_INFO_PEER];
                ifmp = nla_data(nla_peer);
                err = rtnl_nla_parse_ifla(peer_tb,
                                          nla_data(nla_peer) + sizeof(struct ifinfomsg),
                                          nla_len(nla_peer) - sizeof(struct ifinfomsg),
                                          NULL);
                if (err < 0)
                        return err;

                err = veth_validate(peer_tb, NULL, extack);
                if (err < 0)
                        return err;

                tbp = peer_tb;
        } else {
                ifmp = NULL;
                tbp = tb;
        }

        if (ifmp && tbp[IFLA_IFNAME]) {
                nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
                name_assign_type = NET_NAME_USER;
        } else {
                snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
                name_assign_type = NET_NAME_ENUM;
        }

        net = rtnl_link_get_net(src_net, tbp);
        if (IS_ERR(net))
                return PTR_ERR(net);

        peer = rtnl_create_link(net, ifname, name_assign_type,
                                &veth_link_ops, tbp);
        if (IS_ERR(peer)) {
                put_net(net);
                return PTR_ERR(peer);
        }

        if (!ifmp || !tbp[IFLA_ADDRESS])
                eth_hw_addr_random(peer);

        if (ifmp && (dev->ifindex != 0))
                peer->ifindex = ifmp->ifi_index;

        peer->gso_max_size = dev->gso_max_size;
        peer->gso_max_segs = dev->gso_max_segs;

        err = register_netdevice(peer);
        put_net(net);
        net = NULL;
        if (err < 0)
                goto err_register_peer;

        netif_carrier_off(peer);

        err = rtnl_configure_link(peer, ifmp);
        if (err < 0)
                goto err_configure_peer;

        /*
         * register dev last
         *
         * note that, since we've just registered a new device, the dev's
         * name should be re-allocated
         */

        if (tb[IFLA_ADDRESS] == NULL)
                eth_hw_addr_random(dev);

        if (tb[IFLA_IFNAME])
                nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
        else
                snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

        err = register_netdevice(dev);
        if (err < 0)
                goto err_register_dev;

        netif_carrier_off(dev);

        /*
         * tie the devices together
         */

        priv = netdev_priv(dev);
        rcu_assign_pointer(priv->peer, peer);

        priv = netdev_priv(peer);
        rcu_assign_pointer(priv->peer, dev);

        return 0;

err_register_dev:
        /* nothing to do */
err_configure_peer:
        unregister_netdevice(peer);
        return err;

err_register_peer:
        free_netdev(peer);
        return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
        struct veth_priv *priv;
        struct net_device *peer;

        priv = netdev_priv(dev);
        peer = rtnl_dereference(priv->peer);

        /* Note: dellink() is called from default_device_exit_batch(),
         * before an rcu_synchronize() point. The devices are guaranteed
         * not to be freed before one RCU grace period.
         */
        RCU_INIT_POINTER(priv->peer, NULL);
        unregister_netdevice_queue(dev, head);

        if (peer) {
                priv = netdev_priv(peer);
                RCU_INIT_POINTER(priv->peer, NULL);
                unregister_netdevice_queue(peer, head);
        }
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
        [VETH_INFO_PEER]        = { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);

        return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops veth_link_ops = {
        .kind           = DRV_NAME,
        .priv_size      = sizeof(struct veth_priv),
        .setup          = veth_setup,
        .validate       = veth_validate,
        .newlink        = veth_newlink,
        .dellink        = veth_dellink,
        .policy         = veth_policy,
        .maxtype        = VETH_INFO_MAX,
        .get_link_net   = veth_get_link_net,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
        return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
        rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);