// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */
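
/*
 * Example usage (iproute2): veth devices are created in connected pairs;
 * a frame transmitted on one device is received on its peer:
 *
 *   ip link add veth0 type veth peer name veth1
 */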

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME        "veth"
#define DRV_VERSION     "1.0"

#define VETH_XDP_FLAG           BIT(0)
#define VETH_RING_SIZE          256
#define VETH_XDP_HEADROOM       (XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE   16
#define VETH_XDP_BATCH          16
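
/* XDP_TX frames are buffered in a per-NAPI veth_xdp_tx_bq of
 * VETH_XDP_TX_BULK_SIZE entries and flushed in bulk (see
 * veth_xdp_flush_bq()); on receive, XDP_PASS frames are converted to
 * skbs up to VETH_XDP_BATCH at a time (see veth_xdp_rcv_bulk_skb()).
 */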

struct veth_stats {
        u64     rx_drops;
        /* xdp */
        u64     xdp_packets;
        u64     xdp_bytes;
        u64     xdp_redirect;
        u64     xdp_drops;
        u64     xdp_tx;
        u64     xdp_tx_err;
        u64     peer_tq_xdp_xmit;
        u64     peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
        struct veth_stats       vs;
        struct u64_stats_sync   syncp;
};

struct veth_rq {
        struct napi_struct      xdp_napi;
        struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
        struct net_device       *dev;
        struct bpf_prog __rcu   *xdp_prog;
        struct xdp_mem_info     xdp_mem;
        struct veth_rq_stats    stats;
        bool                    rx_notify_masked;
        struct ptr_ring         xdp_ring;
        struct xdp_rxq_info     xdp_rxq;
};

struct veth_priv {
        struct net_device __rcu *peer;
        atomic64_t              dropped;
        struct bpf_prog         *_xdp_prog;
        struct veth_rq          *rq;
        unsigned int            requested_headroom;
};

struct veth_xdp_tx_bq {
        struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
        unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
        char    desc[ETH_GSTRING_LEN];
        size_t  offset;
};

#define VETH_RQ_STAT(m) offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
        { "xdp_packets",        VETH_RQ_STAT(xdp_packets) },
        { "xdp_bytes",          VETH_RQ_STAT(xdp_bytes) },
        { "drops",              VETH_RQ_STAT(rx_drops) },
        { "xdp_redirect",       VETH_RQ_STAT(xdp_redirect) },
        { "xdp_drops",          VETH_RQ_STAT(xdp_drops) },
        { "xdp_tx",             VETH_RQ_STAT(xdp_tx) },
        { "xdp_tx_errors",      VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN       ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
        { "xdp_xmit",           VETH_RQ_STAT(peer_tq_xdp_xmit) },
        { "xdp_xmit_errors",    VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN       ARRAY_SIZE(veth_tq_stats_desc)

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "peer_ifindex" },
};
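
/* "ethtool -S" output starts with peer_ifindex, followed by the
 * per-queue rx_queue_<i>_* and tx_queue_<i>_* counters laid out by
 * veth_get_strings() and filled in by veth_get_ethtool_stats() below.
 */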

static int veth_get_link_ksettings(struct net_device *dev,
                                   struct ethtool_link_ksettings *cmd)
{
        cmd->base.speed         = SPEED_10000;
        cmd->base.duplex        = DUPLEX_FULL;
        cmd->base.port          = PORT_TP;
        cmd->base.autoneg       = AUTONEG_DISABLE;
        return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        strscpy(info->driver, DRV_NAME, sizeof(info->driver));
        strscpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        u8 *p = buf;
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                p += sizeof(ethtool_stats_keys);
                for (i = 0; i < dev->real_num_rx_queues; i++)
                        for (j = 0; j < VETH_RQ_STATS_LEN; j++)
                                ethtool_sprintf(&p, "rx_queue_%u_%.18s",
                                                i, veth_rq_stats_desc[j].desc);

                for (i = 0; i < dev->real_num_tx_queues; i++)
                        for (j = 0; j < VETH_TQ_STATS_LEN; j++)
                                ethtool_sprintf(&p, "tx_queue_%u_%.18s",
                                                i, veth_tq_stats_desc[j].desc);
                break;
        }
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ethtool_stats_keys) +
                       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
                       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
        default:
                return -EOPNOTSUPP;
        }
}

static void veth_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *data)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);
        int i, j, idx;

        data[0] = peer ? peer->ifindex : 0;
        idx = 1;
        for (i = 0; i < dev->real_num_rx_queues; i++) {
                const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
                const void *stats_base = (void *)&rq_stats->vs;
                unsigned int start;
                size_t offset;

                do {
                        start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
                        for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
                                offset = veth_rq_stats_desc[j].offset;
                                data[idx + j] = *(u64 *)(stats_base + offset);
                        }
                } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
                idx += VETH_RQ_STATS_LEN;
        }

        if (!peer)
                return;

        rcv_priv = netdev_priv(peer);
        for (i = 0; i < peer->real_num_rx_queues; i++) {
                const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
                const void *base = (void *)&rq_stats->vs;
                unsigned int start, tx_idx = idx;
                size_t offset;

                tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
                do {
                        start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
                        for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
                                offset = veth_tq_stats_desc[j].offset;
                                data[tx_idx + j] += *(u64 *)(base + offset);
                        }
                } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
        }
}

static void veth_get_channels(struct net_device *dev,
                              struct ethtool_channels *channels)
{
        channels->tx_count = dev->real_num_tx_queues;
        channels->rx_count = dev->real_num_rx_queues;
        channels->max_tx = dev->num_tx_queues;
        channels->max_rx = dev->num_rx_queues;
}

static int veth_set_channels(struct net_device *dev,
                             struct ethtool_channels *ch);

static const struct ethtool_ops veth_ethtool_ops = {
        .get_drvinfo            = veth_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_strings            = veth_get_strings,
        .get_sset_count         = veth_get_sset_count,
        .get_ethtool_stats      = veth_get_ethtool_stats,
        .get_link_ksettings     = veth_get_link_ksettings,
        .get_ts_info            = ethtool_op_get_ts_info,
        .get_channels           = veth_get_channels,
        .set_channels           = veth_set_channels,
};

/* general routines */

static bool veth_is_xdp_frame(void *ptr)
{
        return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
        return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
        return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}
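
/* Entries in the xdp_ring are either plain sk_buff pointers (bit 0
 * clear) or xdp_frame pointers tagged with VETH_XDP_FLAG in bit 0;
 * pointer alignment guarantees that bit is otherwise unused.
 */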

static void veth_ptr_free(void *ptr)
{
        if (veth_is_xdp_frame(ptr))
                xdp_return_frame(veth_ptr_to_xdp(ptr));
        else
                kfree_skb(ptr);
}

static void __veth_xdp_flush(struct veth_rq *rq)
{
        /* Write ptr_ring before reading rx_notify_masked */
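        /* This barrier pairs with the smp_store_mb() of rx_notify_masked
         * in veth_poll().
         */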
        smp_mb();
        if (!READ_ONCE(rq->rx_notify_masked) &&
            napi_schedule_prep(&rq->xdp_napi)) {
                WRITE_ONCE(rq->rx_notify_masked, true);
                __napi_schedule(&rq->xdp_napi);
        }
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
        if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
                            struct veth_rq *rq, bool xdp)
{
        return __dev_forward_skb(dev, skb) ?: xdp ?
                veth_xdp_rx(rq, skb) :
                __netif_rx(skb);
}

/* Return true if the specified skb has a chance of GRO aggregation.
 * Don't strive for accuracy, but try to avoid GRO overhead in the most
 * common scenarios.
 * When XDP is enabled, all traffic is considered eligible, as the xmit
 * device has TSO off.
 * When TSO is enabled on the xmit device, we are likely interested only
 * in UDP aggregation, so explicitly check whether the skb is suspected
 * to belong to locally generated UDP traffic (the sock_wfree destructor
 * is used by UDP, ICMP and XDP sockets).
 */
static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
                                         const struct net_device *rcv,
                                         const struct sk_buff *skb)
{
        return !(dev->features & NETIF_F_ALL_TSO) ||
                (skb->destructor == sock_wfree &&
                 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
}

static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct veth_rq *rq = NULL;
        struct net_device *rcv;
        int length = skb->len;
        bool use_napi = false;
        int rxq;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
                kfree_skb(skb);
                goto drop;
        }

        rcv_priv = netdev_priv(rcv);
        rxq = skb_get_queue_mapping(skb);
        if (rxq < rcv->real_num_rx_queues) {
                rq = &rcv_priv->rq[rxq];

                /* The napi pointer is available when an XDP program is
                 * attached or when GRO is enabled.
                 * Don't bother with napi/GRO if the skb can't be aggregated.
                 */
                use_napi = rcu_access_pointer(rq->napi) &&
                           veth_skb_is_eligible_for_gro(dev, rcv, skb);
        }

        skb_tx_timestamp(skb);
        if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
                if (!use_napi)
                        dev_lstats_add(dev, length);
        } else {
drop:
                atomic64_inc(&priv->dropped);
        }

        if (use_napi)
                __veth_xdp_flush(rq);

        rcu_read_unlock();

        return NETDEV_TX_OK;
}

static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
        struct veth_priv *priv = netdev_priv(dev);

        dev_lstats_read(dev, packets, bytes);
        return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        result->peer_tq_xdp_xmit_err = 0;
        result->xdp_packets = 0;
        result->xdp_tx_err = 0;
        result->xdp_bytes = 0;
        result->rx_drops = 0;
        for (i = 0; i < dev->num_rx_queues; i++) {
                u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
                struct veth_rq_stats *stats = &priv->rq[i].stats;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
                        xdp_tx_err = stats->vs.xdp_tx_err;
                        packets = stats->vs.xdp_packets;
                        bytes = stats->vs.xdp_bytes;
                        drops = stats->vs.rx_drops;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
                result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
                result->xdp_tx_err += xdp_tx_err;
                result->xdp_packets += packets;
                result->xdp_bytes += bytes;
                result->rx_drops += drops;
        }
}

static void veth_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *tot)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
        struct veth_stats rx;
        u64 packets, bytes;

        tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
        tot->tx_bytes = bytes;
        tot->tx_packets = packets;

        veth_stats_rx(&rx, dev);
        tot->tx_dropped += rx.xdp_tx_err;
        tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
        tot->rx_bytes = rx.xdp_bytes;
        tot->rx_packets = rx.xdp_packets;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (peer) {
                veth_stats_tx(peer, &packets, &bytes);
                tot->rx_bytes += bytes;
                tot->rx_packets += packets;

                veth_stats_rx(&rx, peer);
                tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
                tot->rx_dropped += rx.xdp_tx_err;
                tot->tx_bytes += rx.xdp_bytes;
                tot->tx_packets += rx.xdp_packets;
        }
        rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static int veth_select_rxq(struct net_device *dev)
{
        return smp_processor_id() % dev->real_num_rx_queues;
}

static struct net_device *veth_peer_dev(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);

        /* Callers must be under RCU read side. */
        return rcu_dereference(priv->peer);
}

static int veth_xdp_xmit(struct net_device *dev, int n,
                         struct xdp_frame **frames,
                         u32 flags, bool ndo_xmit)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        int i, ret = -ENXIO, nxmit = 0;
        struct net_device *rcv;
        unsigned int max_len;
        struct veth_rq *rq;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                goto out;

        rcv_priv = netdev_priv(rcv);
        rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* The napi pointer is set if NAPI is enabled, which ensures that
         * xdp_ring is initialized on receive side and the peer device is up.
         */
        if (!rcu_access_pointer(rq->napi))
                goto out;

        max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

        spin_lock(&rq->xdp_ring.producer_lock);
        for (i = 0; i < n; i++) {
                struct xdp_frame *frame = frames[i];
                void *ptr = veth_xdp_to_ptr(frame);

                if (unlikely(xdp_get_frame_len(frame) > max_len ||
                             __ptr_ring_produce(&rq->xdp_ring, ptr)))
                        break;
                nxmit++;
        }
        spin_unlock(&rq->xdp_ring.producer_lock);

        if (flags & XDP_XMIT_FLUSH)
                __veth_xdp_flush(rq);

        ret = nxmit;
        if (ndo_xmit) {
                u64_stats_update_begin(&rq->stats.syncp);
                rq->stats.vs.peer_tq_xdp_xmit += nxmit;
                rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
                u64_stats_update_end(&rq->stats.syncp);
        }

out:
        rcu_read_unlock();

        return ret;
}
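
/* On success, veth_xdp_xmit() returns the number of frames actually
 * queued; frames that were not queued are left to the caller to free
 * (veth_xdp_flush_bq() does so explicitly for internal XDP_TX).
 */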
505
506 static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
507                              struct xdp_frame **frames, u32 flags)
508 {
509         int err;
510
511         err = veth_xdp_xmit(dev, n, frames, flags, true);
512         if (err < 0) {
513                 struct veth_priv *priv = netdev_priv(dev);
514
515                 atomic64_add(n, &priv->dropped);
516         }
517
518         return err;
519 }
520
521 static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
522 {
523         int sent, i, err = 0, drops;
524
525         sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
526         if (sent < 0) {
527                 err = sent;
528                 sent = 0;
529         }
530
531         for (i = sent; unlikely(i < bq->count); i++)
532                 xdp_return_frame(bq->q[i]);
533
534         drops = bq->count - sent;
535         trace_xdp_bulk_tx(rq->dev, sent, drops, err);
536
537         u64_stats_update_begin(&rq->stats.syncp);
538         rq->stats.vs.xdp_tx += sent;
539         rq->stats.vs.xdp_tx_err += drops;
540         u64_stats_update_end(&rq->stats.syncp);
541
542         bq->count = 0;
543 }
544
545 static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
546 {
547         struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
548         struct net_device *rcv;
549         struct veth_rq *rcv_rq;
550
551         rcu_read_lock();
552         veth_xdp_flush_bq(rq, bq);
553         rcv = rcu_dereference(priv->peer);
554         if (unlikely(!rcv))
555                 goto out;
556
557         rcv_priv = netdev_priv(rcv);
558         rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
559         /* xdp_ring is initialized on receive side? */
560         if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
561                 goto out;
562
563         __veth_xdp_flush(rcv_rq);
564 out:
565         rcu_read_unlock();
566 }
567
568 static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
569                        struct veth_xdp_tx_bq *bq)
570 {
571         struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
572
573         if (unlikely(!frame))
574                 return -EOVERFLOW;
575
576         if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
577                 veth_xdp_flush_bq(rq, bq);
578
579         bq->q[bq->count++] = frame;
580
581         return 0;
582 }
583
584 static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
585                                           struct xdp_frame *frame,
586                                           struct veth_xdp_tx_bq *bq,
587                                           struct veth_stats *stats)
588 {
589         struct xdp_frame orig_frame;
590         struct bpf_prog *xdp_prog;
591
592         rcu_read_lock();
593         xdp_prog = rcu_dereference(rq->xdp_prog);
594         if (likely(xdp_prog)) {
595                 struct xdp_buff xdp;
596                 u32 act;
597
598                 xdp_convert_frame_to_buff(frame, &xdp);
599                 xdp.rxq = &rq->xdp_rxq;
600
601                 act = bpf_prog_run_xdp(xdp_prog, &xdp);
602
603                 switch (act) {
604                 case XDP_PASS:
605                         if (xdp_update_frame_from_buff(&xdp, frame))
606                                 goto err_xdp;
607                         break;
608                 case XDP_TX:
609                         orig_frame = *frame;
610                         xdp.rxq->mem = frame->mem;
611                         if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
612                                 trace_xdp_exception(rq->dev, xdp_prog, act);
613                                 frame = &orig_frame;
614                                 stats->rx_drops++;
615                                 goto err_xdp;
616                         }
617                         stats->xdp_tx++;
618                         rcu_read_unlock();
619                         goto xdp_xmit;
620                 case XDP_REDIRECT:
621                         orig_frame = *frame;
622                         xdp.rxq->mem = frame->mem;
623                         if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
624                                 frame = &orig_frame;
625                                 stats->rx_drops++;
626                                 goto err_xdp;
627                         }
628                         stats->xdp_redirect++;
629                         rcu_read_unlock();
630                         goto xdp_xmit;
631                 default:
632                         bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
633                         fallthrough;
634                 case XDP_ABORTED:
635                         trace_xdp_exception(rq->dev, xdp_prog, act);
636                         fallthrough;
637                 case XDP_DROP:
638                         stats->xdp_drops++;
639                         goto err_xdp;
640                 }
641         }
642         rcu_read_unlock();
643
644         return frame;
645 err_xdp:
646         rcu_read_unlock();
647         xdp_return_frame(frame);
648 xdp_xmit:
649         return NULL;
650 }
651
652 /* frames array contains VETH_XDP_BATCH at most */
653 static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
654                                   int n_xdpf, struct veth_xdp_tx_bq *bq,
655                                   struct veth_stats *stats)
656 {
657         void *skbs[VETH_XDP_BATCH];
658         int i;
659
660         if (xdp_alloc_skb_bulk(skbs, n_xdpf,
661                                GFP_ATOMIC | __GFP_ZERO) < 0) {
662                 for (i = 0; i < n_xdpf; i++)
663                         xdp_return_frame(frames[i]);
664                 stats->rx_drops += n_xdpf;
665
666                 return;
667         }
668
669         for (i = 0; i < n_xdpf; i++) {
670                 struct sk_buff *skb = skbs[i];
671
672                 skb = __xdp_build_skb_from_frame(frames[i], skb,
673                                                  rq->dev);
674                 if (!skb) {
675                         xdp_return_frame(frames[i]);
676                         stats->rx_drops++;
677                         continue;
678                 }
679                 napi_gro_receive(&rq->xdp_napi, skb);
680         }
681 }
682
683 static void veth_xdp_get(struct xdp_buff *xdp)
684 {
685         struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
686         int i;
687
688         get_page(virt_to_page(xdp->data));
689         if (likely(!xdp_buff_has_frags(xdp)))
690                 return;
691
692         for (i = 0; i < sinfo->nr_frags; i++)
693                 __skb_frag_ref(&sinfo->frags[i]);
694 }
695
696 static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
697                                         struct xdp_buff *xdp,
698                                         struct sk_buff **pskb)
699 {
700         struct sk_buff *skb = *pskb;
701         u32 frame_sz;
702
703         if (skb_shared(skb) || skb_head_is_locked(skb) ||
704             skb_shinfo(skb)->nr_frags ||
705             skb_headroom(skb) < XDP_PACKET_HEADROOM) {
706                 u32 size, len, max_head_size, off;
707                 struct sk_buff *nskb;
708                 struct page *page;
709                 int i, head_off;
710
711                 /* We need a private copy of the skb and data buffers since
712                  * the ebpf program can modify it. We segment the original skb
713                  * into order-0 pages without linearize it.
714                  *
715                  * Make sure we have enough space for linear and paged area
716                  */
717                 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
718                                                   VETH_XDP_HEADROOM);
719                 if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
720                         goto drop;
721
722                 /* Allocate skb head */
723                 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
724                 if (!page)
725                         goto drop;
726
727                 nskb = build_skb(page_address(page), PAGE_SIZE);
728                 if (!nskb) {
729                         put_page(page);
730                         goto drop;
731                 }
732
733                 skb_reserve(nskb, VETH_XDP_HEADROOM);
734                 size = min_t(u32, skb->len, max_head_size);
735                 if (skb_copy_bits(skb, 0, nskb->data, size)) {
736                         consume_skb(nskb);
737                         goto drop;
738                 }
739                 skb_put(nskb, size);
740
741                 skb_copy_header(nskb, skb);
742                 head_off = skb_headroom(nskb) - skb_headroom(skb);
743                 skb_headers_offset_update(nskb, head_off);
744
745                 /* Allocate paged area of new skb */
746                 off = size;
747                 len = skb->len - off;
748
749                 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
750                         page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
751                         if (!page) {
752                                 consume_skb(nskb);
753                                 goto drop;
754                         }
755
756                         size = min_t(u32, len, PAGE_SIZE);
757                         skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE);
758                         if (skb_copy_bits(skb, off, page_address(page),
759                                           size)) {
760                                 consume_skb(nskb);
761                                 goto drop;
762                         }
763
764                         len -= size;
765                         off += size;
766                 }
767
768                 consume_skb(skb);
769                 skb = nskb;
770         }
771
772         /* SKB "head" area always have tailroom for skb_shared_info */
773         frame_sz = skb_end_pointer(skb) - skb->head;
774         frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
775         xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
776         xdp_prepare_buff(xdp, skb->head, skb_headroom(skb),
777                          skb_headlen(skb), true);
778
779         if (skb_is_nonlinear(skb)) {
780                 skb_shinfo(skb)->xdp_frags_size = skb->data_len;
781                 xdp_buff_set_frags_flag(xdp);
782         } else {
783                 xdp_buff_clear_frags_flag(xdp);
784         }
785         *pskb = skb;
786
787         return 0;
788 drop:
789         consume_skb(skb);
790         *pskb = NULL;
791
792         return -ENOMEM;
793 }
794
795 static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
796                                         struct sk_buff *skb,
797                                         struct veth_xdp_tx_bq *bq,
798                                         struct veth_stats *stats)
799 {
800         void *orig_data, *orig_data_end;
801         struct bpf_prog *xdp_prog;
802         struct xdp_buff xdp;
803         u32 act, metalen;
804         int off;
805
806         skb_prepare_for_gro(skb);
807
808         rcu_read_lock();
809         xdp_prog = rcu_dereference(rq->xdp_prog);
810         if (unlikely(!xdp_prog)) {
811                 rcu_read_unlock();
812                 goto out;
813         }
814
815         __skb_push(skb, skb->data - skb_mac_header(skb));
816         if (veth_convert_skb_to_xdp_buff(rq, &xdp, &skb))
817                 goto drop;
818
819         orig_data = xdp.data;
820         orig_data_end = xdp.data_end;
821
822         act = bpf_prog_run_xdp(xdp_prog, &xdp);
823
824         switch (act) {
825         case XDP_PASS:
826                 break;
827         case XDP_TX:
828                 veth_xdp_get(&xdp);
829                 consume_skb(skb);
830                 xdp.rxq->mem = rq->xdp_mem;
831                 if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
832                         trace_xdp_exception(rq->dev, xdp_prog, act);
833                         stats->rx_drops++;
834                         goto err_xdp;
835                 }
836                 stats->xdp_tx++;
837                 rcu_read_unlock();
838                 goto xdp_xmit;
839         case XDP_REDIRECT:
840                 veth_xdp_get(&xdp);
841                 consume_skb(skb);
842                 xdp.rxq->mem = rq->xdp_mem;
843                 if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
844                         stats->rx_drops++;
845                         goto err_xdp;
846                 }
847                 stats->xdp_redirect++;
848                 rcu_read_unlock();
849                 goto xdp_xmit;
850         default:
851                 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
852                 fallthrough;
853         case XDP_ABORTED:
854                 trace_xdp_exception(rq->dev, xdp_prog, act);
855                 fallthrough;
856         case XDP_DROP:
857                 stats->xdp_drops++;
858                 goto xdp_drop;
859         }
860         rcu_read_unlock();
861
862         /* check if bpf_xdp_adjust_head was used */
863         off = orig_data - xdp.data;
864         if (off > 0)
865                 __skb_push(skb, off);
866         else if (off < 0)
867                 __skb_pull(skb, -off);
868
869         skb_reset_mac_header(skb);
870
871         /* check if bpf_xdp_adjust_tail was used */
872         off = xdp.data_end - orig_data_end;
873         if (off != 0)
874                 __skb_put(skb, off); /* positive on grow, negative on shrink */
875
876         /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
877          * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
878          */
879         if (xdp_buff_has_frags(&xdp))
880                 skb->data_len = skb_shinfo(skb)->xdp_frags_size;
881         else
882                 skb->data_len = 0;
883
884         skb->protocol = eth_type_trans(skb, rq->dev);
885
886         metalen = xdp.data - xdp.data_meta;
887         if (metalen)
888                 skb_metadata_set(skb, metalen);
889 out:
890         return skb;
891 drop:
892         stats->rx_drops++;
893 xdp_drop:
894         rcu_read_unlock();
895         kfree_skb(skb);
896         return NULL;
897 err_xdp:
898         rcu_read_unlock();
899         xdp_return_buff(&xdp);
900 xdp_xmit:
901         return NULL;
902 }
903
904 static int veth_xdp_rcv(struct veth_rq *rq, int budget,
905                         struct veth_xdp_tx_bq *bq,
906                         struct veth_stats *stats)
907 {
908         int i, done = 0, n_xdpf = 0;
909         void *xdpf[VETH_XDP_BATCH];
910
911         for (i = 0; i < budget; i++) {
912                 void *ptr = __ptr_ring_consume(&rq->xdp_ring);
913
914                 if (!ptr)
915                         break;
916
917                 if (veth_is_xdp_frame(ptr)) {
918                         /* ndo_xdp_xmit */
919                         struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
920
921                         stats->xdp_bytes += xdp_get_frame_len(frame);
922                         frame = veth_xdp_rcv_one(rq, frame, bq, stats);
923                         if (frame) {
924                                 /* XDP_PASS */
925                                 xdpf[n_xdpf++] = frame;
926                                 if (n_xdpf == VETH_XDP_BATCH) {
927                                         veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
928                                                               bq, stats);
929                                         n_xdpf = 0;
930                                 }
931                         }
932                 } else {
933                         /* ndo_start_xmit */
934                         struct sk_buff *skb = ptr;
935
936                         stats->xdp_bytes += skb->len;
937                         skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
938                         if (skb) {
939                                 if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
940                                         netif_receive_skb(skb);
941                                 else
942                                         napi_gro_receive(&rq->xdp_napi, skb);
943                         }
944                 }
945                 done++;
946         }
947
948         if (n_xdpf)
949                 veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);
950
951         u64_stats_update_begin(&rq->stats.syncp);
952         rq->stats.vs.xdp_redirect += stats->xdp_redirect;
953         rq->stats.vs.xdp_bytes += stats->xdp_bytes;
954         rq->stats.vs.xdp_drops += stats->xdp_drops;
955         rq->stats.vs.rx_drops += stats->rx_drops;
956         rq->stats.vs.xdp_packets += done;
957         u64_stats_update_end(&rq->stats.syncp);
958
959         return done;
960 }
961
962 static int veth_poll(struct napi_struct *napi, int budget)
963 {
964         struct veth_rq *rq =
965                 container_of(napi, struct veth_rq, xdp_napi);
966         struct veth_stats stats = {};
967         struct veth_xdp_tx_bq bq;
968         int done;
969
970         bq.count = 0;
971
972         xdp_set_return_frame_no_direct();
973         done = veth_xdp_rcv(rq, budget, &bq, &stats);
974
975         if (stats.xdp_redirect > 0)
976                 xdp_do_flush();
977
978         if (done < budget && napi_complete_done(napi, done)) {
979                 /* Write rx_notify_masked before reading ptr_ring */
980                 smp_store_mb(rq->rx_notify_masked, false);
981                 if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
982                         if (napi_schedule_prep(&rq->xdp_napi)) {
983                                 WRITE_ONCE(rq->rx_notify_masked, true);
984                                 __napi_schedule(&rq->xdp_napi);
985                         }
986                 }
987         }
988
989         if (stats.xdp_tx > 0)
990                 veth_xdp_flush(rq, &bq);
991         xdp_clear_return_frame_no_direct();
992
993         return done;
994 }
995
996 static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
997 {
998         struct veth_priv *priv = netdev_priv(dev);
999         int err, i;
1000
1001         for (i = start; i < end; i++) {
1002                 struct veth_rq *rq = &priv->rq[i];
1003
1004                 err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
1005                 if (err)
1006                         goto err_xdp_ring;
1007         }
1008
1009         for (i = start; i < end; i++) {
1010                 struct veth_rq *rq = &priv->rq[i];
1011
1012                 napi_enable(&rq->xdp_napi);
1013                 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
1014         }
1015
1016         return 0;
1017
1018 err_xdp_ring:
1019         for (i--; i >= start; i--)
1020                 ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
1021
1022         return err;
1023 }
1024
1025 static int __veth_napi_enable(struct net_device *dev)
1026 {
1027         return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1028 }
1029
1030 static void veth_napi_del_range(struct net_device *dev, int start, int end)
1031 {
1032         struct veth_priv *priv = netdev_priv(dev);
1033         int i;
1034
1035         for (i = start; i < end; i++) {
1036                 struct veth_rq *rq = &priv->rq[i];
1037
1038                 rcu_assign_pointer(priv->rq[i].napi, NULL);
1039                 napi_disable(&rq->xdp_napi);
1040                 __netif_napi_del(&rq->xdp_napi);
1041         }
1042         synchronize_net();
1043
1044         for (i = start; i < end; i++) {
1045                 struct veth_rq *rq = &priv->rq[i];
1046
1047                 rq->rx_notify_masked = false;
1048                 ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
1049         }
1050 }
1051
1052 static void veth_napi_del(struct net_device *dev)
1053 {
1054         veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
1055 }
1056
1057 static bool veth_gro_requested(const struct net_device *dev)
1058 {
1059         return !!(dev->wanted_features & NETIF_F_GRO);
1060 }
1061
1062 static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
1063                                  bool napi_already_on)
1064 {
1065         struct veth_priv *priv = netdev_priv(dev);
1066         int err, i;
1067
1068         for (i = start; i < end; i++) {
1069                 struct veth_rq *rq = &priv->rq[i];
1070
1071                 if (!napi_already_on)
1072                         netif_napi_add(dev, &rq->xdp_napi, veth_poll);
1073                 err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
1074                 if (err < 0)
1075                         goto err_rxq_reg;
1076
1077                 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
1078                                                  MEM_TYPE_PAGE_SHARED,
1079                                                  NULL);
1080                 if (err < 0)
1081                         goto err_reg_mem;
1082
1083                 /* Save original mem info as it can be overwritten */
1084                 rq->xdp_mem = rq->xdp_rxq.mem;
1085         }
1086         return 0;
1087
1088 err_reg_mem:
1089         xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
1090 err_rxq_reg:
1091         for (i--; i >= start; i--) {
1092                 struct veth_rq *rq = &priv->rq[i];
1093
1094                 xdp_rxq_info_unreg(&rq->xdp_rxq);
1095                 if (!napi_already_on)
1096                         netif_napi_del(&rq->xdp_napi);
1097         }
1098
1099         return err;
1100 }
1101
1102 static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
1103                                    bool delete_napi)
1104 {
1105         struct veth_priv *priv = netdev_priv(dev);
1106         int i;
1107
1108         for (i = start; i < end; i++) {
1109                 struct veth_rq *rq = &priv->rq[i];
1110
1111                 rq->xdp_rxq.mem = rq->xdp_mem;
1112                 xdp_rxq_info_unreg(&rq->xdp_rxq);
1113
1114                 if (delete_napi)
1115                         netif_napi_del(&rq->xdp_napi);
1116         }
1117 }
1118
1119 static int veth_enable_xdp(struct net_device *dev)
1120 {
1121         bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
1122         struct veth_priv *priv = netdev_priv(dev);
1123         int err, i;
1124
1125         if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
1126                 err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
1127                 if (err)
1128                         return err;
1129
1130                 if (!napi_already_on) {
1131                         err = __veth_napi_enable(dev);
1132                         if (err) {
1133                                 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
1134                                 return err;
1135                         }
1136
1137                         if (!veth_gro_requested(dev)) {
1138                                 /* user-space did not require GRO, but adding XDP
1139                                  * is supposed to get GRO working
1140                                  */
1141                                 dev->features |= NETIF_F_GRO;
1142                                 netdev_features_change(dev);
1143                         }
1144                 }
1145         }
1146
1147         for (i = 0; i < dev->real_num_rx_queues; i++) {
1148                 rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
1149                 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
1150         }
1151
1152         return 0;
1153 }
1154
1155 static void veth_disable_xdp(struct net_device *dev)
1156 {
1157         struct veth_priv *priv = netdev_priv(dev);
1158         int i;
1159
1160         for (i = 0; i < dev->real_num_rx_queues; i++)
1161                 rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
1162
1163         if (!netif_running(dev) || !veth_gro_requested(dev)) {
1164                 veth_napi_del(dev);
1165
1166                 /* if user-space did not require GRO, since adding XDP
1167                  * enabled it, clear it now
1168                  */
1169                 if (!veth_gro_requested(dev) && netif_running(dev)) {
1170                         dev->features &= ~NETIF_F_GRO;
1171                         netdev_features_change(dev);
1172                 }
1173         }
1174
1175         veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
1176 }
1177
1178 static int veth_napi_enable_range(struct net_device *dev, int start, int end)
1179 {
1180         struct veth_priv *priv = netdev_priv(dev);
1181         int err, i;
1182
1183         for (i = start; i < end; i++) {
1184                 struct veth_rq *rq = &priv->rq[i];
1185
1186                 netif_napi_add(dev, &rq->xdp_napi, veth_poll);
1187         }
1188
1189         err = __veth_napi_enable_range(dev, start, end);
1190         if (err) {
1191                 for (i = start; i < end; i++) {
1192                         struct veth_rq *rq = &priv->rq[i];
1193
1194                         netif_napi_del(&rq->xdp_napi);
1195                 }
1196                 return err;
1197         }
1198         return err;
1199 }
1200
1201 static int veth_napi_enable(struct net_device *dev)
1202 {
1203         return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1204 }
1205
1206 static void veth_disable_range_safe(struct net_device *dev, int start, int end)
1207 {
1208         struct veth_priv *priv = netdev_priv(dev);
1209
1210         if (start >= end)
1211                 return;
1212
1213         if (priv->_xdp_prog) {
1214                 veth_napi_del_range(dev, start, end);
1215                 veth_disable_xdp_range(dev, start, end, false);
1216         } else if (veth_gro_requested(dev)) {
1217                 veth_napi_del_range(dev, start, end);
1218         }
1219 }
1220
1221 static int veth_enable_range_safe(struct net_device *dev, int start, int end)
1222 {
1223         struct veth_priv *priv = netdev_priv(dev);
1224         int err;
1225
1226         if (start >= end)
1227                 return 0;
1228
1229         if (priv->_xdp_prog) {
1230                 /* these channels are freshly initialized, napi is not on there even
1231                  * when GRO is requeste
1232                  */
1233                 err = veth_enable_xdp_range(dev, start, end, false);
1234                 if (err)
1235                         return err;
1236
1237                 err = __veth_napi_enable_range(dev, start, end);
1238                 if (err) {
1239                         /* on error always delete the newly added napis */
1240                         veth_disable_xdp_range(dev, start, end, true);
1241                         return err;
1242                 }
1243         } else if (veth_gro_requested(dev)) {
1244                 return veth_napi_enable_range(dev, start, end);
1245         }
1246         return 0;
1247 }
1248
1249 static int veth_set_channels(struct net_device *dev,
1250                              struct ethtool_channels *ch)
1251 {
1252         struct veth_priv *priv = netdev_priv(dev);
1253         unsigned int old_rx_count, new_rx_count;
1254         struct veth_priv *peer_priv;
1255         struct net_device *peer;
1256         int err;
1257
1258         /* sanity check. Upper bounds are already enforced by the caller */
1259         if (!ch->rx_count || !ch->tx_count)
1260                 return -EINVAL;
1261
1262         /* avoid braking XDP, if that is enabled */
1263         peer = rtnl_dereference(priv->peer);
1264         peer_priv = peer ? netdev_priv(peer) : NULL;
1265         if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
1266                 return -EINVAL;
1267
1268         if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
1269                 return -EINVAL;
1270
1271         old_rx_count = dev->real_num_rx_queues;
1272         new_rx_count = ch->rx_count;
1273         if (netif_running(dev)) {
1274                 /* turn device off */
1275                 netif_carrier_off(dev);
1276                 if (peer)
1277                         netif_carrier_off(peer);
1278
1279                 /* try to allocate new resurces, as needed*/
1280                 err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
1281                 if (err)
1282                         goto out;
1283         }
1284
1285         err = netif_set_real_num_rx_queues(dev, ch->rx_count);
1286         if (err)
1287                 goto revert;
1288
1289         err = netif_set_real_num_tx_queues(dev, ch->tx_count);
1290         if (err) {
1291                 int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);
1292
1293                 /* this error condition could happen only if rx and tx change
1294                  * in opposite directions (e.g. tx nr raises, rx nr decreases)
1295                  * and we can't do anything to fully restore the original
1296                  * status
1297                  */
1298                 if (err2)
1299                         pr_warn("Can't restore rx queues config %d -> %d %d",
1300                                 new_rx_count, old_rx_count, err2);
1301                 else
1302                         goto revert;
1303         }
1304
1305 out:
1306         if (netif_running(dev)) {
1307                 /* note that we need to swap the arguments WRT the enable part
1308                  * to identify the range we have to disable
1309                  */
1310                 veth_disable_range_safe(dev, new_rx_count, old_rx_count);
1311                 netif_carrier_on(dev);
1312                 if (peer)
1313                         netif_carrier_on(peer);
1314         }
1315         return err;
1316
1317 revert:
1318         new_rx_count = old_rx_count;
1319         old_rx_count = ch->rx_count;
1320         goto out;
1321 }
1322
1323 static int veth_open(struct net_device *dev)
1324 {
1325         struct veth_priv *priv = netdev_priv(dev);
1326         struct net_device *peer = rtnl_dereference(priv->peer);
1327         int err;
1328
1329         if (!peer)
1330                 return -ENOTCONN;
1331
1332         if (priv->_xdp_prog) {
1333                 err = veth_enable_xdp(dev);
1334                 if (err)
1335                         return err;
1336         } else if (veth_gro_requested(dev)) {
1337                 err = veth_napi_enable(dev);
1338                 if (err)
1339                         return err;
1340         }
1341
1342         if (peer->flags & IFF_UP) {
1343                 netif_carrier_on(dev);
1344                 netif_carrier_on(peer);
1345         }
1346
1347         return 0;
1348 }
1349
1350 static int veth_close(struct net_device *dev)
1351 {
1352         struct veth_priv *priv = netdev_priv(dev);
1353         struct net_device *peer = rtnl_dereference(priv->peer);
1354
1355         netif_carrier_off(dev);
1356         if (peer)
1357                 netif_carrier_off(peer);
1358
1359         if (priv->_xdp_prog)
1360                 veth_disable_xdp(dev);
1361         else if (veth_gro_requested(dev))
1362                 veth_napi_del(dev);
1363
1364         return 0;
1365 }
1366
1367 static int is_valid_veth_mtu(int mtu)
1368 {
1369         return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
1370 }
1371
1372 static int veth_alloc_queues(struct net_device *dev)
1373 {
1374         struct veth_priv *priv = netdev_priv(dev);
1375         int i;
1376
1377         priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
1378         if (!priv->rq)
1379                 return -ENOMEM;
1380
1381         for (i = 0; i < dev->num_rx_queues; i++) {
1382                 priv->rq[i].dev = dev;
1383                 u64_stats_init(&priv->rq[i].stats.syncp);
1384         }
1385
1386         return 0;
1387 }
1388
1389 static void veth_free_queues(struct net_device *dev)
1390 {
1391         struct veth_priv *priv = netdev_priv(dev);
1392
1393         kfree(priv->rq);
1394 }
1395
1396 static int veth_dev_init(struct net_device *dev)
1397 {
1398         int err;
1399
1400         dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
1401         if (!dev->lstats)
1402                 return -ENOMEM;
1403
1404         err = veth_alloc_queues(dev);
1405         if (err) {
1406                 free_percpu(dev->lstats);
1407                 return err;
1408         }
1409
1410         return 0;
1411 }
1412
1413 static void veth_dev_free(struct net_device *dev)
1414 {
1415         veth_free_queues(dev);
1416         free_percpu(dev->lstats);
1417 }
1418
1419 #ifdef CONFIG_NET_POLL_CONTROLLER
1420 static void veth_poll_controller(struct net_device *dev)
1421 {
1422         /* veth only receives frames when its peer sends one
1423          * Since it has nothing to do with disabling irqs, we are guaranteed
1424          * never to have pending data when we poll for it so
1425          * there is nothing to do here.
1426          *
1427          * We need this though so netpoll recognizes us as an interface that
1428          * supports polling, which enables bridge devices in virt setups to
1429          * still use netconsole
1430          */
1431 }
1432 #endif  /* CONFIG_NET_POLL_CONTROLLER */
1433
1434 static int veth_get_iflink(const struct net_device *dev)
1435 {
1436         struct veth_priv *priv = netdev_priv(dev);
1437         struct net_device *peer;
1438         int iflink;
1439
1440         rcu_read_lock();
1441         peer = rcu_dereference(priv->peer);
1442         iflink = peer ? peer->ifindex : 0;
1443         rcu_read_unlock();
1444
1445         return iflink;
1446 }
1447
1448 static netdev_features_t veth_fix_features(struct net_device *dev,
1449                                            netdev_features_t features)
1450 {
1451         struct veth_priv *priv = netdev_priv(dev);
1452         struct net_device *peer;
1453
1454         peer = rtnl_dereference(priv->peer);
1455         if (peer) {
1456                 struct veth_priv *peer_priv = netdev_priv(peer);
1457
1458                 if (peer_priv->_xdp_prog)
1459                         features &= ~NETIF_F_GSO_SOFTWARE;
1460         }
1461         if (priv->_xdp_prog)
1462                 features |= NETIF_F_GRO;
1463
1464         return features;
1465 }
1466
1467 static int veth_set_features(struct net_device *dev,
1468                              netdev_features_t features)
1469 {
1470         netdev_features_t changed = features ^ dev->features;
1471         struct veth_priv *priv = netdev_priv(dev);
1472         int err;
1473
1474         if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
1475                 return 0;
1476
1477         if (features & NETIF_F_GRO) {
1478                 err = veth_napi_enable(dev);
1479                 if (err)
1480                         return err;
1481         } else {
1482                 veth_napi_del(dev);
1483         }
1484         return 0;
1485 }
1486
1487 static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
1488 {
1489         struct veth_priv *peer_priv, *priv = netdev_priv(dev);
1490         struct net_device *peer;
1491
1492         if (new_hr < 0)
1493                 new_hr = 0;
1494
1495         rcu_read_lock();
1496         peer = rcu_dereference(priv->peer);
1497         if (unlikely(!peer))
1498                 goto out;
1499
1500         peer_priv = netdev_priv(peer);
1501         priv->requested_headroom = new_hr;
1502         new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
1503         dev->needed_headroom = new_hr;
1504         peer->needed_headroom = new_hr;
1505
1506 out:
1507         rcu_read_unlock();
1508 }
1509
1510 static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1511                         struct netlink_ext_ack *extack)
1512 {
1513         struct veth_priv *priv = netdev_priv(dev);
1514         struct bpf_prog *old_prog;
1515         struct net_device *peer;
1516         unsigned int max_mtu;
1517         int err;
1518
1519         old_prog = priv->_xdp_prog;
1520         priv->_xdp_prog = prog;
1521         peer = rtnl_dereference(priv->peer);
1522
1523         if (prog) {
1524                 if (!peer) {
1525                         NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1526                         err = -ENOTCONN;
1527                         goto err;
1528                 }
1529
1530                 max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
1531                           peer->hard_header_len;
1532                 /* Allow increasing the max_mtu if the program supports
1533                  * XDP fragments.
1534                  */
1535                 if (prog->aux->xdp_has_frags)
1536                         max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
1537
1538                 if (peer->mtu > max_mtu) {
1539                         NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1540                         err = -ERANGE;
1541                         goto err;
1542                 }
1543
1544                 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1545                         NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1546                         err = -ENOSPC;
1547                         goto err;
1548                 }
1549
1550                 if (dev->flags & IFF_UP) {
1551                         err = veth_enable_xdp(dev);
1552                         if (err) {
1553                                 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1554                                 goto err;
1555                         }
1556                 }
1557
1558                 if (!old_prog) {
1559                         peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1560                         peer->max_mtu = max_mtu;
1561                 }
1562         }
1563
1564         if (old_prog) {
1565                 if (!prog) {
1566                         if (dev->flags & IFF_UP)
1567                                 veth_disable_xdp(dev);
1568
1569                         if (peer) {
1570                                 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1571                                 peer->max_mtu = ETH_MAX_MTU;
1572                         }
1573                 }
1574                 bpf_prog_put(old_prog);
1575         }
1576
1577         if ((!!old_prog ^ !!prog) && peer)
1578                 netdev_update_features(peer);
1579
1580         return 0;
1581 err:
1582         priv->_xdp_prog = old_prog;
1583
1584         return err;
1585 }
1586
1587 static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1588 {
1589         switch (xdp->command) {
1590         case XDP_SETUP_PROG:
1591                 return veth_xdp_set(dev, xdp->prog, xdp->extack);
1592         default:
1593                 return -EINVAL;
1594         }
1595 }
1596
1597 static const struct net_device_ops veth_netdev_ops = {
1598         .ndo_init            = veth_dev_init,
1599         .ndo_open            = veth_open,
1600         .ndo_stop            = veth_close,
1601         .ndo_start_xmit      = veth_xmit,
1602         .ndo_get_stats64     = veth_get_stats64,
1603         .ndo_set_rx_mode     = veth_set_multicast_list,
1604         .ndo_set_mac_address = eth_mac_addr,
1605 #ifdef CONFIG_NET_POLL_CONTROLLER
1606         .ndo_poll_controller    = veth_poll_controller,
1607 #endif
1608         .ndo_get_iflink         = veth_get_iflink,
1609         .ndo_fix_features       = veth_fix_features,
1610         .ndo_set_features       = veth_set_features,
1611         .ndo_features_check     = passthru_features_check,
1612         .ndo_set_rx_headroom    = veth_set_rx_headroom,
1613         .ndo_bpf                = veth_xdp,
1614         .ndo_xdp_xmit           = veth_ndo_xdp_xmit,
1615         .ndo_get_peer_dev       = veth_peer_dev,
1616 };
1617
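/* Editor's note (not in the original source): veth is a purely software
 * device, so it can advertise checksum, GSO and VLAN offloads
 * unconditionally; none of the features below ever touch hardware.
 */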
1618 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
1619                        NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
1620                        NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
1621                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1622                        NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)
1623
1624 static void veth_setup(struct net_device *dev)
1625 {
1626         ether_setup(dev);
1627
1628         dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1629         dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1630         dev->priv_flags |= IFF_NO_QUEUE;
1631         dev->priv_flags |= IFF_PHONY_HEADROOM;
1632
1633         dev->netdev_ops = &veth_netdev_ops;
1634         dev->ethtool_ops = &veth_ethtool_ops;
1635         dev->features |= NETIF_F_LLTX;
1636         dev->features |= VETH_FEATURES;
1637         dev->vlan_features = dev->features &
1638                              ~(NETIF_F_HW_VLAN_CTAG_TX |
1639                                NETIF_F_HW_VLAN_STAG_TX |
1640                                NETIF_F_HW_VLAN_CTAG_RX |
1641                                NETIF_F_HW_VLAN_STAG_RX);
1642         dev->needs_free_netdev = true;
1643         dev->priv_destructor = veth_dev_free;
1644         dev->max_mtu = ETH_MAX_MTU;
1645
1646         dev->hw_features = VETH_FEATURES;
1647         dev->hw_enc_features = VETH_FEATURES;
1648         dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
1649         netif_set_tso_max_size(dev, GSO_MAX_SIZE);
1650 }
1651
1652 /*
1653  * netlink interface
1654  */
1655
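/* Editor's note (not in the original source): typical userspace usage
 * of this netlink interface (device and namespace names are examples
 * only):
 *
 *      ip link add veth0 type veth peer name veth1
 *      ip link set veth1 netns mynetns
 *
 * The first command reaches veth_newlink() below via rtnetlink; the
 * VETH_INFO_PEER attribute carries the peer's ifinfomsg and attributes.
 */
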
1656 static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
1657                          struct netlink_ext_ack *extack)
1658 {
1659         if (tb[IFLA_ADDRESS]) {
1660                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1661                         return -EINVAL;
1662                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1663                         return -EADDRNOTAVAIL;
1664         }
1665         if (tb[IFLA_MTU]) {
1666                 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
1667                         return -EINVAL;
1668         }
1669         return 0;
1670 }
1671
1672 static struct rtnl_link_ops veth_link_ops;
1673
1674 static void veth_disable_gro(struct net_device *dev)
1675 {
1676         dev->features &= ~NETIF_F_GRO;
1677         dev->wanted_features &= ~NETIF_F_GRO;
1678         netdev_update_features(dev);
1679 }
1680
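/* Editor's note (not in the original source): unless the user requested
 * explicit queue counts (IFLA_NUM_TX_QUEUES/IFLA_NUM_RX_QUEUES), the
 * function below collapses the real queue counts back to 1. The device
 * is allocated with one queue per possible CPU (see
 * veth_get_num_queues()), but veth has historically exposed a single
 * queue by default.
 */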
1681 static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
1682 {
1683         int err;
1684
1685         if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
1686                 err = netif_set_real_num_tx_queues(dev, 1);
1687                 if (err)
1688                         return err;
1689         }
1690         if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
1691                 err = netif_set_real_num_rx_queues(dev, 1);
1692                 if (err)
1693                         return err;
1694         }
1695         return 0;
1696 }
1697
1698 static int veth_newlink(struct net *src_net, struct net_device *dev,
1699                         struct nlattr *tb[], struct nlattr *data[],
1700                         struct netlink_ext_ack *extack)
1701 {
1702         int err;
1703         struct net_device *peer;
1704         struct veth_priv *priv;
1705         char ifname[IFNAMSIZ];
1706         struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
1707         unsigned char name_assign_type;
1708         struct ifinfomsg *ifmp;
1709         struct net *net;
1710
1711         /*
1712          * create and register peer first
1713          */
1714         if (data != NULL && data[VETH_INFO_PEER] != NULL) {
1715                 struct nlattr *nla_peer;
1716
1717                 nla_peer = data[VETH_INFO_PEER];
1718                 ifmp = nla_data(nla_peer);
1719                 err = rtnl_nla_parse_ifla(peer_tb,
1720                                           nla_data(nla_peer) + sizeof(struct ifinfomsg),
1721                                           nla_len(nla_peer) - sizeof(struct ifinfomsg),
1722                                           NULL);
1723                 if (err < 0)
1724                         return err;
1725
1726                 err = veth_validate(peer_tb, NULL, extack);
1727                 if (err < 0)
1728                         return err;
1729
1730                 tbp = peer_tb;
1731         } else {
1732                 ifmp = NULL;
1733                 tbp = tb;
1734         }
1735
1736         if (ifmp && tbp[IFLA_IFNAME]) {
1737                 nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
1738                 name_assign_type = NET_NAME_USER;
1739         } else {
1740                 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
1741                 name_assign_type = NET_NAME_ENUM;
1742         }
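        /* Editor's note (not in the original source): the "veth%d"
         * template (the literal '%' comes from the escaped "%%d" above)
         * is expanded to the first free index when the device is
         * registered; NET_NAME_ENUM marks the name as kernel-enumerated
         * rather than user-supplied.
         */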
1743
1744         net = rtnl_link_get_net(src_net, tbp);
1745         if (IS_ERR(net))
1746                 return PTR_ERR(net);
1747
1748         peer = rtnl_create_link(net, ifname, name_assign_type,
1749                                 &veth_link_ops, tbp, extack);
1750         if (IS_ERR(peer)) {
1751                 put_net(net);
1752                 return PTR_ERR(peer);
1753         }
1754
1755         if (!ifmp || !tbp[IFLA_ADDRESS])
1756                 eth_hw_addr_random(peer);
1757
1758         if (ifmp && (dev->ifindex != 0))
1759                 peer->ifindex = ifmp->ifi_index;
1760
1761         netif_inherit_tso_max(peer, dev);
1762
1763         err = register_netdevice(peer);
1764         put_net(net);
1765         net = NULL;
1766         if (err < 0)
1767                 goto err_register_peer;
1768
1769         /* keep GRO disabled by default to be consistent with the established
1770          * veth behavior
1771          */
1772         veth_disable_gro(peer);
1773         netif_carrier_off(peer);
1774
1775         err = rtnl_configure_link(peer, ifmp);
1776         if (err < 0)
1777                 goto err_configure_peer;
1778
1779         /*
1780          * register dev last
1781          *
1782          * note that, since we've registered a new device, the dev's
1783          * name should be re-allocated
1784          */
1785
1786         if (tb[IFLA_ADDRESS] == NULL)
1787                 eth_hw_addr_random(dev);
1788
1789         if (tb[IFLA_IFNAME])
1790                 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
1791         else
1792                 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
1793
1794         err = register_netdevice(dev);
1795         if (err < 0)
1796                 goto err_register_dev;
1797
1798         netif_carrier_off(dev);
1799
1800         /*
1801          * tie the devices together
1802          */
1803
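        /* Editor's note (not in the original source): each side's
         * priv->peer is published with rcu_assign_pointer() so that
         * readers under rcu_read_lock() (e.g. veth_xmit()) only ever
         * observe a fully initialized peer device.
         */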
1804         priv = netdev_priv(dev);
1805         rcu_assign_pointer(priv->peer, peer);
1806         err = veth_init_queues(dev, tb);
1807         if (err)
1808                 goto err_queues;
1809
1810         priv = netdev_priv(peer);
1811         rcu_assign_pointer(priv->peer, dev);
1812         err = veth_init_queues(peer, tb);
1813         if (err)
1814                 goto err_queues;
1815
1816         veth_disable_gro(dev);
1817         return 0;
1818
1819 err_queues:
1820         unregister_netdevice(dev);
1821 err_register_dev:
1822         /* nothing to do */
1823 err_configure_peer:
1824         unregister_netdevice(peer);
1825         return err;
1826
1827 err_register_peer:
1828         free_netdev(peer);
1829         return err;
1830 }
1831
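/* Editor's note (not in the original source): dellink tears down both
 * ends of the pair. Each device is queued on the same list head, so
 * callers batching deletions (e.g. netns cleanup) can unregister the
 * whole pair in a single rtnl operation.
 */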
1832 static void veth_dellink(struct net_device *dev, struct list_head *head)
1833 {
1834         struct veth_priv *priv;
1835         struct net_device *peer;
1836
1837         priv = netdev_priv(dev);
1838         peer = rtnl_dereference(priv->peer);
1839
1840         /* Note: dellink() is called from default_device_exit_batch(),
1841          * before a synchronize_rcu() point. The devices are guaranteed
1842          * not to be freed before one RCU grace period has elapsed.
1843          */
1844         RCU_INIT_POINTER(priv->peer, NULL);
1845         unregister_netdevice_queue(dev, head);
1846
1847         if (peer) {
1848                 priv = netdev_priv(peer);
1849                 RCU_INIT_POINTER(priv->peer, NULL);
1850                 unregister_netdevice_queue(peer, head);
1851         }
1852 }
1853
1854 static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1855         [VETH_INFO_PEER]        = { .len = sizeof(struct ifinfomsg) },
1856 };
1857
1858 static struct net *veth_get_link_net(const struct net_device *dev)
1859 {
1860         struct veth_priv *priv = netdev_priv(dev);
1861         struct net_device *peer = rtnl_dereference(priv->peer);
1862
1863         return peer ? dev_net(peer) : dev_net(dev);
1864 }
1865
1866 static unsigned int veth_get_num_queues(void)
1867 {
1868         /* enforce the same queue limit as rtnl_create_link */
1869         int queues = num_possible_cpus();
1870
1871         if (queues > 4096)
1872                 queues = 4096;
1873         return queues;
1874 }
1875
1876 static struct rtnl_link_ops veth_link_ops = {
1877         .kind           = DRV_NAME,
1878         .priv_size      = sizeof(struct veth_priv),
1879         .setup          = veth_setup,
1880         .validate       = veth_validate,
1881         .newlink        = veth_newlink,
1882         .dellink        = veth_dellink,
1883         .policy         = veth_policy,
1884         .maxtype        = VETH_INFO_MAX,
1885         .get_link_net   = veth_get_link_net,
1886         .get_num_tx_queues      = veth_get_num_queues,
1887         .get_num_rx_queues      = veth_get_num_queues,
1888 };
1889
1890 /*
1891  * init/fini
1892  */
1893
1894 static __init int veth_init(void)
1895 {
1896         return rtnl_link_register(&veth_link_ops);
1897 }
1898
1899 static __exit void veth_exit(void)
1900 {
1901         rtnl_link_unregister(&veth_link_ops);
1902 }
1903
1904 module_init(veth_init);
1905 module_exit(veth_exit);
1906
1907 MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
1908 MODULE_LICENSE("GPL v2");
1909 MODULE_ALIAS_RTNL_LINK(DRV_NAME);