GNU Linux-libre 5.10.153-gnu1
[releases.git] / drivers / net / xen-netfront.c
1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47 #include <linux/bpf.h>
48 #include <net/page_pool.h>
49 #include <linux/bpf_trace.h>
50
51 #include <xen/xen.h>
52 #include <xen/xenbus.h>
53 #include <xen/events.h>
54 #include <xen/page.h>
55 #include <xen/platform_pci.h>
56 #include <xen/grant_table.h>
57
58 #include <xen/interface/io/netif.h>
59 #include <xen/interface/memory.h>
60 #include <xen/interface/grant_table.h>
61
62 /* Module parameters */
63 #define MAX_QUEUES_DEFAULT 8
64 static unsigned int xennet_max_queues;
65 module_param_named(max_queues, xennet_max_queues, uint, 0644);
66 MODULE_PARM_DESC(max_queues,
67                  "Maximum number of queues per virtual interface");
68
69 static bool __read_mostly xennet_trusted = true;
70 module_param_named(trusted, xennet_trusted, bool, 0644);
71 MODULE_PARM_DESC(trusted, "Is the backend trusted");
72
73 #define XENNET_TIMEOUT  (5 * HZ)
74
75 static const struct ethtool_ops xennet_ethtool_ops;
76
77 struct netfront_cb {
78         int pull_to;
79 };
80
81 #define NETFRONT_SKB_CB(skb)    ((struct netfront_cb *)((skb)->cb))
82
83 #define RX_COPY_THRESHOLD 256
84
85 #define GRANT_INVALID_REF       0
86
87 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
88 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
89
90 /* Minimum number of Rx slots (includes slot for GSO metadata). */
91 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
92
93 /* Queue name is interface name with "-qNNN" appended */
94 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
95
96 /* IRQ name is queue name with "-tx" or "-rx" appended */
97 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
98
99 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
100
101 struct netfront_stats {
102         u64                     packets;
103         u64                     bytes;
104         struct u64_stats_sync   syncp;
105 };
106
107 struct netfront_info;
108
109 struct netfront_queue {
110         unsigned int id; /* Queue ID, 0-based */
111         char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
112         struct netfront_info *info;
113
114         struct bpf_prog __rcu *xdp_prog;
115
116         struct napi_struct napi;
117
118         /* Split event channels support, tx_* == rx_* when using
119          * single event channel.
120          */
121         unsigned int tx_evtchn, rx_evtchn;
122         unsigned int tx_irq, rx_irq;
123         /* Only used when split event channels support is enabled */
124         char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
125         char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
126
127         spinlock_t   tx_lock;
128         struct xen_netif_tx_front_ring tx;
129         int tx_ring_ref;
130
131         /*
132          * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
133          * are linked from tx_skb_freelist through tx_link.
134          */
135         struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
136         unsigned short tx_link[NET_TX_RING_SIZE];
137 #define TX_LINK_NONE 0xffff
138 #define TX_PENDING   0xfffe
139         grant_ref_t gref_tx_head;
140         grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
141         struct page *grant_tx_page[NET_TX_RING_SIZE];
142         unsigned tx_skb_freelist;
143         unsigned int tx_pend_queue;
144
145         spinlock_t   rx_lock ____cacheline_aligned_in_smp;
146         struct xen_netif_rx_front_ring rx;
147         int rx_ring_ref;
148
149         struct timer_list rx_refill_timer;
150
151         struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
152         grant_ref_t gref_rx_head;
153         grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
154
155         unsigned int rx_rsp_unconsumed;
156         spinlock_t rx_cons_lock;
157
158         struct page_pool *page_pool;
159         struct xdp_rxq_info xdp_rxq;
160 };
161
162 struct netfront_info {
163         struct list_head list;
164         struct net_device *netdev;
165
166         struct xenbus_device *xbdev;
167
168         /* Multi-queue support */
169         struct netfront_queue *queues;
170
171         /* Statistics */
172         struct netfront_stats __percpu *rx_stats;
173         struct netfront_stats __percpu *tx_stats;
174
175         /* XDP state */
176         bool netback_has_xdp_headroom;
177         bool netfront_xdp_enabled;
178
179         /* Is device behaving sane? */
180         bool broken;
181
182         /* Should skbs be bounced into a zeroed buffer? */
183         bool bounce;
184
185         atomic_t rx_gso_checksum_fixup;
186 };
187
188 struct netfront_rx_info {
189         struct xen_netif_rx_response rx;
190         struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
191 };
192
193 /*
194  * Access macros for acquiring freeing slots in tx_skbs[].
195  */
196
197 static void add_id_to_list(unsigned *head, unsigned short *list,
198                            unsigned short id)
199 {
200         list[id] = *head;
201         *head = id;
202 }
203
204 static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
205 {
206         unsigned int id = *head;
207
208         if (id != TX_LINK_NONE) {
209                 *head = list[id];
210                 list[id] = TX_LINK_NONE;
211         }
212         return id;
213 }
214
215 static int xennet_rxidx(RING_IDX idx)
216 {
217         return idx & (NET_RX_RING_SIZE - 1);
218 }
219
220 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
221                                          RING_IDX ri)
222 {
223         int i = xennet_rxidx(ri);
224         struct sk_buff *skb = queue->rx_skbs[i];
225         queue->rx_skbs[i] = NULL;
226         return skb;
227 }
228
229 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
230                                             RING_IDX ri)
231 {
232         int i = xennet_rxidx(ri);
233         grant_ref_t ref = queue->grant_rx_ref[i];
234         queue->grant_rx_ref[i] = GRANT_INVALID_REF;
235         return ref;
236 }
237
238 #ifdef CONFIG_SYSFS
239 static const struct attribute_group xennet_dev_group;
240 #endif
241
242 static bool xennet_can_sg(struct net_device *dev)
243 {
244         return dev->features & NETIF_F_SG;
245 }
246
247
248 static void rx_refill_timeout(struct timer_list *t)
249 {
250         struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
251         napi_schedule(&queue->napi);
252 }
253
254 static int netfront_tx_slot_available(struct netfront_queue *queue)
255 {
256         return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
257                 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
258 }
259
260 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
261 {
262         struct net_device *dev = queue->info->netdev;
263         struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
264
265         if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
266             netfront_tx_slot_available(queue) &&
267             likely(netif_running(dev)))
268                 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
269 }
270
271
272 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
273 {
274         struct sk_buff *skb;
275         struct page *page;
276
277         skb = __netdev_alloc_skb(queue->info->netdev,
278                                  RX_COPY_THRESHOLD + NET_IP_ALIGN,
279                                  GFP_ATOMIC | __GFP_NOWARN);
280         if (unlikely(!skb))
281                 return NULL;
282
283         page = page_pool_alloc_pages(queue->page_pool,
284                                      GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
285         if (unlikely(!page)) {
286                 kfree_skb(skb);
287                 return NULL;
288         }
289         skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
290
291         /* Align ip header to a 16 bytes boundary */
292         skb_reserve(skb, NET_IP_ALIGN);
293         skb->dev = queue->info->netdev;
294
295         return skb;
296 }
297
298
299 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
300 {
301         RING_IDX req_prod = queue->rx.req_prod_pvt;
302         int notify;
303         int err = 0;
304
305         if (unlikely(!netif_carrier_ok(queue->info->netdev)))
306                 return;
307
308         for (req_prod = queue->rx.req_prod_pvt;
309              req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
310              req_prod++) {
311                 struct sk_buff *skb;
312                 unsigned short id;
313                 grant_ref_t ref;
314                 struct page *page;
315                 struct xen_netif_rx_request *req;
316
317                 skb = xennet_alloc_one_rx_buffer(queue);
318                 if (!skb) {
319                         err = -ENOMEM;
320                         break;
321                 }
322
323                 id = xennet_rxidx(req_prod);
324
325                 BUG_ON(queue->rx_skbs[id]);
326                 queue->rx_skbs[id] = skb;
327
328                 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
329                 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
330                 queue->grant_rx_ref[id] = ref;
331
332                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
333
334                 req = RING_GET_REQUEST(&queue->rx, req_prod);
335                 gnttab_page_grant_foreign_access_ref_one(ref,
336                                                          queue->info->xbdev->otherend_id,
337                                                          page,
338                                                          0);
339                 req->id = id;
340                 req->gref = ref;
341         }
342
343         queue->rx.req_prod_pvt = req_prod;
344
345         /* Try again later if there are not enough requests or skb allocation
346          * failed.
347          * Enough requests is quantified as the sum of newly created slots and
348          * the unconsumed slots at the backend.
349          */
350         if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
351             unlikely(err)) {
352                 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
353                 return;
354         }
355
356         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
357         if (notify)
358                 notify_remote_via_irq(queue->rx_irq);
359 }
360
361 static int xennet_open(struct net_device *dev)
362 {
363         struct netfront_info *np = netdev_priv(dev);
364         unsigned int num_queues = dev->real_num_tx_queues;
365         unsigned int i = 0;
366         struct netfront_queue *queue = NULL;
367
368         if (!np->queues || np->broken)
369                 return -ENODEV;
370
371         for (i = 0; i < num_queues; ++i) {
372                 queue = &np->queues[i];
373                 napi_enable(&queue->napi);
374
375                 spin_lock_bh(&queue->rx_lock);
376                 if (netif_carrier_ok(dev)) {
377                         xennet_alloc_rx_buffers(queue);
378                         queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
379                         if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
380                                 napi_schedule(&queue->napi);
381                 }
382                 spin_unlock_bh(&queue->rx_lock);
383         }
384
385         netif_tx_start_all_queues(dev);
386
387         return 0;
388 }
389
390 static bool xennet_tx_buf_gc(struct netfront_queue *queue)
391 {
392         RING_IDX cons, prod;
393         unsigned short id;
394         struct sk_buff *skb;
395         bool more_to_do;
396         bool work_done = false;
397         const struct device *dev = &queue->info->netdev->dev;
398
399         BUG_ON(!netif_carrier_ok(queue->info->netdev));
400
401         do {
402                 prod = queue->tx.sring->rsp_prod;
403                 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
404                         dev_alert(dev, "Illegal number of responses %u\n",
405                                   prod - queue->tx.rsp_cons);
406                         goto err;
407                 }
408                 rmb(); /* Ensure we see responses up to 'rp'. */
409
410                 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
411                         struct xen_netif_tx_response txrsp;
412
413                         work_done = true;
414
415                         RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
416                         if (txrsp.status == XEN_NETIF_RSP_NULL)
417                                 continue;
418
419                         id = txrsp.id;
420                         if (id >= RING_SIZE(&queue->tx)) {
421                                 dev_alert(dev,
422                                           "Response has incorrect id (%u)\n",
423                                           id);
424                                 goto err;
425                         }
426                         if (queue->tx_link[id] != TX_PENDING) {
427                                 dev_alert(dev,
428                                           "Response for inactive request\n");
429                                 goto err;
430                         }
431
432                         queue->tx_link[id] = TX_LINK_NONE;
433                         skb = queue->tx_skbs[id];
434                         queue->tx_skbs[id] = NULL;
435                         if (unlikely(!gnttab_end_foreign_access_ref(
436                                 queue->grant_tx_ref[id], GNTMAP_readonly))) {
437                                 dev_alert(dev,
438                                           "Grant still in use by backend domain\n");
439                                 goto err;
440                         }
441                         gnttab_release_grant_reference(
442                                 &queue->gref_tx_head, queue->grant_tx_ref[id]);
443                         queue->grant_tx_ref[id] = GRANT_INVALID_REF;
444                         queue->grant_tx_page[id] = NULL;
445                         add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
446                         dev_kfree_skb_irq(skb);
447                 }
448
449                 queue->tx.rsp_cons = prod;
450
451                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
452         } while (more_to_do);
453
454         xennet_maybe_wake_tx(queue);
455
456         return work_done;
457
458  err:
459         queue->info->broken = true;
460         dev_alert(dev, "Disabled for further use\n");
461
462         return work_done;
463 }
464
465 struct xennet_gnttab_make_txreq {
466         struct netfront_queue *queue;
467         struct sk_buff *skb;
468         struct page *page;
469         struct xen_netif_tx_request *tx;      /* Last request on ring page */
470         struct xen_netif_tx_request tx_local; /* Last request local copy*/
471         unsigned int size;
472 };
473
474 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
475                                   unsigned int len, void *data)
476 {
477         struct xennet_gnttab_make_txreq *info = data;
478         unsigned int id;
479         struct xen_netif_tx_request *tx;
480         grant_ref_t ref;
481         /* convenient aliases */
482         struct page *page = info->page;
483         struct netfront_queue *queue = info->queue;
484         struct sk_buff *skb = info->skb;
485
486         id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
487         tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
488         ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
489         WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
490
491         gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
492                                         gfn, GNTMAP_readonly);
493
494         queue->tx_skbs[id] = skb;
495         queue->grant_tx_page[id] = page;
496         queue->grant_tx_ref[id] = ref;
497
498         info->tx_local.id = id;
499         info->tx_local.gref = ref;
500         info->tx_local.offset = offset;
501         info->tx_local.size = len;
502         info->tx_local.flags = 0;
503
504         *tx = info->tx_local;
505
506         /*
507          * Put the request in the pending queue, it will be set to be pending
508          * when the producer index is about to be raised.
509          */
510         add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
511
512         info->tx = tx;
513         info->size += info->tx_local.size;
514 }
515
516 static struct xen_netif_tx_request *xennet_make_first_txreq(
517         struct xennet_gnttab_make_txreq *info,
518         unsigned int offset, unsigned int len)
519 {
520         info->size = 0;
521
522         gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
523
524         return info->tx;
525 }
526
527 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
528                                   unsigned int len, void *data)
529 {
530         struct xennet_gnttab_make_txreq *info = data;
531
532         info->tx->flags |= XEN_NETTXF_more_data;
533         skb_get(info->skb);
534         xennet_tx_setup_grant(gfn, offset, len, data);
535 }
536
537 static void xennet_make_txreqs(
538         struct xennet_gnttab_make_txreq *info,
539         struct page *page,
540         unsigned int offset, unsigned int len)
541 {
542         /* Skip unused frames from start of page */
543         page += offset >> PAGE_SHIFT;
544         offset &= ~PAGE_MASK;
545
546         while (len) {
547                 info->page = page;
548                 info->size = 0;
549
550                 gnttab_foreach_grant_in_range(page, offset, len,
551                                               xennet_make_one_txreq,
552                                               info);
553
554                 page++;
555                 offset = 0;
556                 len -= info->size;
557         }
558 }
559
560 /*
561  * Count how many ring slots are required to send this skb. Each frag
562  * might be a compound page.
563  */
564 static int xennet_count_skb_slots(struct sk_buff *skb)
565 {
566         int i, frags = skb_shinfo(skb)->nr_frags;
567         int slots;
568
569         slots = gnttab_count_grant(offset_in_page(skb->data),
570                                    skb_headlen(skb));
571
572         for (i = 0; i < frags; i++) {
573                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
574                 unsigned long size = skb_frag_size(frag);
575                 unsigned long offset = skb_frag_off(frag);
576
577                 /* Skip unused frames from start of page */
578                 offset &= ~PAGE_MASK;
579
580                 slots += gnttab_count_grant(offset, size);
581         }
582
583         return slots;
584 }
585
586 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
587                                struct net_device *sb_dev)
588 {
589         unsigned int num_queues = dev->real_num_tx_queues;
590         u32 hash;
591         u16 queue_idx;
592
593         /* First, check if there is only one queue */
594         if (num_queues == 1) {
595                 queue_idx = 0;
596         } else {
597                 hash = skb_get_hash(skb);
598                 queue_idx = hash % num_queues;
599         }
600
601         return queue_idx;
602 }
603
604 static void xennet_mark_tx_pending(struct netfront_queue *queue)
605 {
606         unsigned int i;
607
608         while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
609                TX_LINK_NONE)
610                 queue->tx_link[i] = TX_PENDING;
611 }
612
613 static int xennet_xdp_xmit_one(struct net_device *dev,
614                                struct netfront_queue *queue,
615                                struct xdp_frame *xdpf)
616 {
617         struct netfront_info *np = netdev_priv(dev);
618         struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
619         struct xennet_gnttab_make_txreq info = {
620                 .queue = queue,
621                 .skb = NULL,
622                 .page = virt_to_page(xdpf->data),
623         };
624         int notify;
625
626         xennet_make_first_txreq(&info,
627                                 offset_in_page(xdpf->data),
628                                 xdpf->len);
629
630         xennet_mark_tx_pending(queue);
631
632         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
633         if (notify)
634                 notify_remote_via_irq(queue->tx_irq);
635
636         u64_stats_update_begin(&tx_stats->syncp);
637         tx_stats->bytes += xdpf->len;
638         tx_stats->packets++;
639         u64_stats_update_end(&tx_stats->syncp);
640
641         xennet_tx_buf_gc(queue);
642
643         return 0;
644 }
645
646 static int xennet_xdp_xmit(struct net_device *dev, int n,
647                            struct xdp_frame **frames, u32 flags)
648 {
649         unsigned int num_queues = dev->real_num_tx_queues;
650         struct netfront_info *np = netdev_priv(dev);
651         struct netfront_queue *queue = NULL;
652         unsigned long irq_flags;
653         int drops = 0;
654         int i, err;
655
656         if (unlikely(np->broken))
657                 return -ENODEV;
658         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
659                 return -EINVAL;
660
661         queue = &np->queues[smp_processor_id() % num_queues];
662
663         spin_lock_irqsave(&queue->tx_lock, irq_flags);
664         for (i = 0; i < n; i++) {
665                 struct xdp_frame *xdpf = frames[i];
666
667                 if (!xdpf)
668                         continue;
669                 err = xennet_xdp_xmit_one(dev, queue, xdpf);
670                 if (err) {
671                         xdp_return_frame_rx_napi(xdpf);
672                         drops++;
673                 }
674         }
675         spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
676
677         return n - drops;
678 }
679
680 struct sk_buff *bounce_skb(const struct sk_buff *skb)
681 {
682         unsigned int headerlen = skb_headroom(skb);
683         /* Align size to allocate full pages and avoid contiguous data leaks */
684         unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
685                                   XEN_PAGE_SIZE);
686         struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
687
688         if (!n)
689                 return NULL;
690
691         if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
692                 WARN_ONCE(1, "misaligned skb allocated\n");
693                 kfree_skb(n);
694                 return NULL;
695         }
696
697         /* Set the data pointer */
698         skb_reserve(n, headerlen);
699         /* Set the tail pointer and length */
700         skb_put(n, skb->len);
701
702         BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
703
704         skb_copy_header(n, skb);
705         return n;
706 }
707
708 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
709
710 static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
711 {
712         struct netfront_info *np = netdev_priv(dev);
713         struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
714         struct xen_netif_tx_request *first_tx;
715         unsigned int i;
716         int notify;
717         int slots;
718         struct page *page;
719         unsigned int offset;
720         unsigned int len;
721         unsigned long flags;
722         struct netfront_queue *queue = NULL;
723         struct xennet_gnttab_make_txreq info = { };
724         unsigned int num_queues = dev->real_num_tx_queues;
725         u16 queue_index;
726         struct sk_buff *nskb;
727
728         /* Drop the packet if no queues are set up */
729         if (num_queues < 1)
730                 goto drop;
731         if (unlikely(np->broken))
732                 goto drop;
733         /* Determine which queue to transmit this SKB on */
734         queue_index = skb_get_queue_mapping(skb);
735         queue = &np->queues[queue_index];
736
737         /* If skb->len is too big for wire format, drop skb and alert
738          * user about misconfiguration.
739          */
740         if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
741                 net_alert_ratelimited(
742                         "xennet: skb->len = %u, too big for wire format\n",
743                         skb->len);
744                 goto drop;
745         }
746
747         slots = xennet_count_skb_slots(skb);
748         if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
749                 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
750                                     slots, skb->len);
751                 if (skb_linearize(skb))
752                         goto drop;
753         }
754
755         page = virt_to_page(skb->data);
756         offset = offset_in_page(skb->data);
757
758         /* The first req should be at least ETH_HLEN size or the packet will be
759          * dropped by netback.
760          *
761          * If the backend is not trusted bounce all data to zeroed pages to
762          * avoid exposing contiguous data on the granted page not belonging to
763          * the skb.
764          */
765         if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
766                 nskb = bounce_skb(skb);
767                 if (!nskb)
768                         goto drop;
769                 dev_consume_skb_any(skb);
770                 skb = nskb;
771                 page = virt_to_page(skb->data);
772                 offset = offset_in_page(skb->data);
773         }
774
775         len = skb_headlen(skb);
776
777         spin_lock_irqsave(&queue->tx_lock, flags);
778
779         if (unlikely(!netif_carrier_ok(dev) ||
780                      (slots > 1 && !xennet_can_sg(dev)) ||
781                      netif_needs_gso(skb, netif_skb_features(skb)))) {
782                 spin_unlock_irqrestore(&queue->tx_lock, flags);
783                 goto drop;
784         }
785
786         /* First request for the linear area. */
787         info.queue = queue;
788         info.skb = skb;
789         info.page = page;
790         first_tx = xennet_make_first_txreq(&info, offset, len);
791         offset += info.tx_local.size;
792         if (offset == PAGE_SIZE) {
793                 page++;
794                 offset = 0;
795         }
796         len -= info.tx_local.size;
797
798         if (skb->ip_summed == CHECKSUM_PARTIAL)
799                 /* local packet? */
800                 first_tx->flags |= XEN_NETTXF_csum_blank |
801                                    XEN_NETTXF_data_validated;
802         else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
803                 /* remote but checksummed. */
804                 first_tx->flags |= XEN_NETTXF_data_validated;
805
806         /* Optional extra info after the first request. */
807         if (skb_shinfo(skb)->gso_size) {
808                 struct xen_netif_extra_info *gso;
809
810                 gso = (struct xen_netif_extra_info *)
811                         RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
812
813                 first_tx->flags |= XEN_NETTXF_extra_info;
814
815                 gso->u.gso.size = skb_shinfo(skb)->gso_size;
816                 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
817                         XEN_NETIF_GSO_TYPE_TCPV6 :
818                         XEN_NETIF_GSO_TYPE_TCPV4;
819                 gso->u.gso.pad = 0;
820                 gso->u.gso.features = 0;
821
822                 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
823                 gso->flags = 0;
824         }
825
826         /* Requests for the rest of the linear area. */
827         xennet_make_txreqs(&info, page, offset, len);
828
829         /* Requests for all the frags. */
830         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
831                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
832                 xennet_make_txreqs(&info, skb_frag_page(frag),
833                                         skb_frag_off(frag),
834                                         skb_frag_size(frag));
835         }
836
837         /* First request has the packet length. */
838         first_tx->size = skb->len;
839
840         /* timestamp packet in software */
841         skb_tx_timestamp(skb);
842
843         xennet_mark_tx_pending(queue);
844
845         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
846         if (notify)
847                 notify_remote_via_irq(queue->tx_irq);
848
849         u64_stats_update_begin(&tx_stats->syncp);
850         tx_stats->bytes += skb->len;
851         tx_stats->packets++;
852         u64_stats_update_end(&tx_stats->syncp);
853
854         /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
855         xennet_tx_buf_gc(queue);
856
857         if (!netfront_tx_slot_available(queue))
858                 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
859
860         spin_unlock_irqrestore(&queue->tx_lock, flags);
861
862         return NETDEV_TX_OK;
863
864  drop:
865         dev->stats.tx_dropped++;
866         dev_kfree_skb_any(skb);
867         return NETDEV_TX_OK;
868 }
869
870 static int xennet_close(struct net_device *dev)
871 {
872         struct netfront_info *np = netdev_priv(dev);
873         unsigned int num_queues = dev->real_num_tx_queues;
874         unsigned int i;
875         struct netfront_queue *queue;
876         netif_tx_stop_all_queues(np->netdev);
877         for (i = 0; i < num_queues; ++i) {
878                 queue = &np->queues[i];
879                 napi_disable(&queue->napi);
880         }
881         return 0;
882 }
883
884 static void xennet_destroy_queues(struct netfront_info *info)
885 {
886         unsigned int i;
887
888         for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
889                 struct netfront_queue *queue = &info->queues[i];
890
891                 if (netif_running(info->netdev))
892                         napi_disable(&queue->napi);
893                 netif_napi_del(&queue->napi);
894         }
895
896         kfree(info->queues);
897         info->queues = NULL;
898 }
899
900 static void xennet_uninit(struct net_device *dev)
901 {
902         struct netfront_info *np = netdev_priv(dev);
903         xennet_destroy_queues(np);
904 }
905
906 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
907 {
908         unsigned long flags;
909
910         spin_lock_irqsave(&queue->rx_cons_lock, flags);
911         queue->rx.rsp_cons = val;
912         queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
913         spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
914 }
915
916 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
917                                 grant_ref_t ref)
918 {
919         int new = xennet_rxidx(queue->rx.req_prod_pvt);
920
921         BUG_ON(queue->rx_skbs[new]);
922         queue->rx_skbs[new] = skb;
923         queue->grant_rx_ref[new] = ref;
924         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
925         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
926         queue->rx.req_prod_pvt++;
927 }
928
929 static int xennet_get_extras(struct netfront_queue *queue,
930                              struct xen_netif_extra_info *extras,
931                              RING_IDX rp)
932
933 {
934         struct xen_netif_extra_info extra;
935         struct device *dev = &queue->info->netdev->dev;
936         RING_IDX cons = queue->rx.rsp_cons;
937         int err = 0;
938
939         do {
940                 struct sk_buff *skb;
941                 grant_ref_t ref;
942
943                 if (unlikely(cons + 1 == rp)) {
944                         if (net_ratelimit())
945                                 dev_warn(dev, "Missing extra info\n");
946                         err = -EBADR;
947                         break;
948                 }
949
950                 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
951
952                 if (unlikely(!extra.type ||
953                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
954                         if (net_ratelimit())
955                                 dev_warn(dev, "Invalid extra type: %d\n",
956                                          extra.type);
957                         err = -EINVAL;
958                 } else {
959                         extras[extra.type - 1] = extra;
960                 }
961
962                 skb = xennet_get_rx_skb(queue, cons);
963                 ref = xennet_get_rx_ref(queue, cons);
964                 xennet_move_rx_slot(queue, skb, ref);
965         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
966
967         xennet_set_rx_rsp_cons(queue, cons);
968         return err;
969 }
970
971 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
972                    struct xen_netif_rx_response *rx, struct bpf_prog *prog,
973                    struct xdp_buff *xdp, bool *need_xdp_flush)
974 {
975         struct xdp_frame *xdpf;
976         u32 len = rx->status;
977         u32 act;
978         int err;
979
980         xdp->data_hard_start = page_address(pdata);
981         xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
982         xdp_set_data_meta_invalid(xdp);
983         xdp->data_end = xdp->data + len;
984         xdp->rxq = &queue->xdp_rxq;
985         xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
986
987         act = bpf_prog_run_xdp(prog, xdp);
988         switch (act) {
989         case XDP_TX:
990                 get_page(pdata);
991                 xdpf = xdp_convert_buff_to_frame(xdp);
992                 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
993                 if (unlikely(err < 0))
994                         trace_xdp_exception(queue->info->netdev, prog, act);
995                 break;
996         case XDP_REDIRECT:
997                 get_page(pdata);
998                 err = xdp_do_redirect(queue->info->netdev, xdp, prog);
999                 *need_xdp_flush = true;
1000                 if (unlikely(err))
1001                         trace_xdp_exception(queue->info->netdev, prog, act);
1002                 break;
1003         case XDP_PASS:
1004         case XDP_DROP:
1005                 break;
1006
1007         case XDP_ABORTED:
1008                 trace_xdp_exception(queue->info->netdev, prog, act);
1009                 break;
1010
1011         default:
1012                 bpf_warn_invalid_xdp_action(act);
1013         }
1014
1015         return act;
1016 }
1017
1018 static int xennet_get_responses(struct netfront_queue *queue,
1019                                 struct netfront_rx_info *rinfo, RING_IDX rp,
1020                                 struct sk_buff_head *list,
1021                                 bool *need_xdp_flush)
1022 {
1023         struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1024         int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
1025         RING_IDX cons = queue->rx.rsp_cons;
1026         struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
1027         struct xen_netif_extra_info *extras = rinfo->extras;
1028         grant_ref_t ref = xennet_get_rx_ref(queue, cons);
1029         struct device *dev = &queue->info->netdev->dev;
1030         struct bpf_prog *xdp_prog;
1031         struct xdp_buff xdp;
1032         int slots = 1;
1033         int err = 0;
1034         u32 verdict;
1035
1036         if (rx->flags & XEN_NETRXF_extra_info) {
1037                 err = xennet_get_extras(queue, extras, rp);
1038                 if (!err) {
1039                         if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
1040                                 struct xen_netif_extra_info *xdp;
1041
1042                                 xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
1043                                 rx->offset = xdp->u.xdp.headroom;
1044                         }
1045                 }
1046                 cons = queue->rx.rsp_cons;
1047         }
1048
1049         for (;;) {
1050                 if (unlikely(rx->status < 0 ||
1051                              rx->offset + rx->status > XEN_PAGE_SIZE)) {
1052                         if (net_ratelimit())
1053                                 dev_warn(dev, "rx->offset: %u, size: %d\n",
1054                                          rx->offset, rx->status);
1055                         xennet_move_rx_slot(queue, skb, ref);
1056                         err = -EINVAL;
1057                         goto next;
1058                 }
1059
1060                 /*
1061                  * This definitely indicates a bug, either in this driver or in
1062                  * the backend driver. In future this should flag the bad
1063                  * situation to the system controller to reboot the backend.
1064                  */
1065                 if (ref == GRANT_INVALID_REF) {
1066                         if (net_ratelimit())
1067                                 dev_warn(dev, "Bad rx response id %d.\n",
1068                                          rx->id);
1069                         err = -EINVAL;
1070                         goto next;
1071                 }
1072
1073                 if (!gnttab_end_foreign_access_ref(ref, 0)) {
1074                         dev_alert(dev,
1075                                   "Grant still in use by backend domain\n");
1076                         queue->info->broken = true;
1077                         dev_alert(dev, "Disabled for further use\n");
1078                         return -EINVAL;
1079                 }
1080
1081                 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1082
1083                 rcu_read_lock();
1084                 xdp_prog = rcu_dereference(queue->xdp_prog);
1085                 if (xdp_prog) {
1086                         if (!(rx->flags & XEN_NETRXF_more_data)) {
1087                                 /* currently only a single page contains data */
1088                                 verdict = xennet_run_xdp(queue,
1089                                                          skb_frag_page(&skb_shinfo(skb)->frags[0]),
1090                                                          rx, xdp_prog, &xdp, need_xdp_flush);
1091                                 if (verdict != XDP_PASS)
1092                                         err = -EINVAL;
1093                         } else {
1094                                 /* drop the frame */
1095                                 err = -EINVAL;
1096                         }
1097                 }
1098                 rcu_read_unlock();
1099
1100                 __skb_queue_tail(list, skb);
1101
1102 next:
1103                 if (!(rx->flags & XEN_NETRXF_more_data))
1104                         break;
1105
1106                 if (cons + slots == rp) {
1107                         if (net_ratelimit())
1108                                 dev_warn(dev, "Need more slots\n");
1109                         err = -ENOENT;
1110                         break;
1111                 }
1112
1113                 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1114                 rx = &rx_local;
1115                 skb = xennet_get_rx_skb(queue, cons + slots);
1116                 ref = xennet_get_rx_ref(queue, cons + slots);
1117                 slots++;
1118         }
1119
1120         if (unlikely(slots > max)) {
1121                 if (net_ratelimit())
1122                         dev_warn(dev, "Too many slots\n");
1123                 err = -E2BIG;
1124         }
1125
1126         if (unlikely(err))
1127                 xennet_set_rx_rsp_cons(queue, cons + slots);
1128
1129         return err;
1130 }
1131
1132 static int xennet_set_skb_gso(struct sk_buff *skb,
1133                               struct xen_netif_extra_info *gso)
1134 {
1135         if (!gso->u.gso.size) {
1136                 if (net_ratelimit())
1137                         pr_warn("GSO size must not be zero\n");
1138                 return -EINVAL;
1139         }
1140
1141         if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1142             gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1143                 if (net_ratelimit())
1144                         pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1145                 return -EINVAL;
1146         }
1147
1148         skb_shinfo(skb)->gso_size = gso->u.gso.size;
1149         skb_shinfo(skb)->gso_type =
1150                 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1151                 SKB_GSO_TCPV4 :
1152                 SKB_GSO_TCPV6;
1153
1154         /* Header must be checked, and gso_segs computed. */
1155         skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1156         skb_shinfo(skb)->gso_segs = 0;
1157
1158         return 0;
1159 }
1160
1161 static int xennet_fill_frags(struct netfront_queue *queue,
1162                              struct sk_buff *skb,
1163                              struct sk_buff_head *list)
1164 {
1165         RING_IDX cons = queue->rx.rsp_cons;
1166         struct sk_buff *nskb;
1167
1168         while ((nskb = __skb_dequeue(list))) {
1169                 struct xen_netif_rx_response rx;
1170                 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1171
1172                 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1173
1174                 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1175                         unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1176
1177                         BUG_ON(pull_to < skb_headlen(skb));
1178                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1179                 }
1180                 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1181                         xennet_set_rx_rsp_cons(queue,
1182                                                ++cons + skb_queue_len(list));
1183                         kfree_skb(nskb);
1184                         return -ENOENT;
1185                 }
1186
1187                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1188                                 skb_frag_page(nfrag),
1189                                 rx.offset, rx.status, PAGE_SIZE);
1190
1191                 skb_shinfo(nskb)->nr_frags = 0;
1192                 kfree_skb(nskb);
1193         }
1194
1195         xennet_set_rx_rsp_cons(queue, cons);
1196
1197         return 0;
1198 }
1199
1200 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1201 {
1202         bool recalculate_partial_csum = false;
1203
1204         /*
1205          * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1206          * peers can fail to set NETRXF_csum_blank when sending a GSO
1207          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1208          * recalculate the partial checksum.
1209          */
1210         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1211                 struct netfront_info *np = netdev_priv(dev);
1212                 atomic_inc(&np->rx_gso_checksum_fixup);
1213                 skb->ip_summed = CHECKSUM_PARTIAL;
1214                 recalculate_partial_csum = true;
1215         }
1216
1217         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1218         if (skb->ip_summed != CHECKSUM_PARTIAL)
1219                 return 0;
1220
1221         return skb_checksum_setup(skb, recalculate_partial_csum);
1222 }
1223
1224 static int handle_incoming_queue(struct netfront_queue *queue,
1225                                  struct sk_buff_head *rxq)
1226 {
1227         struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1228         int packets_dropped = 0;
1229         struct sk_buff *skb;
1230
1231         while ((skb = __skb_dequeue(rxq)) != NULL) {
1232                 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1233
1234                 if (pull_to > skb_headlen(skb))
1235                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1236
1237                 /* Ethernet work: Delayed to here as it peeks the header. */
1238                 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1239                 skb_reset_network_header(skb);
1240
1241                 if (checksum_setup(queue->info->netdev, skb)) {
1242                         kfree_skb(skb);
1243                         packets_dropped++;
1244                         queue->info->netdev->stats.rx_errors++;
1245                         continue;
1246                 }
1247
1248                 u64_stats_update_begin(&rx_stats->syncp);
1249                 rx_stats->packets++;
1250                 rx_stats->bytes += skb->len;
1251                 u64_stats_update_end(&rx_stats->syncp);
1252
1253                 /* Pass it up. */
1254                 napi_gro_receive(&queue->napi, skb);
1255         }
1256
1257         return packets_dropped;
1258 }
1259
1260 static int xennet_poll(struct napi_struct *napi, int budget)
1261 {
1262         struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1263         struct net_device *dev = queue->info->netdev;
1264         struct sk_buff *skb;
1265         struct netfront_rx_info rinfo;
1266         struct xen_netif_rx_response *rx = &rinfo.rx;
1267         struct xen_netif_extra_info *extras = rinfo.extras;
1268         RING_IDX i, rp;
1269         int work_done;
1270         struct sk_buff_head rxq;
1271         struct sk_buff_head errq;
1272         struct sk_buff_head tmpq;
1273         int err;
1274         bool need_xdp_flush = false;
1275
1276         spin_lock(&queue->rx_lock);
1277
1278         skb_queue_head_init(&rxq);
1279         skb_queue_head_init(&errq);
1280         skb_queue_head_init(&tmpq);
1281
1282         rp = queue->rx.sring->rsp_prod;
1283         if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1284                 dev_alert(&dev->dev, "Illegal number of responses %u\n",
1285                           rp - queue->rx.rsp_cons);
1286                 queue->info->broken = true;
1287                 spin_unlock(&queue->rx_lock);
1288                 return 0;
1289         }
1290         rmb(); /* Ensure we see queued responses up to 'rp'. */
1291
1292         i = queue->rx.rsp_cons;
1293         work_done = 0;
1294         while ((i != rp) && (work_done < budget)) {
1295                 RING_COPY_RESPONSE(&queue->rx, i, rx);
1296                 memset(extras, 0, sizeof(rinfo.extras));
1297
1298                 err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1299                                            &need_xdp_flush);
1300
1301                 if (unlikely(err)) {
1302                         if (queue->info->broken) {
1303                                 spin_unlock(&queue->rx_lock);
1304                                 return 0;
1305                         }
1306 err:
1307                         while ((skb = __skb_dequeue(&tmpq)))
1308                                 __skb_queue_tail(&errq, skb);
1309                         dev->stats.rx_errors++;
1310                         i = queue->rx.rsp_cons;
1311                         continue;
1312                 }
1313
1314                 skb = __skb_dequeue(&tmpq);
1315
1316                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1317                         struct xen_netif_extra_info *gso;
1318                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1319
1320                         if (unlikely(xennet_set_skb_gso(skb, gso))) {
1321                                 __skb_queue_head(&tmpq, skb);
1322                                 xennet_set_rx_rsp_cons(queue,
1323                                                        queue->rx.rsp_cons +
1324                                                        skb_queue_len(&tmpq));
1325                                 goto err;
1326                         }
1327                 }
1328
1329                 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1330                 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1331                         NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1332
1333                 skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1334                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1335                 skb->data_len = rx->status;
1336                 skb->len += rx->status;
1337
1338                 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1339                         goto err;
1340
1341                 if (rx->flags & XEN_NETRXF_csum_blank)
1342                         skb->ip_summed = CHECKSUM_PARTIAL;
1343                 else if (rx->flags & XEN_NETRXF_data_validated)
1344                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1345
1346                 __skb_queue_tail(&rxq, skb);
1347
1348                 i = queue->rx.rsp_cons + 1;
1349                 xennet_set_rx_rsp_cons(queue, i);
1350                 work_done++;
1351         }
1352         if (need_xdp_flush)
1353                 xdp_do_flush();
1354
1355         __skb_queue_purge(&errq);
1356
1357         work_done -= handle_incoming_queue(queue, &rxq);
1358
1359         xennet_alloc_rx_buffers(queue);
1360
1361         if (work_done < budget) {
1362                 int more_to_do = 0;
1363
1364                 napi_complete_done(napi, work_done);
1365
1366                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1367                 if (more_to_do)
1368                         napi_schedule(napi);
1369         }
1370
1371         spin_unlock(&queue->rx_lock);
1372
1373         return work_done;
1374 }
1375
1376 static int xennet_change_mtu(struct net_device *dev, int mtu)
1377 {
1378         int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1379
1380         if (mtu > max)
1381                 return -EINVAL;
1382         dev->mtu = mtu;
1383         return 0;
1384 }
1385
1386 static void xennet_get_stats64(struct net_device *dev,
1387                                struct rtnl_link_stats64 *tot)
1388 {
1389         struct netfront_info *np = netdev_priv(dev);
1390         int cpu;
1391
1392         for_each_possible_cpu(cpu) {
1393                 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1394                 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1395                 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1396                 unsigned int start;
1397
1398                 do {
1399                         start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1400                         tx_packets = tx_stats->packets;
1401                         tx_bytes = tx_stats->bytes;
1402                 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1403
1404                 do {
1405                         start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1406                         rx_packets = rx_stats->packets;
1407                         rx_bytes = rx_stats->bytes;
1408                 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1409
1410                 tot->rx_packets += rx_packets;
1411                 tot->tx_packets += tx_packets;
1412                 tot->rx_bytes   += rx_bytes;
1413                 tot->tx_bytes   += tx_bytes;
1414         }
1415
1416         tot->rx_errors  = dev->stats.rx_errors;
1417         tot->tx_dropped = dev->stats.tx_dropped;
1418 }
1419
1420 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1421 {
1422         struct sk_buff *skb;
1423         int i;
1424
1425         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1426                 /* Skip over entries which are actually freelist references */
1427                 if (!queue->tx_skbs[i])
1428                         continue;
1429
1430                 skb = queue->tx_skbs[i];
1431                 queue->tx_skbs[i] = NULL;
1432                 get_page(queue->grant_tx_page[i]);
1433                 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1434                                           GNTMAP_readonly,
1435                                           (unsigned long)page_address(queue->grant_tx_page[i]));
1436                 queue->grant_tx_page[i] = NULL;
1437                 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1438                 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1439                 dev_kfree_skb_irq(skb);
1440         }
1441 }
1442
1443 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1444 {
1445         int id, ref;
1446
1447         spin_lock_bh(&queue->rx_lock);
1448
1449         for (id = 0; id < NET_RX_RING_SIZE; id++) {
1450                 struct sk_buff *skb;
1451                 struct page *page;
1452
1453                 skb = queue->rx_skbs[id];
1454                 if (!skb)
1455                         continue;
1456
1457                 ref = queue->grant_rx_ref[id];
1458                 if (ref == GRANT_INVALID_REF)
1459                         continue;
1460
1461                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1462
1463                 /* gnttab_end_foreign_access() needs a page ref until
1464                  * foreign access is ended (which may be deferred).
1465                  */
1466                 get_page(page);
1467                 gnttab_end_foreign_access(ref, 0,
1468                                           (unsigned long)page_address(page));
1469                 queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1470
1471                 kfree_skb(skb);
1472         }
1473
1474         spin_unlock_bh(&queue->rx_lock);
1475 }
1476
1477 static netdev_features_t xennet_fix_features(struct net_device *dev,
1478         netdev_features_t features)
1479 {
1480         struct netfront_info *np = netdev_priv(dev);
1481
1482         if (features & NETIF_F_SG &&
1483             !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1484                 features &= ~NETIF_F_SG;
1485
1486         if (features & NETIF_F_IPV6_CSUM &&
1487             !xenbus_read_unsigned(np->xbdev->otherend,
1488                                   "feature-ipv6-csum-offload", 0))
1489                 features &= ~NETIF_F_IPV6_CSUM;
1490
1491         if (features & NETIF_F_TSO &&
1492             !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1493                 features &= ~NETIF_F_TSO;
1494
1495         if (features & NETIF_F_TSO6 &&
1496             !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1497                 features &= ~NETIF_F_TSO6;
1498
1499         return features;
1500 }
1501
1502 static int xennet_set_features(struct net_device *dev,
1503         netdev_features_t features)
1504 {
1505         if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1506                 netdev_info(dev, "Reducing MTU because no SG offload");
1507                 dev->mtu = ETH_DATA_LEN;
1508         }
1509
1510         return 0;
1511 }
1512
1513 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1514 {
1515         unsigned long flags;
1516
1517         if (unlikely(queue->info->broken))
1518                 return false;
1519
1520         spin_lock_irqsave(&queue->tx_lock, flags);
1521         if (xennet_tx_buf_gc(queue))
1522                 *eoi = 0;
1523         spin_unlock_irqrestore(&queue->tx_lock, flags);
1524
1525         return true;
1526 }
1527
1528 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1529 {
1530         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1531
1532         if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1533                 xen_irq_lateeoi(irq, eoiflag);
1534
1535         return IRQ_HANDLED;
1536 }
1537
1538 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1539 {
1540         unsigned int work_queued;
1541         unsigned long flags;
1542
1543         if (unlikely(queue->info->broken))
1544                 return false;
1545
1546         spin_lock_irqsave(&queue->rx_cons_lock, flags);
1547         work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
1548         if (work_queued > queue->rx_rsp_unconsumed) {
1549                 queue->rx_rsp_unconsumed = work_queued;
1550                 *eoi = 0;
1551         } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1552                 const struct device *dev = &queue->info->netdev->dev;
1553
1554                 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1555                 dev_alert(dev, "RX producer index going backwards\n");
1556                 dev_alert(dev, "Disabled for further use\n");
1557                 queue->info->broken = true;
1558                 return false;
1559         }
1560         spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1561
1562         if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1563                 napi_schedule(&queue->napi);
1564
1565         return true;
1566 }
1567
1568 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1569 {
1570         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1571
1572         if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1573                 xen_irq_lateeoi(irq, eoiflag);
1574
1575         return IRQ_HANDLED;
1576 }
1577
1578 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1579 {
1580         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1581
1582         if (xennet_handle_tx(dev_id, &eoiflag) &&
1583             xennet_handle_rx(dev_id, &eoiflag))
1584                 xen_irq_lateeoi(irq, eoiflag);
1585
1586         return IRQ_HANDLED;
1587 }
1588
1589 #ifdef CONFIG_NET_POLL_CONTROLLER
1590 static void xennet_poll_controller(struct net_device *dev)
1591 {
1592         /* Poll each queue */
1593         struct netfront_info *info = netdev_priv(dev);
1594         unsigned int num_queues = dev->real_num_tx_queues;
1595         unsigned int i;
1596
1597         if (info->broken)
1598                 return;
1599
1600         for (i = 0; i < num_queues; ++i)
1601                 xennet_interrupt(0, &info->queues[i]);
1602 }
1603 #endif
1604
1605 #define NETBACK_XDP_HEADROOM_DISABLE    0
1606 #define NETBACK_XDP_HEADROOM_ENABLE     1
1607
1608 static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1609 {
1610         int err;
1611         unsigned short headroom;
1612
1613         headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1614         err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1615                             "xdp-headroom", "%hu",
1616                             headroom);
1617         if (err)
1618                 pr_warn("Error writing xdp-headroom\n");
1619
1620         return err;
1621 }
1622
1623 static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1624                           struct netlink_ext_ack *extack)
1625 {
1626         unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1627         struct netfront_info *np = netdev_priv(dev);
1628         struct bpf_prog *old_prog;
1629         unsigned int i, err;
1630
1631         if (dev->mtu > max_mtu) {
1632                 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1633                 return -EINVAL;
1634         }
1635
1636         if (!np->netback_has_xdp_headroom)
1637                 return 0;
1638
1639         xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1640
1641         err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1642                                   NETBACK_XDP_HEADROOM_DISABLE);
1643         if (err)
1644                 return err;
1645
1646         /* avoid the race with XDP headroom adjustment */
1647         wait_event(module_wq,
1648                    xenbus_read_driver_state(np->xbdev->otherend) ==
1649                    XenbusStateReconfigured);
1650         np->netfront_xdp_enabled = true;
1651
1652         old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1653
1654         if (prog)
1655                 bpf_prog_add(prog, dev->real_num_tx_queues);
1656
1657         for (i = 0; i < dev->real_num_tx_queues; ++i)
1658                 rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1659
1660         if (old_prog)
1661                 for (i = 0; i < dev->real_num_tx_queues; ++i)
1662                         bpf_prog_put(old_prog);
1663
1664         xenbus_switch_state(np->xbdev, XenbusStateConnected);
1665
1666         return 0;
1667 }
1668
1669 static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1670 {
1671         struct netfront_info *np = netdev_priv(dev);
1672
1673         if (np->broken)
1674                 return -ENODEV;
1675
1676         switch (xdp->command) {
1677         case XDP_SETUP_PROG:
1678                 return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1679         default:
1680                 return -EINVAL;
1681         }
1682 }
1683
1684 static const struct net_device_ops xennet_netdev_ops = {
1685         .ndo_uninit          = xennet_uninit,
1686         .ndo_open            = xennet_open,
1687         .ndo_stop            = xennet_close,
1688         .ndo_start_xmit      = xennet_start_xmit,
1689         .ndo_change_mtu      = xennet_change_mtu,
1690         .ndo_get_stats64     = xennet_get_stats64,
1691         .ndo_set_mac_address = eth_mac_addr,
1692         .ndo_validate_addr   = eth_validate_addr,
1693         .ndo_fix_features    = xennet_fix_features,
1694         .ndo_set_features    = xennet_set_features,
1695         .ndo_select_queue    = xennet_select_queue,
1696         .ndo_bpf            = xennet_xdp,
1697         .ndo_xdp_xmit       = xennet_xdp_xmit,
1698 #ifdef CONFIG_NET_POLL_CONTROLLER
1699         .ndo_poll_controller = xennet_poll_controller,
1700 #endif
1701 };
1702
1703 static void xennet_free_netdev(struct net_device *netdev)
1704 {
1705         struct netfront_info *np = netdev_priv(netdev);
1706
1707         free_percpu(np->rx_stats);
1708         free_percpu(np->tx_stats);
1709         free_netdev(netdev);
1710 }
1711
1712 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1713 {
1714         int err;
1715         struct net_device *netdev;
1716         struct netfront_info *np;
1717
1718         netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1719         if (!netdev)
1720                 return ERR_PTR(-ENOMEM);
1721
1722         np                   = netdev_priv(netdev);
1723         np->xbdev            = dev;
1724
1725         np->queues = NULL;
1726
1727         err = -ENOMEM;
1728         np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1729         if (np->rx_stats == NULL)
1730                 goto exit;
1731         np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1732         if (np->tx_stats == NULL)
1733                 goto exit;
1734
1735         netdev->netdev_ops      = &xennet_netdev_ops;
1736
1737         netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1738                                   NETIF_F_GSO_ROBUST;
1739         netdev->hw_features     = NETIF_F_SG |
1740                                   NETIF_F_IPV6_CSUM |
1741                                   NETIF_F_TSO | NETIF_F_TSO6;
1742
1743         /*
1744          * Assume that all hw features are available for now. This set
1745          * will be adjusted by the call to netdev_update_features() in
1746          * xennet_connect() which is the earliest point where we can
1747          * negotiate with the backend regarding supported features.
1748          */
1749         netdev->features |= netdev->hw_features;
1750
1751         netdev->ethtool_ops = &xennet_ethtool_ops;
1752         netdev->min_mtu = ETH_MIN_MTU;
1753         netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1754         SET_NETDEV_DEV(netdev, &dev->dev);
1755
1756         np->netdev = netdev;
1757         np->netfront_xdp_enabled = false;
1758
1759         netif_carrier_off(netdev);
1760
1761         do {
1762                 xenbus_switch_state(dev, XenbusStateInitialising);
1763                 err = wait_event_timeout(module_wq,
1764                                  xenbus_read_driver_state(dev->otherend) !=
1765                                  XenbusStateClosed &&
1766                                  xenbus_read_driver_state(dev->otherend) !=
1767                                  XenbusStateUnknown, XENNET_TIMEOUT);
1768         } while (!err);
1769
1770         return netdev;
1771
1772  exit:
1773         xennet_free_netdev(netdev);
1774         return ERR_PTR(err);
1775 }
1776
1777 /**
1778  * Entry point to this code when a new device is created.  Allocate the basic
1779  * structures and the ring buffers for communication with the backend, and
1780  * inform the backend of the appropriate details for those.
1781  */
1782 static int netfront_probe(struct xenbus_device *dev,
1783                           const struct xenbus_device_id *id)
1784 {
1785         int err;
1786         struct net_device *netdev;
1787         struct netfront_info *info;
1788
1789         netdev = xennet_create_dev(dev);
1790         if (IS_ERR(netdev)) {
1791                 err = PTR_ERR(netdev);
1792                 xenbus_dev_fatal(dev, err, "creating netdev");
1793                 return err;
1794         }
1795
1796         info = netdev_priv(netdev);
1797         dev_set_drvdata(&dev->dev, info);
1798 #ifdef CONFIG_SYSFS
1799         info->netdev->sysfs_groups[0] = &xennet_dev_group;
1800 #endif
1801
1802         return 0;
1803 }
1804
1805 static void xennet_end_access(int ref, void *page)
1806 {
1807         /* This frees the page as a side-effect */
1808         if (ref != GRANT_INVALID_REF)
1809                 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1810 }
1811
1812 static void xennet_disconnect_backend(struct netfront_info *info)
1813 {
1814         unsigned int i = 0;
1815         unsigned int num_queues = info->netdev->real_num_tx_queues;
1816
1817         netif_carrier_off(info->netdev);
1818
1819         for (i = 0; i < num_queues && info->queues; ++i) {
1820                 struct netfront_queue *queue = &info->queues[i];
1821
1822                 del_timer_sync(&queue->rx_refill_timer);
1823
1824                 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1825                         unbind_from_irqhandler(queue->tx_irq, queue);
1826                 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1827                         unbind_from_irqhandler(queue->tx_irq, queue);
1828                         unbind_from_irqhandler(queue->rx_irq, queue);
1829                 }
1830                 queue->tx_evtchn = queue->rx_evtchn = 0;
1831                 queue->tx_irq = queue->rx_irq = 0;
1832
1833                 if (netif_running(info->netdev))
1834                         napi_synchronize(&queue->napi);
1835
1836                 xennet_release_tx_bufs(queue);
1837                 xennet_release_rx_bufs(queue);
1838                 gnttab_free_grant_references(queue->gref_tx_head);
1839                 gnttab_free_grant_references(queue->gref_rx_head);
1840
1841                 /* End access and free the pages */
1842                 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1843                 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1844
1845                 queue->tx_ring_ref = GRANT_INVALID_REF;
1846                 queue->rx_ring_ref = GRANT_INVALID_REF;
1847                 queue->tx.sring = NULL;
1848                 queue->rx.sring = NULL;
1849
1850                 page_pool_destroy(queue->page_pool);
1851         }
1852 }
1853
1854 /**
1855  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1856  * driver restart.  We tear down our netif structure and recreate it, but
1857  * leave the device-layer structures intact so that this is transparent to the
1858  * rest of the kernel.
1859  */
1860 static int netfront_resume(struct xenbus_device *dev)
1861 {
1862         struct netfront_info *info = dev_get_drvdata(&dev->dev);
1863
1864         dev_dbg(&dev->dev, "%s\n", dev->nodename);
1865
1866         netif_tx_lock_bh(info->netdev);
1867         netif_device_detach(info->netdev);
1868         netif_tx_unlock_bh(info->netdev);
1869
1870         xennet_disconnect_backend(info);
1871         return 0;
1872 }
1873
1874 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1875 {
1876         char *s, *e, *macstr;
1877         int i;
1878
1879         macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1880         if (IS_ERR(macstr))
1881                 return PTR_ERR(macstr);
1882
1883         for (i = 0; i < ETH_ALEN; i++) {
1884                 mac[i] = simple_strtoul(s, &e, 16);
1885                 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1886                         kfree(macstr);
1887                         return -ENOENT;
1888                 }
1889                 s = e+1;
1890         }
1891
1892         kfree(macstr);
1893         return 0;
1894 }
1895
1896 static int setup_netfront_single(struct netfront_queue *queue)
1897 {
1898         int err;
1899
1900         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1901         if (err < 0)
1902                 goto fail;
1903
1904         err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1905                                                 xennet_interrupt, 0,
1906                                                 queue->info->netdev->name,
1907                                                 queue);
1908         if (err < 0)
1909                 goto bind_fail;
1910         queue->rx_evtchn = queue->tx_evtchn;
1911         queue->rx_irq = queue->tx_irq = err;
1912
1913         return 0;
1914
1915 bind_fail:
1916         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1917         queue->tx_evtchn = 0;
1918 fail:
1919         return err;
1920 }
1921
1922 static int setup_netfront_split(struct netfront_queue *queue)
1923 {
1924         int err;
1925
1926         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1927         if (err < 0)
1928                 goto fail;
1929         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1930         if (err < 0)
1931                 goto alloc_rx_evtchn_fail;
1932
1933         snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1934                  "%s-tx", queue->name);
1935         err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1936                                                 xennet_tx_interrupt, 0,
1937                                                 queue->tx_irq_name, queue);
1938         if (err < 0)
1939                 goto bind_tx_fail;
1940         queue->tx_irq = err;
1941
1942         snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1943                  "%s-rx", queue->name);
1944         err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1945                                                 xennet_rx_interrupt, 0,
1946                                                 queue->rx_irq_name, queue);
1947         if (err < 0)
1948                 goto bind_rx_fail;
1949         queue->rx_irq = err;
1950
1951         return 0;
1952
1953 bind_rx_fail:
1954         unbind_from_irqhandler(queue->tx_irq, queue);
1955         queue->tx_irq = 0;
1956 bind_tx_fail:
1957         xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1958         queue->rx_evtchn = 0;
1959 alloc_rx_evtchn_fail:
1960         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1961         queue->tx_evtchn = 0;
1962 fail:
1963         return err;
1964 }
1965
1966 static int setup_netfront(struct xenbus_device *dev,
1967                         struct netfront_queue *queue, unsigned int feature_split_evtchn)
1968 {
1969         struct xen_netif_tx_sring *txs;
1970         struct xen_netif_rx_sring *rxs = NULL;
1971         grant_ref_t gref;
1972         int err;
1973
1974         queue->tx_ring_ref = GRANT_INVALID_REF;
1975         queue->rx_ring_ref = GRANT_INVALID_REF;
1976         queue->rx.sring = NULL;
1977         queue->tx.sring = NULL;
1978
1979         txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1980         if (!txs) {
1981                 err = -ENOMEM;
1982                 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1983                 goto fail;
1984         }
1985         SHARED_RING_INIT(txs);
1986         FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1987
1988         err = xenbus_grant_ring(dev, txs, 1, &gref);
1989         if (err < 0)
1990                 goto fail;
1991         queue->tx_ring_ref = gref;
1992
1993         rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1994         if (!rxs) {
1995                 err = -ENOMEM;
1996                 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1997                 goto fail;
1998         }
1999         SHARED_RING_INIT(rxs);
2000         FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
2001
2002         err = xenbus_grant_ring(dev, rxs, 1, &gref);
2003         if (err < 0)
2004                 goto fail;
2005         queue->rx_ring_ref = gref;
2006
2007         if (feature_split_evtchn)
2008                 err = setup_netfront_split(queue);
2009         /* setup single event channel if
2010          *  a) feature-split-event-channels == 0
2011          *  b) feature-split-event-channels == 1 but failed to setup
2012          */
2013         if (!feature_split_evtchn || (feature_split_evtchn && err))
2014                 err = setup_netfront_single(queue);
2015
2016         if (err)
2017                 goto fail;
2018
2019         return 0;
2020
2021         /* If we fail to setup netfront, it is safe to just revoke access to
2022          * granted pages because backend is not accessing it at this point.
2023          */
2024  fail:
2025         if (queue->rx_ring_ref != GRANT_INVALID_REF) {
2026                 gnttab_end_foreign_access(queue->rx_ring_ref, 0,
2027                                           (unsigned long)rxs);
2028                 queue->rx_ring_ref = GRANT_INVALID_REF;
2029         } else {
2030                 free_page((unsigned long)rxs);
2031         }
2032         if (queue->tx_ring_ref != GRANT_INVALID_REF) {
2033                 gnttab_end_foreign_access(queue->tx_ring_ref, 0,
2034                                           (unsigned long)txs);
2035                 queue->tx_ring_ref = GRANT_INVALID_REF;
2036         } else {
2037                 free_page((unsigned long)txs);
2038         }
2039         return err;
2040 }
2041
2042 /* Queue-specific initialisation
2043  * This used to be done in xennet_create_dev() but must now
2044  * be run per-queue.
2045  */
2046 static int xennet_init_queue(struct netfront_queue *queue)
2047 {
2048         unsigned short i;
2049         int err = 0;
2050         char *devid;
2051
2052         spin_lock_init(&queue->tx_lock);
2053         spin_lock_init(&queue->rx_lock);
2054         spin_lock_init(&queue->rx_cons_lock);
2055
2056         timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2057
2058         devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2059         snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2060                  devid, queue->id);
2061
2062         /* Initialise tx_skb_freelist as a free chain containing every entry. */
2063         queue->tx_skb_freelist = 0;
2064         queue->tx_pend_queue = TX_LINK_NONE;
2065         for (i = 0; i < NET_TX_RING_SIZE; i++) {
2066                 queue->tx_link[i] = i + 1;
2067                 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
2068                 queue->grant_tx_page[i] = NULL;
2069         }
2070         queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2071
2072         /* Clear out rx_skbs */
2073         for (i = 0; i < NET_RX_RING_SIZE; i++) {
2074                 queue->rx_skbs[i] = NULL;
2075                 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
2076         }
2077
2078         /* A grant for every tx ring slot */
2079         if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2080                                           &queue->gref_tx_head) < 0) {
2081                 pr_alert("can't alloc tx grant refs\n");
2082                 err = -ENOMEM;
2083                 goto exit;
2084         }
2085
2086         /* A grant for every rx ring slot */
2087         if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
2088                                           &queue->gref_rx_head) < 0) {
2089                 pr_alert("can't alloc rx grant refs\n");
2090                 err = -ENOMEM;
2091                 goto exit_free_tx;
2092         }
2093
2094         return 0;
2095
2096  exit_free_tx:
2097         gnttab_free_grant_references(queue->gref_tx_head);
2098  exit:
2099         return err;
2100 }
2101
2102 static int write_queue_xenstore_keys(struct netfront_queue *queue,
2103                            struct xenbus_transaction *xbt, int write_hierarchical)
2104 {
2105         /* Write the queue-specific keys into XenStore in the traditional
2106          * way for a single queue, or in a queue subkeys for multiple
2107          * queues.
2108          */
2109         struct xenbus_device *dev = queue->info->xbdev;
2110         int err;
2111         const char *message;
2112         char *path;
2113         size_t pathsize;
2114
2115         /* Choose the correct place to write the keys */
2116         if (write_hierarchical) {
2117                 pathsize = strlen(dev->nodename) + 10;
2118                 path = kzalloc(pathsize, GFP_KERNEL);
2119                 if (!path) {
2120                         err = -ENOMEM;
2121                         message = "out of memory while writing ring references";
2122                         goto error;
2123                 }
2124                 snprintf(path, pathsize, "%s/queue-%u",
2125                                 dev->nodename, queue->id);
2126         } else {
2127                 path = (char *)dev->nodename;
2128         }
2129
2130         /* Write ring references */
2131         err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
2132                         queue->tx_ring_ref);
2133         if (err) {
2134                 message = "writing tx-ring-ref";
2135                 goto error;
2136         }
2137
2138         err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
2139                         queue->rx_ring_ref);
2140         if (err) {
2141                 message = "writing rx-ring-ref";
2142                 goto error;
2143         }
2144
2145         /* Write event channels; taking into account both shared
2146          * and split event channel scenarios.
2147          */
2148         if (queue->tx_evtchn == queue->rx_evtchn) {
2149                 /* Shared event channel */
2150                 err = xenbus_printf(*xbt, path,
2151                                 "event-channel", "%u", queue->tx_evtchn);
2152                 if (err) {
2153                         message = "writing event-channel";
2154                         goto error;
2155                 }
2156         } else {
2157                 /* Split event channels */
2158                 err = xenbus_printf(*xbt, path,
2159                                 "event-channel-tx", "%u", queue->tx_evtchn);
2160                 if (err) {
2161                         message = "writing event-channel-tx";
2162                         goto error;
2163                 }
2164
2165                 err = xenbus_printf(*xbt, path,
2166                                 "event-channel-rx", "%u", queue->rx_evtchn);
2167                 if (err) {
2168                         message = "writing event-channel-rx";
2169                         goto error;
2170                 }
2171         }
2172
2173         if (write_hierarchical)
2174                 kfree(path);
2175         return 0;
2176
2177 error:
2178         if (write_hierarchical)
2179                 kfree(path);
2180         xenbus_dev_fatal(dev, err, "%s", message);
2181         return err;
2182 }
2183
2184
2185
2186 static int xennet_create_page_pool(struct netfront_queue *queue)
2187 {
2188         int err;
2189         struct page_pool_params pp_params = {
2190                 .order = 0,
2191                 .flags = 0,
2192                 .pool_size = NET_RX_RING_SIZE,
2193                 .nid = NUMA_NO_NODE,
2194                 .dev = &queue->info->netdev->dev,
2195                 .offset = XDP_PACKET_HEADROOM,
2196                 .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2197         };
2198
2199         queue->page_pool = page_pool_create(&pp_params);
2200         if (IS_ERR(queue->page_pool)) {
2201                 err = PTR_ERR(queue->page_pool);
2202                 queue->page_pool = NULL;
2203                 return err;
2204         }
2205
2206         err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2207                                queue->id);
2208         if (err) {
2209                 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2210                 goto err_free_pp;
2211         }
2212
2213         err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2214                                          MEM_TYPE_PAGE_POOL, queue->page_pool);
2215         if (err) {
2216                 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2217                 goto err_unregister_rxq;
2218         }
2219         return 0;
2220
2221 err_unregister_rxq:
2222         xdp_rxq_info_unreg(&queue->xdp_rxq);
2223 err_free_pp:
2224         page_pool_destroy(queue->page_pool);
2225         queue->page_pool = NULL;
2226         return err;
2227 }
2228
2229 static int xennet_create_queues(struct netfront_info *info,
2230                                 unsigned int *num_queues)
2231 {
2232         unsigned int i;
2233         int ret;
2234
2235         info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2236                                GFP_KERNEL);
2237         if (!info->queues)
2238                 return -ENOMEM;
2239
2240         for (i = 0; i < *num_queues; i++) {
2241                 struct netfront_queue *queue = &info->queues[i];
2242
2243                 queue->id = i;
2244                 queue->info = info;
2245
2246                 ret = xennet_init_queue(queue);
2247                 if (ret < 0) {
2248                         dev_warn(&info->xbdev->dev,
2249                                  "only created %d queues\n", i);
2250                         *num_queues = i;
2251                         break;
2252                 }
2253
2254                 /* use page pool recycling instead of buddy allocator */
2255                 ret = xennet_create_page_pool(queue);
2256                 if (ret < 0) {
2257                         dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2258                         *num_queues = i;
2259                         return ret;
2260                 }
2261
2262                 netif_napi_add(queue->info->netdev, &queue->napi,
2263                                xennet_poll, 64);
2264                 if (netif_running(info->netdev))
2265                         napi_enable(&queue->napi);
2266         }
2267
2268         netif_set_real_num_tx_queues(info->netdev, *num_queues);
2269
2270         if (*num_queues == 0) {
2271                 dev_err(&info->xbdev->dev, "no queues\n");
2272                 return -EINVAL;
2273         }
2274         return 0;
2275 }
2276
2277 /* Common code used when first setting up, and when resuming. */
2278 static int talk_to_netback(struct xenbus_device *dev,
2279                            struct netfront_info *info)
2280 {
2281         const char *message;
2282         struct xenbus_transaction xbt;
2283         int err;
2284         unsigned int feature_split_evtchn;
2285         unsigned int i = 0;
2286         unsigned int max_queues = 0;
2287         struct netfront_queue *queue = NULL;
2288         unsigned int num_queues = 1;
2289
2290         info->netdev->irq = 0;
2291
2292         /* Check if backend is trusted. */
2293         info->bounce = !xennet_trusted ||
2294                        !xenbus_read_unsigned(dev->nodename, "trusted", 1);
2295
2296         /* Check if backend supports multiple queues */
2297         max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2298                                           "multi-queue-max-queues", 1);
2299         num_queues = min(max_queues, xennet_max_queues);
2300
2301         /* Check feature-split-event-channels */
2302         feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2303                                         "feature-split-event-channels", 0);
2304
2305         /* Read mac addr. */
2306         err = xen_net_read_mac(dev, info->netdev->dev_addr);
2307         if (err) {
2308                 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2309                 goto out_unlocked;
2310         }
2311
2312         info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2313                                                               "feature-xdp-headroom", 0);
2314         if (info->netback_has_xdp_headroom) {
2315                 /* set the current xen-netfront xdp state */
2316                 err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2317                                           NETBACK_XDP_HEADROOM_ENABLE :
2318                                           NETBACK_XDP_HEADROOM_DISABLE);
2319                 if (err)
2320                         goto out_unlocked;
2321         }
2322
2323         rtnl_lock();
2324         if (info->queues)
2325                 xennet_destroy_queues(info);
2326
2327         /* For the case of a reconnect reset the "broken" indicator. */
2328         info->broken = false;
2329
2330         err = xennet_create_queues(info, &num_queues);
2331         if (err < 0) {
2332                 xenbus_dev_fatal(dev, err, "creating queues");
2333                 kfree(info->queues);
2334                 info->queues = NULL;
2335                 goto out;
2336         }
2337         rtnl_unlock();
2338
2339         /* Create shared ring, alloc event channel -- for each queue */
2340         for (i = 0; i < num_queues; ++i) {
2341                 queue = &info->queues[i];
2342                 err = setup_netfront(dev, queue, feature_split_evtchn);
2343                 if (err)
2344                         goto destroy_ring;
2345         }
2346
2347 again:
2348         err = xenbus_transaction_start(&xbt);
2349         if (err) {
2350                 xenbus_dev_fatal(dev, err, "starting transaction");
2351                 goto destroy_ring;
2352         }
2353
2354         if (xenbus_exists(XBT_NIL,
2355                           info->xbdev->otherend, "multi-queue-max-queues")) {
2356                 /* Write the number of queues */
2357                 err = xenbus_printf(xbt, dev->nodename,
2358                                     "multi-queue-num-queues", "%u", num_queues);
2359                 if (err) {
2360                         message = "writing multi-queue-num-queues";
2361                         goto abort_transaction_no_dev_fatal;
2362                 }
2363         }
2364
2365         if (num_queues == 1) {
2366                 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2367                 if (err)
2368                         goto abort_transaction_no_dev_fatal;
2369         } else {
2370                 /* Write the keys for each queue */
2371                 for (i = 0; i < num_queues; ++i) {
2372                         queue = &info->queues[i];
2373                         err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2374                         if (err)
2375                                 goto abort_transaction_no_dev_fatal;
2376                 }
2377         }
2378
2379         /* The remaining keys are not queue-specific */
2380         err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2381                             1);
2382         if (err) {
2383                 message = "writing request-rx-copy";
2384                 goto abort_transaction;
2385         }
2386
2387         err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2388         if (err) {
2389                 message = "writing feature-rx-notify";
2390                 goto abort_transaction;
2391         }
2392
2393         err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2394         if (err) {
2395                 message = "writing feature-sg";
2396                 goto abort_transaction;
2397         }
2398
2399         err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2400         if (err) {
2401                 message = "writing feature-gso-tcpv4";
2402                 goto abort_transaction;
2403         }
2404
2405         err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2406         if (err) {
2407                 message = "writing feature-gso-tcpv6";
2408                 goto abort_transaction;
2409         }
2410
2411         err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2412                            "1");
2413         if (err) {
2414                 message = "writing feature-ipv6-csum-offload";
2415                 goto abort_transaction;
2416         }
2417
2418         err = xenbus_transaction_end(xbt, 0);
2419         if (err) {
2420                 if (err == -EAGAIN)
2421                         goto again;
2422                 xenbus_dev_fatal(dev, err, "completing transaction");
2423                 goto destroy_ring;
2424         }
2425
2426         return 0;
2427
2428  abort_transaction:
2429         xenbus_dev_fatal(dev, err, "%s", message);
2430 abort_transaction_no_dev_fatal:
2431         xenbus_transaction_end(xbt, 1);
2432  destroy_ring:
2433         xennet_disconnect_backend(info);
2434         rtnl_lock();
2435         xennet_destroy_queues(info);
2436  out:
2437         rtnl_unlock();
2438 out_unlocked:
2439         device_unregister(&dev->dev);
2440         return err;
2441 }
2442
2443 static int xennet_connect(struct net_device *dev)
2444 {
2445         struct netfront_info *np = netdev_priv(dev);
2446         unsigned int num_queues = 0;
2447         int err;
2448         unsigned int j = 0;
2449         struct netfront_queue *queue = NULL;
2450
2451         if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2452                 dev_info(&dev->dev,
2453                          "backend does not support copying receive path\n");
2454                 return -ENODEV;
2455         }
2456
2457         err = talk_to_netback(np->xbdev, np);
2458         if (err)
2459                 return err;
2460         if (np->netback_has_xdp_headroom)
2461                 pr_info("backend supports XDP headroom\n");
2462         if (np->bounce)
2463                 dev_info(&np->xbdev->dev,
2464                          "bouncing transmitted data to zeroed pages\n");
2465
2466         /* talk_to_netback() sets the correct number of queues */
2467         num_queues = dev->real_num_tx_queues;
2468
2469         if (dev->reg_state == NETREG_UNINITIALIZED) {
2470                 err = register_netdev(dev);
2471                 if (err) {
2472                         pr_warn("%s: register_netdev err=%d\n", __func__, err);
2473                         device_unregister(&np->xbdev->dev);
2474                         return err;
2475                 }
2476         }
2477
2478         rtnl_lock();
2479         netdev_update_features(dev);
2480         rtnl_unlock();
2481
2482         /*
2483          * All public and private state should now be sane.  Get
2484          * ready to start sending and receiving packets and give the driver
2485          * domain a kick because we've probably just requeued some
2486          * packets.
2487          */
2488         netif_tx_lock_bh(np->netdev);
2489         netif_device_attach(np->netdev);
2490         netif_tx_unlock_bh(np->netdev);
2491
2492         netif_carrier_on(np->netdev);
2493         for (j = 0; j < num_queues; ++j) {
2494                 queue = &np->queues[j];
2495
2496                 notify_remote_via_irq(queue->tx_irq);
2497                 if (queue->tx_irq != queue->rx_irq)
2498                         notify_remote_via_irq(queue->rx_irq);
2499
2500                 spin_lock_irq(&queue->tx_lock);
2501                 xennet_tx_buf_gc(queue);
2502                 spin_unlock_irq(&queue->tx_lock);
2503
2504                 spin_lock_bh(&queue->rx_lock);
2505                 xennet_alloc_rx_buffers(queue);
2506                 spin_unlock_bh(&queue->rx_lock);
2507         }
2508
2509         return 0;
2510 }
2511
2512 /**
2513  * Callback received when the backend's state changes.
2514  */
2515 static void netback_changed(struct xenbus_device *dev,
2516                             enum xenbus_state backend_state)
2517 {
2518         struct netfront_info *np = dev_get_drvdata(&dev->dev);
2519         struct net_device *netdev = np->netdev;
2520
2521         dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2522
2523         wake_up_all(&module_wq);
2524
2525         switch (backend_state) {
2526         case XenbusStateInitialising:
2527         case XenbusStateInitialised:
2528         case XenbusStateReconfiguring:
2529         case XenbusStateReconfigured:
2530         case XenbusStateUnknown:
2531                 break;
2532
2533         case XenbusStateInitWait:
2534                 if (dev->state != XenbusStateInitialising)
2535                         break;
2536                 if (xennet_connect(netdev) != 0)
2537                         break;
2538                 xenbus_switch_state(dev, XenbusStateConnected);
2539                 break;
2540
2541         case XenbusStateConnected:
2542                 netdev_notify_peers(netdev);
2543                 break;
2544
2545         case XenbusStateClosed:
2546                 if (dev->state == XenbusStateClosed)
2547                         break;
2548                 fallthrough;    /* Missed the backend's CLOSING state */
2549         case XenbusStateClosing:
2550                 xenbus_frontend_closed(dev);
2551                 break;
2552         }
2553 }
2554
2555 static const struct xennet_stat {
2556         char name[ETH_GSTRING_LEN];
2557         u16 offset;
2558 } xennet_stats[] = {
2559         {
2560                 "rx_gso_checksum_fixup",
2561                 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2562         },
2563 };
2564
2565 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2566 {
2567         switch (string_set) {
2568         case ETH_SS_STATS:
2569                 return ARRAY_SIZE(xennet_stats);
2570         default:
2571                 return -EINVAL;
2572         }
2573 }
2574
2575 static void xennet_get_ethtool_stats(struct net_device *dev,
2576                                      struct ethtool_stats *stats, u64 * data)
2577 {
2578         void *np = netdev_priv(dev);
2579         int i;
2580
2581         for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2582                 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2583 }
2584
2585 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2586 {
2587         int i;
2588
2589         switch (stringset) {
2590         case ETH_SS_STATS:
2591                 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2592                         memcpy(data + i * ETH_GSTRING_LEN,
2593                                xennet_stats[i].name, ETH_GSTRING_LEN);
2594                 break;
2595         }
2596 }
2597
2598 static const struct ethtool_ops xennet_ethtool_ops =
2599 {
2600         .get_link = ethtool_op_get_link,
2601
2602         .get_sset_count = xennet_get_sset_count,
2603         .get_ethtool_stats = xennet_get_ethtool_stats,
2604         .get_strings = xennet_get_strings,
2605         .get_ts_info = ethtool_op_get_ts_info,
2606 };
2607
2608 #ifdef CONFIG_SYSFS
2609 static ssize_t show_rxbuf(struct device *dev,
2610                           struct device_attribute *attr, char *buf)
2611 {
2612         return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2613 }
2614
2615 static ssize_t store_rxbuf(struct device *dev,
2616                            struct device_attribute *attr,
2617                            const char *buf, size_t len)
2618 {
2619         char *endp;
2620         unsigned long target;
2621
2622         if (!capable(CAP_NET_ADMIN))
2623                 return -EPERM;
2624
2625         target = simple_strtoul(buf, &endp, 0);
2626         if (endp == buf)
2627                 return -EBADMSG;
2628
2629         /* rxbuf_min and rxbuf_max are no longer configurable. */
2630
2631         return len;
2632 }
2633
2634 static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2635 static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2636 static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2637
2638 static struct attribute *xennet_dev_attrs[] = {
2639         &dev_attr_rxbuf_min.attr,
2640         &dev_attr_rxbuf_max.attr,
2641         &dev_attr_rxbuf_cur.attr,
2642         NULL
2643 };
2644
2645 static const struct attribute_group xennet_dev_group = {
2646         .attrs = xennet_dev_attrs
2647 };
2648 #endif /* CONFIG_SYSFS */
2649
2650 static void xennet_bus_close(struct xenbus_device *dev)
2651 {
2652         int ret;
2653
2654         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2655                 return;
2656         do {
2657                 xenbus_switch_state(dev, XenbusStateClosing);
2658                 ret = wait_event_timeout(module_wq,
2659                                    xenbus_read_driver_state(dev->otherend) ==
2660                                    XenbusStateClosing ||
2661                                    xenbus_read_driver_state(dev->otherend) ==
2662                                    XenbusStateClosed ||
2663                                    xenbus_read_driver_state(dev->otherend) ==
2664                                    XenbusStateUnknown,
2665                                    XENNET_TIMEOUT);
2666         } while (!ret);
2667
2668         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2669                 return;
2670
2671         do {
2672                 xenbus_switch_state(dev, XenbusStateClosed);
2673                 ret = wait_event_timeout(module_wq,
2674                                    xenbus_read_driver_state(dev->otherend) ==
2675                                    XenbusStateClosed ||
2676                                    xenbus_read_driver_state(dev->otherend) ==
2677                                    XenbusStateUnknown,
2678                                    XENNET_TIMEOUT);
2679         } while (!ret);
2680 }
2681
2682 static int xennet_remove(struct xenbus_device *dev)
2683 {
2684         struct netfront_info *info = dev_get_drvdata(&dev->dev);
2685
2686         xennet_bus_close(dev);
2687         xennet_disconnect_backend(info);
2688
2689         if (info->netdev->reg_state == NETREG_REGISTERED)
2690                 unregister_netdev(info->netdev);
2691
2692         if (info->queues) {
2693                 rtnl_lock();
2694                 xennet_destroy_queues(info);
2695                 rtnl_unlock();
2696         }
2697         xennet_free_netdev(info->netdev);
2698
2699         return 0;
2700 }
2701
2702 static const struct xenbus_device_id netfront_ids[] = {
2703         { "vif" },
2704         { "" }
2705 };
2706
2707 static struct xenbus_driver netfront_driver = {
2708         .ids = netfront_ids,
2709         .probe = netfront_probe,
2710         .remove = xennet_remove,
2711         .resume = netfront_resume,
2712         .otherend_changed = netback_changed,
2713 };
2714
2715 static int __init netif_init(void)
2716 {
2717         if (!xen_domain())
2718                 return -ENODEV;
2719
2720         if (!xen_has_pv_nic_devices())
2721                 return -ENODEV;
2722
2723         pr_info("Initialising Xen virtual ethernet driver\n");
2724
2725         /* Allow as many queues as there are CPUs inut max. 8 if user has not
2726          * specified a value.
2727          */
2728         if (xennet_max_queues == 0)
2729                 xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2730                                           num_online_cpus());
2731
2732         return xenbus_register_frontend(&netfront_driver);
2733 }
2734 module_init(netif_init);
2735
2736
2737 static void __exit netif_exit(void)
2738 {
2739         xenbus_unregister_driver(&netfront_driver);
2740 }
2741 module_exit(netif_exit);
2742
2743 MODULE_DESCRIPTION("Xen virtual network device frontend");
2744 MODULE_LICENSE("GPL");
2745 MODULE_ALIAS("xen:vif");
2746 MODULE_ALIAS("xennet");