GNU Linux-libre 4.9.304-gnu1
drivers/net/xen-netfront.c
/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

/* Module parameters */
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
                 "Maximum number of queues per virtual interface");
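/*
 * Illustrative usage (not part of this file): the cap can be set at
 * module load time, e.g. "modprobe xen-netfront max_queues=4", or via
 * xen_netfront.max_queues=4 on the kernel command line when built in.
 */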

#define XENNET_TIMEOUT  (5 * HZ)

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
        int pull_to;
};

#define NETFRONT_SKB_CB(skb)    ((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF       0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

static DECLARE_WAIT_QUEUE_HEAD(module_wq);

struct netfront_stats {
        u64                     packets;
        u64                     bytes;
        struct u64_stats_sync   syncp;
};

struct netfront_info;

struct netfront_queue {
        unsigned int id; /* Queue ID, 0-based */
        char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
        struct netfront_info *info;

        struct napi_struct napi;

        /* Split event channels support, tx_* == rx_* when using
         * single event channel.
         */
        unsigned int tx_evtchn, rx_evtchn;
        unsigned int tx_irq, rx_irq;
        /* Only used when split event channels support is enabled */
        char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
        char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

        spinlock_t   tx_lock;
        struct xen_netif_tx_front_ring tx;
        int tx_ring_ref;

        /*
         * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
         * are linked from tx_skb_freelist through tx_link.
         */
        struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
        unsigned short tx_link[NET_TX_RING_SIZE];
#define TX_LINK_NONE 0xffff
#define TX_PENDING   0xfffe
        grant_ref_t gref_tx_head;
        grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
        struct page *grant_tx_page[NET_TX_RING_SIZE];
        unsigned tx_skb_freelist;
        unsigned int tx_pend_queue;

        spinlock_t   rx_lock ____cacheline_aligned_in_smp;
        struct xen_netif_rx_front_ring rx;
        int rx_ring_ref;

        struct timer_list rx_refill_timer;

        struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

        unsigned int rx_rsp_unconsumed;
        spinlock_t rx_cons_lock;
};

struct netfront_info {
        struct list_head list;
        struct net_device *netdev;

        struct xenbus_device *xbdev;

        /* Multi-queue support */
        struct netfront_queue *queues;

        /* Statistics */
        struct netfront_stats __percpu *rx_stats;
        struct netfront_stats __percpu *tx_stats;

        /* Is the device behaving sanely? */
        bool broken;

        atomic_t rx_gso_checksum_fixup;
};

struct netfront_rx_info {
        struct xen_netif_rx_response rx;
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

/*
 * Helpers for acquiring and freeing slots in tx_skbs[].
 */
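/*
 * Worked example (illustrative): with tx_skb_freelist == 3 and
 * tx_link[3] == 7, get_id_from_list() hands out slot 3 and moves the
 * head to 7; a later add_id_to_list() with id 3 pushes slot 3 back onto
 * the front of the free list.
 */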

static void add_id_to_list(unsigned *head, unsigned short *list,
                           unsigned short id)
{
        list[id] = *head;
        *head = id;
}

static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
{
        unsigned int id = *head;

        if (id != TX_LINK_NONE) {
                *head = list[id];
                list[id] = TX_LINK_NONE;
        }
        return id;
}

static int xennet_rxidx(RING_IDX idx)
{
        return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
                                         RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        struct sk_buff *skb = queue->rx_skbs[i];
        queue->rx_skbs[i] = NULL;
        return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
                                            RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        grant_ref_t ref = queue->grant_rx_ref[i];
        queue->grant_rx_ref[i] = GRANT_INVALID_REF;
        return ref;
}

#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif

static bool xennet_can_sg(struct net_device *dev)
{
        return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(unsigned long data)
{
        struct netfront_queue *queue = (struct netfront_queue *)data;
        napi_schedule(&queue->napi);
}

static int netfront_tx_slot_available(struct netfront_queue *queue)
{
        return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
                (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}
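/*
 * Worked example for netfront_tx_slot_available() (assuming 4 KiB
 * pages): NET_TX_RING_SIZE is 256 and XEN_NETIF_NR_SLOTS_MIN is 18, so
 * the queue is treated as full once 237 requests are outstanding,
 * keeping headroom for one maximally fragmented packet.
 */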

static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
        struct net_device *dev = queue->info->netdev;
        struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

        if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
            netfront_tx_slot_available(queue) &&
            likely(netif_running(dev)))
                netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}


static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
        struct sk_buff *skb;
        struct page *page;

        skb = __netdev_alloc_skb(queue->info->netdev,
                                 RX_COPY_THRESHOLD + NET_IP_ALIGN,
                                 GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
        if (!page) {
                kfree_skb(skb);
                return NULL;
        }
        skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

        /* Align the IP header to a 16-byte boundary */
        skb_reserve(skb, NET_IP_ALIGN);
        skb->dev = queue->info->netdev;

        return skb;
}


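/*
 * Refill the RX ring: grant each freshly allocated page to the backend
 * and advance the private request producer. On allocation failure, or
 * if too few slots could be filled, retry later via rx_refill_timer;
 * otherwise notify the backend only if the ring macros say an event is
 * actually needed.
 */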
static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
        RING_IDX req_prod = queue->rx.req_prod_pvt;
        int notify;
        int err = 0;

        if (unlikely(!netif_carrier_ok(queue->info->netdev)))
                return;

        for (req_prod = queue->rx.req_prod_pvt;
             req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
             req_prod++) {
                struct sk_buff *skb;
                unsigned short id;
                grant_ref_t ref;
                struct page *page;
                struct xen_netif_rx_request *req;

                skb = xennet_alloc_one_rx_buffer(queue);
                if (!skb) {
                        err = -ENOMEM;
                        break;
                }

                id = xennet_rxidx(req_prod);

                BUG_ON(queue->rx_skbs[id]);
                queue->rx_skbs[id] = skb;

                ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
                WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
                queue->grant_rx_ref[id] = ref;

                page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

                req = RING_GET_REQUEST(&queue->rx, req_prod);
                gnttab_page_grant_foreign_access_ref_one(ref,
                                                         queue->info->xbdev->otherend_id,
                                                         page,
                                                         0);
                req->id = id;
                req->gref = ref;
        }

        queue->rx.req_prod_pvt = req_prod;

        /* Try again later if there are not enough requests or skb allocation
         * failed.
         * Enough requests is quantified as the sum of newly created slots and
         * the unconsumed slots at the backend.
         */
        if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
            unlikely(err)) {
                mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
                return;
        }

        wmb();          /* barrier so backend sees requests */

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
        if (notify)
                notify_remote_via_irq(queue->rx_irq);
}

static int xennet_open(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        unsigned int num_queues = dev->real_num_tx_queues;
        unsigned int i = 0;
        struct netfront_queue *queue = NULL;

        if (!np->queues || np->broken)
                return -ENODEV;

        for (i = 0; i < num_queues; ++i) {
                queue = &np->queues[i];
                napi_enable(&queue->napi);

                spin_lock_bh(&queue->rx_lock);
                if (netif_carrier_ok(dev)) {
                        xennet_alloc_rx_buffers(queue);
                        queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
                        if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
                                napi_schedule(&queue->napi);
                }
                spin_unlock_bh(&queue->rx_lock);
        }

        netif_tx_start_all_queues(dev);

        return 0;
}

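/*
 * Reclaim TX slots the backend has completed: validate each response,
 * revoke the grant, return the grant reference and ring slot to their
 * free lists, and free the skb. Any malformed response marks the whole
 * device as broken.
 */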
static bool xennet_tx_buf_gc(struct netfront_queue *queue)
{
        RING_IDX cons, prod;
        unsigned short id;
        struct sk_buff *skb;
        bool more_to_do;
        bool work_done = false;
        const struct device *dev = &queue->info->netdev->dev;

        BUG_ON(!netif_carrier_ok(queue->info->netdev));

        do {
                prod = queue->tx.sring->rsp_prod;
                if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
                        dev_alert(dev, "Illegal number of responses %u\n",
                                  prod - queue->tx.rsp_cons);
                        goto err;
                }
                rmb(); /* Ensure we see responses up to 'rp'. */

                for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
                        struct xen_netif_tx_response txrsp;

                        work_done = true;

                        RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
                        if (txrsp.status == XEN_NETIF_RSP_NULL)
                                continue;

                        id = txrsp.id;
                        if (id >= RING_SIZE(&queue->tx)) {
                                dev_alert(dev,
                                          "Response has incorrect id (%u)\n",
                                          id);
                                goto err;
                        }
                        if (queue->tx_link[id] != TX_PENDING) {
                                dev_alert(dev,
                                          "Response for inactive request\n");
                                goto err;
                        }

                        queue->tx_link[id] = TX_LINK_NONE;
                        skb = queue->tx_skbs[id];
                        queue->tx_skbs[id] = NULL;
                        if (unlikely(gnttab_query_foreign_access(
                                queue->grant_tx_ref[id]) != 0)) {
                                dev_alert(dev,
                                          "Grant still in use by backend domain\n");
                                goto err;
                        }
                        gnttab_end_foreign_access_ref(
                                queue->grant_tx_ref[id], GNTMAP_readonly);
                        gnttab_release_grant_reference(
                                &queue->gref_tx_head, queue->grant_tx_ref[id]);
                        queue->grant_tx_ref[id] = GRANT_INVALID_REF;
                        queue->grant_tx_page[id] = NULL;
                        add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
                        dev_kfree_skb_irq(skb);
                }

                queue->tx.rsp_cons = prod;

                RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
        } while (more_to_do);

        xennet_maybe_wake_tx(queue);

        return work_done;

 err:
        queue->info->broken = true;
        dev_alert(dev, "Disabled for further use\n");

        return work_done;
}

struct xennet_gnttab_make_txreq {
        struct netfront_queue *queue;
        struct sk_buff *skb;
        struct page *page;
        struct xen_netif_tx_request *tx;      /* Last request on ring page */
        struct xen_netif_tx_request tx_local; /* Local copy of last request */
        unsigned int size;
};

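/*
 * Per-chunk callback (invoked via gnttab_for_one_grant() or
 * gnttab_foreach_grant_in_range()): take a free slot and a grant
 * reference, grant the frame read-only to the backend, and write one TX
 * request into the ring.
 */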
static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
                                  unsigned int len, void *data)
{
        struct xennet_gnttab_make_txreq *info = data;
        unsigned int id;
        struct xen_netif_tx_request *tx;
        grant_ref_t ref;
        /* convenient aliases */
        struct page *page = info->page;
        struct netfront_queue *queue = info->queue;
        struct sk_buff *skb = info->skb;

        id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
        tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
        ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
        WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));

        gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
                                        gfn, GNTMAP_readonly);

        queue->tx_skbs[id] = skb;
        queue->grant_tx_page[id] = page;
        queue->grant_tx_ref[id] = ref;

        info->tx_local.id = id;
        info->tx_local.gref = ref;
        info->tx_local.offset = offset;
        info->tx_local.size = len;
        info->tx_local.flags = 0;

        *tx = info->tx_local;

        /*
         * Put the request in the pending queue, it will be set to be pending
         * when the producer index is about to be raised.
         */
        add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);

        info->tx = tx;
        info->size += info->tx_local.size;
}

static struct xen_netif_tx_request *xennet_make_first_txreq(
        struct xennet_gnttab_make_txreq *info,
        unsigned int offset, unsigned int len)
{
        info->size = 0;

        gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);

        return info->tx;
}

static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
                                  unsigned int len, void *data)
{
        struct xennet_gnttab_make_txreq *info = data;

        info->tx->flags |= XEN_NETTXF_more_data;
        skb_get(info->skb);
        xennet_tx_setup_grant(gfn, offset, len, data);
}

static void xennet_make_txreqs(
        struct xennet_gnttab_make_txreq *info,
        struct page *page,
        unsigned int offset, unsigned int len)
{
        /* Skip unused frames from start of page */
        page += offset >> PAGE_SHIFT;
        offset &= ~PAGE_MASK;

        while (len) {
                info->page = page;
                info->size = 0;

                gnttab_foreach_grant_in_range(page, offset, len,
                                              xennet_make_one_txreq,
                                              info);

                page++;
                offset = 0;
                len -= info->size;
        }
}
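/*
 * Together, xennet_make_first_txreq() and xennet_make_txreqs() turn one
 * skb (linear area plus page frags) into a chain of TX requests linked
 * by XEN_NETTXF_more_data; only the first request carries the total
 * packet length.
 */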

/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
static int xennet_count_skb_slots(struct sk_buff *skb)
{
        int i, frags = skb_shinfo(skb)->nr_frags;
        int slots;

        slots = gnttab_count_grant(offset_in_page(skb->data),
                                   skb_headlen(skb));

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
                unsigned long offset = frag->page_offset;

                /* Skip unused frames from start of page */
                offset &= ~PAGE_MASK;

                slots += gnttab_count_grant(offset, size);
        }

        return slots;
}

static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
                               void *accel_priv, select_queue_fallback_t fallback)
{
        unsigned int num_queues = dev->real_num_tx_queues;
        u32 hash;
        u16 queue_idx;

        /* First, check if there is only one queue */
        if (num_queues == 1) {
                queue_idx = 0;
        } else {
                hash = skb_get_hash(skb);
                queue_idx = hash % num_queues;
        }

        return queue_idx;
}

static void xennet_mark_tx_pending(struct netfront_queue *queue)
{
        unsigned int i;

        while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
                TX_LINK_NONE)
                queue->tx_link[i] = TX_PENDING;
}

#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
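/*
 * A maximally sized (64 KiB) GSO frame spans at most
 * 65536 / XEN_PAGE_SIZE pages, plus one extra slot for a start that is
 * not page aligned.
 */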

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
        struct xen_netif_tx_request *first_tx;
        unsigned int i;
        int notify;
        int slots;
        struct page *page;
        unsigned int offset;
        unsigned int len;
        unsigned long flags;
        struct netfront_queue *queue = NULL;
        struct xennet_gnttab_make_txreq info = { };
        unsigned int num_queues = dev->real_num_tx_queues;
        u16 queue_index;
        struct sk_buff *nskb;

        /* Drop the packet if no queues are set up */
        if (num_queues < 1)
                goto drop;
        if (unlikely(np->broken))
                goto drop;
        /* Determine which queue to transmit this SKB on */
        queue_index = skb_get_queue_mapping(skb);
        queue = &np->queues[queue_index];

        /* If skb->len is too big for wire format, drop skb and alert
         * user about misconfiguration.
         */
        if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
                net_alert_ratelimited(
                        "xennet: skb->len = %u, too big for wire format\n",
                        skb->len);
                goto drop;
        }

        slots = xennet_count_skb_slots(skb);
        if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
                net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
                                    slots, skb->len);
                if (skb_linearize(skb))
                        goto drop;
        }

        page = virt_to_page(skb->data);
        offset = offset_in_page(skb->data);

        /* The first req should be at least ETH_HLEN size or the packet will be
         * dropped by netback.
         */
        if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
                nskb = skb_copy(skb, GFP_ATOMIC);
                if (!nskb)
                        goto drop;
                dev_kfree_skb_any(skb);
                skb = nskb;
                page = virt_to_page(skb->data);
                offset = offset_in_page(skb->data);
        }

        len = skb_headlen(skb);

        spin_lock_irqsave(&queue->tx_lock, flags);

        if (unlikely(!netif_carrier_ok(dev) ||
                     (slots > 1 && !xennet_can_sg(dev)) ||
                     netif_needs_gso(skb, netif_skb_features(skb)))) {
                spin_unlock_irqrestore(&queue->tx_lock, flags);
                goto drop;
        }

        /* First request for the linear area. */
        info.queue = queue;
        info.skb = skb;
        info.page = page;
        first_tx = xennet_make_first_txreq(&info, offset, len);
        offset += info.tx_local.size;
        if (offset == PAGE_SIZE) {
                page++;
                offset = 0;
        }
        len -= info.tx_local.size;

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                /* local packet? */
                first_tx->flags |= XEN_NETTXF_csum_blank |
                                   XEN_NETTXF_data_validated;
        else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                /* remote but checksummed. */
                first_tx->flags |= XEN_NETTXF_data_validated;

        /* Optional extra info after the first request. */
        if (skb_shinfo(skb)->gso_size) {
                struct xen_netif_extra_info *gso;

                gso = (struct xen_netif_extra_info *)
                        RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

                first_tx->flags |= XEN_NETTXF_extra_info;

                gso->u.gso.size = skb_shinfo(skb)->gso_size;
                gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
                        XEN_NETIF_GSO_TYPE_TCPV6 :
                        XEN_NETIF_GSO_TYPE_TCPV4;
                gso->u.gso.pad = 0;
                gso->u.gso.features = 0;

                gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                gso->flags = 0;
        }

        /* Requests for the rest of the linear area. */
        xennet_make_txreqs(&info, page, offset, len);

        /* Requests for all the frags. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                xennet_make_txreqs(&info, skb_frag_page(frag),
                                        frag->page_offset,
                                        skb_frag_size(frag));
        }

        /* First request has the packet length. */
        first_tx->size = skb->len;

        xennet_mark_tx_pending(queue);

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
                notify_remote_via_irq(queue->tx_irq);

        u64_stats_update_begin(&tx_stats->syncp);
        tx_stats->bytes += skb->len;
        tx_stats->packets++;
        u64_stats_update_end(&tx_stats->syncp);

        /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
        xennet_tx_buf_gc(queue);

        if (!netfront_tx_slot_available(queue))
                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

        spin_unlock_irqrestore(&queue->tx_lock, flags);

        return NETDEV_TX_OK;

 drop:
        dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        unsigned int num_queues = dev->real_num_tx_queues;
        unsigned int i;
        struct netfront_queue *queue;
        netif_tx_stop_all_queues(np->netdev);
        for (i = 0; i < num_queues; ++i) {
                queue = &np->queues[i];
                napi_disable(&queue->napi);
        }
        return 0;
}

static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->rx_cons_lock, flags);
        queue->rx.rsp_cons = val;
        queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
        spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
}
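/*
 * rx.rsp_cons and the cached count of unconsumed responses are updated
 * together under rx_cons_lock, so the RX interrupt handler can detect a
 * backend whose producer index moves backwards (see xennet_handle_rx()).
 */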

static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
                                grant_ref_t ref)
{
        int new = xennet_rxidx(queue->rx.req_prod_pvt);

        BUG_ON(queue->rx_skbs[new]);
        queue->rx_skbs[new] = skb;
        queue->grant_rx_ref[new] = ref;
        RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
        RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
        queue->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_queue *queue,
                             struct xen_netif_extra_info *extras,
                             RING_IDX rp)
{
        struct xen_netif_extra_info extra;
        struct device *dev = &queue->info->netdev->dev;
        RING_IDX cons = queue->rx.rsp_cons;
        int err = 0;

        do {
                struct sk_buff *skb;
                grant_ref_t ref;

                if (unlikely(cons + 1 == rp)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Missing extra info\n");
                        err = -EBADR;
                        break;
                }

                RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);

                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Invalid extra type: %d\n",
                                         extra.type);
                        err = -EINVAL;
                } else {
                        extras[extra.type - 1] = extra;
                }

                skb = xennet_get_rx_skb(queue, cons);
                ref = xennet_get_rx_ref(queue, cons);
                xennet_move_rx_slot(queue, skb, ref);
        } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

        xennet_set_rx_rsp_cons(queue, cons);
        return err;
}

static int xennet_get_responses(struct netfront_queue *queue,
                                struct netfront_rx_info *rinfo, RING_IDX rp,
                                struct sk_buff_head *list)
{
        struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
        struct xen_netif_extra_info *extras = rinfo->extras;
        struct device *dev = &queue->info->netdev->dev;
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
        grant_ref_t ref = xennet_get_rx_ref(queue, cons);
        int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
        int slots = 1;
        int err = 0;
        unsigned long ret;

        if (rx->flags & XEN_NETRXF_extra_info) {
                err = xennet_get_extras(queue, extras, rp);
                cons = queue->rx.rsp_cons;
        }

        for (;;) {
                if (unlikely(rx->status < 0 ||
                             rx->offset + rx->status > XEN_PAGE_SIZE)) {
                        if (net_ratelimit())
                                dev_warn(dev, "rx->offset: %u, size: %d\n",
                                         rx->offset, rx->status);
                        xennet_move_rx_slot(queue, skb, ref);
                        err = -EINVAL;
                        goto next;
                }

                /*
                 * This definitely indicates a bug, either in this driver or in
                 * the backend driver. In future this should flag the bad
                 * situation to the system controller to reboot the backend.
                 */
                if (ref == GRANT_INVALID_REF) {
                        if (net_ratelimit())
                                dev_warn(dev, "Bad rx response id %d.\n",
                                         rx->id);
                        err = -EINVAL;
                        goto next;
                }

                ret = gnttab_end_foreign_access_ref(ref, 0);
                BUG_ON(!ret);

                gnttab_release_grant_reference(&queue->gref_rx_head, ref);

                __skb_queue_tail(list, skb);

next:
                if (!(rx->flags & XEN_NETRXF_more_data))
                        break;

                if (cons + slots == rp) {
                        if (net_ratelimit())
                                dev_warn(dev, "Need more slots\n");
                        err = -ENOENT;
                        break;
                }

                RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
                rx = &rx_local;
                skb = xennet_get_rx_skb(queue, cons + slots);
                ref = xennet_get_rx_ref(queue, cons + slots);
                slots++;
        }

        if (unlikely(slots > max)) {
                if (net_ratelimit())
                        dev_warn(dev, "Too many slots\n");
                err = -E2BIG;
        }

        if (unlikely(err))
                xennet_set_rx_rsp_cons(queue, cons + slots);

        return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
                              struct xen_netif_extra_info *gso)
{
        if (!gso->u.gso.size) {
                if (net_ratelimit())
                        pr_warn("GSO size must not be zero\n");
                return -EINVAL;
        }

        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
            gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
                if (net_ratelimit())
                        pr_warn("Bad GSO type %d\n", gso->u.gso.type);
                return -EINVAL;
        }

        skb_shinfo(skb)->gso_size = gso->u.gso.size;
        skb_shinfo(skb)->gso_type =
                (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
                SKB_GSO_TCPV4 :
                SKB_GSO_TCPV6;

        /* Header must be checked, and gso_segs computed. */
        skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
        skb_shinfo(skb)->gso_segs = 0;

        return 0;
}

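/*
 * Attach the buffers of the remaining slots of a multi-slot packet as
 * frags of the head skb, pulling data into the linear area when the
 * frag array would otherwise overflow, and giving up if there is still
 * no room.
 */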
static int xennet_fill_frags(struct netfront_queue *queue,
                             struct sk_buff *skb,
                             struct sk_buff_head *list)
{
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *nskb;

        while ((nskb = __skb_dequeue(list))) {
                struct xen_netif_rx_response rx;
                skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

                RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);

                if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

                        BUG_ON(pull_to < skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
                        xennet_set_rx_rsp_cons(queue,
                                               ++cons + skb_queue_len(list));
                        kfree_skb(nskb);
                        return -ENOENT;
                }

                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                skb_frag_page(nfrag),
                                rx.offset, rx.status, PAGE_SIZE);

                skb_shinfo(nskb)->nr_frags = 0;
                kfree_skb(nskb);
        }

        xennet_set_rx_rsp_cons(queue, cons);

        return 0;
}

static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
        bool recalculate_partial_csum = false;

        /*
         * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
         * peers can fail to set NETRXF_csum_blank when sending a GSO
         * frame. In this case force the SKB to CHECKSUM_PARTIAL and
         * recalculate the partial checksum.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
                struct netfront_info *np = netdev_priv(dev);
                atomic_inc(&np->rx_gso_checksum_fixup);
                skb->ip_summed = CHECKSUM_PARTIAL;
                recalculate_partial_csum = true;
        }

        /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        return skb_checksum_setup(skb, recalculate_partial_csum);
}

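/*
 * Hand completed skbs to the stack via GRO, finishing the deferred
 * ethernet and checksum work; returns the number of packets that had to
 * be dropped.
 */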
static int handle_incoming_queue(struct netfront_queue *queue,
                                 struct sk_buff_head *rxq)
{
        struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
        int packets_dropped = 0;
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(rxq)) != NULL) {
                int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

                if (pull_to > skb_headlen(skb))
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));

                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, queue->info->netdev);
                skb_reset_network_header(skb);

                if (checksum_setup(queue->info->netdev, skb)) {
                        kfree_skb(skb);
                        packets_dropped++;
                        queue->info->netdev->stats.rx_errors++;
                        continue;
                }

                u64_stats_update_begin(&rx_stats->syncp);
                rx_stats->packets++;
                rx_stats->bytes += skb->len;
                u64_stats_update_end(&rx_stats->syncp);

                /* Pass it up. */
                napi_gro_receive(&queue->napi, skb);
        }

        return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
        struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
        struct net_device *dev = queue->info->netdev;
        struct sk_buff *skb;
        struct netfront_rx_info rinfo;
        struct xen_netif_rx_response *rx = &rinfo.rx;
        struct xen_netif_extra_info *extras = rinfo.extras;
        RING_IDX i, rp;
        int work_done;
        struct sk_buff_head rxq;
        struct sk_buff_head errq;
        struct sk_buff_head tmpq;
        int err;

        spin_lock(&queue->rx_lock);

        skb_queue_head_init(&rxq);
        skb_queue_head_init(&errq);
        skb_queue_head_init(&tmpq);

        rp = queue->rx.sring->rsp_prod;
        if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
                dev_alert(&dev->dev, "Illegal number of responses %u\n",
                          rp - queue->rx.rsp_cons);
                queue->info->broken = true;
                spin_unlock(&queue->rx_lock);
                return 0;
        }
        rmb(); /* Ensure we see queued responses up to 'rp'. */

        i = queue->rx.rsp_cons;
        work_done = 0;
        while ((i != rp) && (work_done < budget)) {
                RING_COPY_RESPONSE(&queue->rx, i, rx);
                memset(extras, 0, sizeof(rinfo.extras));

                err = xennet_get_responses(queue, &rinfo, rp, &tmpq);

                if (unlikely(err)) {
err:
                        while ((skb = __skb_dequeue(&tmpq)))
                                __skb_queue_tail(&errq, skb);
                        dev->stats.rx_errors++;
                        i = queue->rx.rsp_cons;
                        continue;
                }

                skb = __skb_dequeue(&tmpq);

                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                        if (unlikely(xennet_set_skb_gso(skb, gso))) {
                                __skb_queue_head(&tmpq, skb);
                                xennet_set_rx_rsp_cons(queue,
                                                       queue->rx.rsp_cons +
                                                       skb_queue_len(&tmpq));
                                goto err;
                        }
                }

                NETFRONT_SKB_CB(skb)->pull_to = rx->status;
                if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
                        NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

                skb_shinfo(skb)->frags[0].page_offset = rx->offset;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
                skb->data_len = rx->status;
                skb->len += rx->status;

                if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
                        goto err;

                if (rx->flags & XEN_NETRXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
                else if (rx->flags & XEN_NETRXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                __skb_queue_tail(&rxq, skb);

                i = queue->rx.rsp_cons + 1;
                xennet_set_rx_rsp_cons(queue, i);
                work_done++;
        }

        __skb_queue_purge(&errq);

        work_done -= handle_incoming_queue(queue, &rxq);

        xennet_alloc_rx_buffers(queue);

        if (work_done < budget) {
                int more_to_do = 0;

                napi_complete(napi);

                RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
                if (more_to_do)
                        napi_schedule(napi);
        }

        spin_unlock(&queue->rx_lock);

        return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
        int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
                                                    struct rtnl_link_stats64 *tot)
{
        struct netfront_info *np = netdev_priv(dev);
        int cpu;

        for_each_possible_cpu(cpu) {
                struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
                struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
                        tx_packets = tx_stats->packets;
                        tx_bytes = tx_stats->bytes;
                } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

                do {
                        start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
                        rx_packets = rx_stats->packets;
                        rx_bytes = rx_stats->bytes;
                } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes   += rx_bytes;
                tot->tx_bytes   += tx_bytes;
        }

        tot->rx_errors  = dev->stats.rx_errors;
        tot->tx_dropped = dev->stats.tx_dropped;

        return tot;
}

static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
        struct sk_buff *skb;
        int i;

        for (i = 0; i < NET_TX_RING_SIZE; i++) {
                /* Skip over entries which are actually freelist references */
                if (!queue->tx_skbs[i])
                        continue;

                skb = queue->tx_skbs[i];
                queue->tx_skbs[i] = NULL;
                get_page(queue->grant_tx_page[i]);
                gnttab_end_foreign_access(queue->grant_tx_ref[i],
                                          GNTMAP_readonly,
                                          (unsigned long)page_address(queue->grant_tx_page[i]));
                queue->grant_tx_page[i] = NULL;
                queue->grant_tx_ref[i] = GRANT_INVALID_REF;
                add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
                dev_kfree_skb_irq(skb);
        }
}

static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
        int id, ref;

        spin_lock_bh(&queue->rx_lock);

        for (id = 0; id < NET_RX_RING_SIZE; id++) {
                struct sk_buff *skb;
                struct page *page;

                skb = queue->rx_skbs[id];
                if (!skb)
                        continue;

                ref = queue->grant_rx_ref[id];
                if (ref == GRANT_INVALID_REF)
                        continue;

                page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

                /* gnttab_end_foreign_access() needs a page ref until
                 * foreign access is ended (which may be deferred).
                 */
                get_page(page);
                gnttab_end_foreign_access(ref, 0,
                                          (unsigned long)page_address(page));
                queue->grant_rx_ref[id] = GRANT_INVALID_REF;

                kfree_skb(skb);
        }

        spin_unlock_bh(&queue->rx_lock);
}

static netdev_features_t xennet_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        struct netfront_info *np = netdev_priv(dev);
        int val;

        if (features & NETIF_F_SG) {
                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
                                 "%d", &val) < 0)
                        val = 0;

                if (!val)
                        features &= ~NETIF_F_SG;
        }

        if (features & NETIF_F_IPV6_CSUM) {
                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                                 "feature-ipv6-csum-offload", "%d", &val) < 0)
                        val = 0;

                if (!val)
                        features &= ~NETIF_F_IPV6_CSUM;
        }

        if (features & NETIF_F_TSO) {
                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                                 "feature-gso-tcpv4", "%d", &val) < 0)
                        val = 0;

                if (!val)
                        features &= ~NETIF_F_TSO;
        }

        if (features & NETIF_F_TSO6) {
                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                                 "feature-gso-tcpv6", "%d", &val) < 0)
                        val = 0;

                if (!val)
                        features &= ~NETIF_F_TSO6;
        }

        return features;
}

static int xennet_set_features(struct net_device *dev,
        netdev_features_t features)
{
        if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
                netdev_info(dev, "Reducing MTU because no SG offload\n");
                dev->mtu = ETH_DATA_LEN;
        }

        return 0;
}

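/*
 * The interrupt paths use xen_irq_lateeoi(): the EOI is flagged as
 * non-spurious only when real work was found, letting the event channel
 * core throttle a backend that raises storms of pointless events
 * (hardening in the spirit of XSA-391).
 */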
static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
{
        unsigned long flags;

        if (unlikely(queue->info->broken))
                return false;

        spin_lock_irqsave(&queue->tx_lock, flags);
        if (xennet_tx_buf_gc(queue))
                *eoi = 0;
        spin_unlock_irqrestore(&queue->tx_lock, flags);

        return true;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

        if (likely(xennet_handle_tx(dev_id, &eoiflag)))
                xen_irq_lateeoi(irq, eoiflag);

        return IRQ_HANDLED;
}

static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
{
        unsigned int work_queued;
        unsigned long flags;

        if (unlikely(queue->info->broken))
                return false;

        spin_lock_irqsave(&queue->rx_cons_lock, flags);
        work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
        if (work_queued > queue->rx_rsp_unconsumed) {
                queue->rx_rsp_unconsumed = work_queued;
                *eoi = 0;
        } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
                const struct device *dev = &queue->info->netdev->dev;

                spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
                dev_alert(dev, "RX producer index going backwards\n");
                dev_alert(dev, "Disabled for further use\n");
                queue->info->broken = true;
                return false;
        }
        spin_unlock_irqrestore(&queue->rx_cons_lock, flags);

        if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
                napi_schedule(&queue->napi);

        return true;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

        if (likely(xennet_handle_rx(dev_id, &eoiflag)))
                xen_irq_lateeoi(irq, eoiflag);

        return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

        if (xennet_handle_tx(dev_id, &eoiflag) &&
            xennet_handle_rx(dev_id, &eoiflag))
                xen_irq_lateeoi(irq, eoiflag);

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
        /* Poll each queue */
        struct netfront_info *info = netdev_priv(dev);
        unsigned int num_queues = dev->real_num_tx_queues;
        unsigned int i;

        if (info->broken)
                return;

        for (i = 0; i < num_queues; ++i)
                xennet_interrupt(0, &info->queues[i]);
}
#endif

static const struct net_device_ops xennet_netdev_ops = {
        .ndo_open            = xennet_open,
        .ndo_stop            = xennet_close,
        .ndo_start_xmit      = xennet_start_xmit,
        .ndo_change_mtu      = xennet_change_mtu,
        .ndo_get_stats64     = xennet_get_stats64,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_fix_features    = xennet_fix_features,
        .ndo_set_features    = xennet_set_features,
        .ndo_select_queue    = xennet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = xennet_poll_controller,
#endif
};

static void xennet_free_netdev(struct net_device *netdev)
{
        struct netfront_info *np = netdev_priv(netdev);

        free_percpu(np->rx_stats);
        free_percpu(np->tx_stats);
        free_netdev(netdev);
}

static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
        int err;
        struct net_device *netdev;
        struct netfront_info *np;

        netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
        if (!netdev)
                return ERR_PTR(-ENOMEM);

        np                   = netdev_priv(netdev);
        np->xbdev            = dev;

        np->queues = NULL;

        err = -ENOMEM;
        np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
        if (np->rx_stats == NULL)
                goto exit;
        np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
        if (np->tx_stats == NULL)
                goto exit;

        netdev->netdev_ops      = &xennet_netdev_ops;

        netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                                  NETIF_F_GSO_ROBUST;
        netdev->hw_features     = NETIF_F_SG |
                                  NETIF_F_IPV6_CSUM |
                                  NETIF_F_TSO | NETIF_F_TSO6;

        /*
         * Assume that all hw features are available for now. This set
         * will be adjusted by the call to netdev_update_features() in
         * xennet_connect() which is the earliest point where we can
         * negotiate with the backend regarding supported features.
         */
        netdev->features |= netdev->hw_features;

        netdev->ethtool_ops = &xennet_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);

        np->netdev = netdev;

        netif_carrier_off(netdev);

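        /*
         * Announce ourselves and wait, in XENNET_TIMEOUT slices but with
         * no overall bound, until the backend has left the Closed and
         * Unknown states.
         */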
1469         do {
1470                 xenbus_switch_state(dev, XenbusStateInitialising);
1471                 err = wait_event_timeout(module_wq,
1472                                  xenbus_read_driver_state(dev->otherend) !=
1473                                  XenbusStateClosed &&
1474                                  xenbus_read_driver_state(dev->otherend) !=
1475                                  XenbusStateUnknown, XENNET_TIMEOUT);
1476         } while (!err);
1477
1478         return netdev;
1479
1480  exit:
1481         xennet_free_netdev(netdev);
1482         return ERR_PTR(err);
1483 }
1484
1485 /**
1486  * Entry point to this code when a new device is created.  Allocate the basic
1487  * structures and the ring buffers for communication with the backend, and
1488  * inform the backend of the appropriate details for those.
1489  */
1490 static int netfront_probe(struct xenbus_device *dev,
1491                           const struct xenbus_device_id *id)
1492 {
1493         int err;
1494         struct net_device *netdev;
1495         struct netfront_info *info;
1496
1497         netdev = xennet_create_dev(dev);
1498         if (IS_ERR(netdev)) {
1499                 err = PTR_ERR(netdev);
1500                 xenbus_dev_fatal(dev, err, "creating netdev");
1501                 return err;
1502         }
1503
1504         info = netdev_priv(netdev);
1505         dev_set_drvdata(&dev->dev, info);
1506 #ifdef CONFIG_SYSFS
1507         info->netdev->sysfs_groups[0] = &xennet_dev_group;
1508 #endif
1509
1510         return 0;
1511 }
1512
1513 static void xennet_end_access(int ref, void *page)
1514 {
1515         /* This frees the page as a side-effect */
1516         if (ref != GRANT_INVALID_REF)
1517                 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1518 }
1519
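/* Tear down the connection to the backend: unbind IRQs and event channels,
 * release all tx/rx buffers and grant references, and revoke access to the
 * shared ring pages.
 */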
1520 static void xennet_disconnect_backend(struct netfront_info *info)
1521 {
1522         unsigned int i = 0;
1523         unsigned int num_queues = info->netdev->real_num_tx_queues;
1524
1525         netif_carrier_off(info->netdev);
1526
1527         for (i = 0; i < num_queues && info->queues; ++i) {
1528                 struct netfront_queue *queue = &info->queues[i];
1529
1530                 del_timer_sync(&queue->rx_refill_timer);
1531
1532                 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1533                         unbind_from_irqhandler(queue->tx_irq, queue);
1534                 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1535                         unbind_from_irqhandler(queue->tx_irq, queue);
1536                         unbind_from_irqhandler(queue->rx_irq, queue);
1537                 }
1538                 queue->tx_evtchn = queue->rx_evtchn = 0;
1539                 queue->tx_irq = queue->rx_irq = 0;
1540
1541                 if (netif_running(info->netdev))
1542                         napi_synchronize(&queue->napi);
1543
1544                 xennet_release_tx_bufs(queue);
1545                 xennet_release_rx_bufs(queue);
1546                 gnttab_free_grant_references(queue->gref_tx_head);
1547                 gnttab_free_grant_references(queue->gref_rx_head);
1548
1549                 /* End access and free the pages */
1550                 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1551                 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1552
1553                 queue->tx_ring_ref = GRANT_INVALID_REF;
1554                 queue->rx_ring_ref = GRANT_INVALID_REF;
1555                 queue->tx.sring = NULL;
1556                 queue->rx.sring = NULL;
1557         }
1558 }
1559
1560 /**
1561  * We are reconnecting to the backend, due to a suspend/resume or a backend
1562  * driver restart.  We tear down our netif structure here and recreate it
1563  * when the backend reconnects, leaving the device-layer structures intact
1564  * so that this is transparent to the rest of the kernel.
1565  */
1566 static int netfront_resume(struct xenbus_device *dev)
1567 {
1568         struct netfront_info *info = dev_get_drvdata(&dev->dev);
1569
1570         dev_dbg(&dev->dev, "%s\n", dev->nodename);
1571
1572         netif_tx_lock_bh(info->netdev);
1573         netif_device_detach(info->netdev);
1574         netif_tx_unlock_bh(info->netdev);
1575
1576         xennet_disconnect_backend(info);
1577         return 0;
1578 }
1579
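/* Parse this device's "mac" xenstore node, a colon-separated string of six
 * hex octets (e.g. "00:16:3e:00:00:01" -- illustrative value only), into
 * mac[].
 */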
1580 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1581 {
1582         char *s, *e, *macstr;
1583         int i;
1584
1585         macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1586         if (IS_ERR(macstr))
1587                 return PTR_ERR(macstr);
1588
1589         for (i = 0; i < ETH_ALEN; i++) {
1590                 mac[i] = simple_strtoul(s, &e, 16);
1591                 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1592                         kfree(macstr);
1593                         return -ENOENT;
1594                 }
1595                 s = e+1;
1596         }
1597
1598         kfree(macstr);
1599         return 0;
1600 }
1601
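/* Bind a single event channel shared by tx and rx notifications. */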
1602 static int setup_netfront_single(struct netfront_queue *queue)
1603 {
1604         int err;
1605
1606         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1607         if (err < 0)
1608                 goto fail;
1609
1610         err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1611                                                 xennet_interrupt, 0,
1612                                                 queue->info->netdev->name,
1613                                                 queue);
1614         if (err < 0)
1615                 goto bind_fail;
1616         queue->rx_evtchn = queue->tx_evtchn;
1617         queue->rx_irq = queue->tx_irq = err;
1618
1619         return 0;
1620
1621 bind_fail:
1622         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1623         queue->tx_evtchn = 0;
1624 fail:
1625         return err;
1626 }
1627
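/* Bind separate tx and rx event channels, each with its own handler. */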
1628 static int setup_netfront_split(struct netfront_queue *queue)
1629 {
1630         int err;
1631
1632         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1633         if (err < 0)
1634                 goto fail;
1635         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1636         if (err < 0)
1637                 goto alloc_rx_evtchn_fail;
1638
1639         snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1640                  "%s-tx", queue->name);
1641         err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1642                                                 xennet_tx_interrupt, 0,
1643                                                 queue->tx_irq_name, queue);
1644         if (err < 0)
1645                 goto bind_tx_fail;
1646         queue->tx_irq = err;
1647
1648         snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1649                  "%s-rx", queue->name);
1650         err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1651                                                 xennet_rx_interrupt, 0,
1652                                                 queue->rx_irq_name, queue);
1653         if (err < 0)
1654                 goto bind_rx_fail;
1655         queue->rx_irq = err;
1656
1657         return 0;
1658
1659 bind_rx_fail:
1660         unbind_from_irqhandler(queue->tx_irq, queue);
1661         queue->tx_irq = 0;
1662 bind_tx_fail:
1663         xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1664         queue->rx_evtchn = 0;
1665 alloc_rx_evtchn_fail:
1666         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1667         queue->tx_evtchn = 0;
1668 fail:
1669         return err;
1670 }
1671
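/* Allocate and grant the shared tx/rx rings for one queue, then bind its
 * event channel(s), preferring split channels when the backend offers them.
 */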
1672 static int setup_netfront(struct xenbus_device *dev,
1673                         struct netfront_queue *queue, unsigned int feature_split_evtchn)
1674 {
1675         struct xen_netif_tx_sring *txs;
1676         struct xen_netif_rx_sring *rxs;
1677         grant_ref_t gref;
1678         int err;
1679
1680         queue->tx_ring_ref = GRANT_INVALID_REF;
1681         queue->rx_ring_ref = GRANT_INVALID_REF;
1682         queue->rx.sring = NULL;
1683         queue->tx.sring = NULL;
1684
1685         txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1686         if (!txs) {
1687                 err = -ENOMEM;
1688                 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1689                 goto fail;
1690         }
1691         SHARED_RING_INIT(txs);
1692         FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1693
1694         err = xenbus_grant_ring(dev, txs, 1, &gref);
1695         if (err < 0)
1696                 goto grant_tx_ring_fail;
1697         queue->tx_ring_ref = gref;
1698
1699         rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1700         if (!rxs) {
1701                 err = -ENOMEM;
1702                 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1703                 goto alloc_rx_ring_fail;
1704         }
1705         SHARED_RING_INIT(rxs);
1706         FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1707
1708         err = xenbus_grant_ring(dev, rxs, 1, &gref);
1709         if (err < 0)
1710                 goto grant_rx_ring_fail;
1711         queue->rx_ring_ref = gref;
1712
1713         if (feature_split_evtchn)
1714                 err = setup_netfront_split(queue);
1715         /* Fall back to a single event channel if
1716          *  a) feature-split-event-channels == 0, or
1717          *  b) feature-split-event-channels == 1 but the split setup failed.
1718          */
1719         if (!feature_split_evtchn || err)
1720                 err = setup_netfront_single(queue);
1721
1722         if (err)
1723                 goto alloc_evtchn_fail;
1724
1725         return 0;
1726
1727         /* If we fail to set up netfront, it is safe to just revoke access
1728          * to the granted pages, because the backend is not accessing them yet.
1729          */
1730 alloc_evtchn_fail:
1731         gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1732 grant_rx_ring_fail:
1733         free_page((unsigned long)rxs);
1734 alloc_rx_ring_fail:
1735         gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1736 grant_tx_ring_fail:
1737         free_page((unsigned long)txs);
1738 fail:
1739         return err;
1740 }
1741
1742 /* Queue-specific initialisation
1743  * This used to be done in xennet_create_dev() but must now
1744  * be run per-queue.
1745  */
1746 static int xennet_init_queue(struct netfront_queue *queue)
1747 {
1748         unsigned short i;
1749         int err = 0;
1750         char *devid;
1751
1752         spin_lock_init(&queue->tx_lock);
1753         spin_lock_init(&queue->rx_lock);
1754         spin_lock_init(&queue->rx_cons_lock);
1755
1756         setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
1757                     (unsigned long)queue);
1758
1759         devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
1760         snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
1761                  devid, queue->id);
1762
1763         /* Initialise tx_skb_freelist as a free chain containing every entry. */
1764         queue->tx_skb_freelist = 0;
1765         queue->tx_pend_queue = TX_LINK_NONE;
1766         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1767                 queue->tx_link[i] = i + 1;
1768                 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1769                 queue->grant_tx_page[i] = NULL;
1770         }
1771         queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
1772
1773         /* Clear out rx_skbs */
1774         for (i = 0; i < NET_RX_RING_SIZE; i++) {
1775                 queue->rx_skbs[i] = NULL;
1776                 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1777         }
1778
1779         /* A grant for every tx ring slot */
1780         if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1781                                           &queue->gref_tx_head) < 0) {
1782                 pr_alert("can't alloc tx grant refs\n");
1783                 err = -ENOMEM;
1784                 goto exit;
1785         }
1786
1787         /* A grant for every rx ring slot */
1788         if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1789                                           &queue->gref_rx_head) < 0) {
1790                 pr_alert("can't alloc rx grant refs\n");
1791                 err = -ENOMEM;
1792                 goto exit_free_tx;
1793         }
1794
1795         return 0;
1796
1797  exit_free_tx:
1798         gnttab_free_grant_references(queue->gref_tx_head);
1799  exit:
1800         return err;
1801 }
1802
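/* For illustration, the xenstore layout this function produces looks like:
 *
 *   flat (single queue):    <nodename>/tx-ring-ref = "<ref>"
 *                           <nodename>/event-channel = "<evtchn>"
 *   hierarchical (multi):   <nodename>/queue-0/tx-ring-ref = "<ref>"
 *                           <nodename>/queue-0/event-channel-tx = "<evtchn>"
 */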
1803 static int write_queue_xenstore_keys(struct netfront_queue *queue,
1804                            struct xenbus_transaction *xbt, int write_hierarchical)
1805 {
1806         /* Write the queue-specific keys into XenStore in the traditional
1807          * way for a single queue, or in a queue subkeys for multiple
1808          * queues.
1809          */
1810         struct xenbus_device *dev = queue->info->xbdev;
1811         int err;
1812         const char *message;
1813         char *path;
1814         size_t pathsize;
1815
1816         /* Choose the correct place to write the keys */
1817         if (write_hierarchical) {
1818                 pathsize = strlen(dev->nodename) + 10;
1819                 path = kzalloc(pathsize, GFP_KERNEL);
1820                 if (!path) {
1821                         err = -ENOMEM;
1822                         message = "out of memory while writing ring references";
1823                         goto error;
1824                 }
1825                 snprintf(path, pathsize, "%s/queue-%u",
1826                                 dev->nodename, queue->id);
1827         } else {
1828                 path = (char *)dev->nodename;
1829         }
1830
1831         /* Write ring references */
1832         err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1833                         queue->tx_ring_ref);
1834         if (err) {
1835                 message = "writing tx-ring-ref";
1836                 goto error;
1837         }
1838
1839         err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1840                         queue->rx_ring_ref);
1841         if (err) {
1842                 message = "writing rx-ring-ref";
1843                 goto error;
1844         }
1845
1846         /* Write event channels; taking into account both shared
1847          * and split event channel scenarios.
1848          */
1849         if (queue->tx_evtchn == queue->rx_evtchn) {
1850                 /* Shared event channel */
1851                 err = xenbus_printf(*xbt, path,
1852                                 "event-channel", "%u", queue->tx_evtchn);
1853                 if (err) {
1854                         message = "writing event-channel";
1855                         goto error;
1856                 }
1857         } else {
1858                 /* Split event channels */
1859                 err = xenbus_printf(*xbt, path,
1860                                 "event-channel-tx", "%u", queue->tx_evtchn);
1861                 if (err) {
1862                         message = "writing event-channel-tx";
1863                         goto error;
1864                 }
1865
1866                 err = xenbus_printf(*xbt, path,
1867                                 "event-channel-rx", "%u", queue->rx_evtchn);
1868                 if (err) {
1869                         message = "writing event-channel-rx";
1870                         goto error;
1871                 }
1872         }
1873
1874         if (write_hierarchical)
1875                 kfree(path);
1876         return 0;
1877
1878 error:
1879         if (write_hierarchical)
1880                 kfree(path);
1881         xenbus_dev_fatal(dev, err, "%s", message);
1882         return err;
1883 }
1884
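/* Disable and delete NAPI for each queue, then free the queue array.
 * Callers hold the rtnl lock around this teardown.
 */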
1885 static void xennet_destroy_queues(struct netfront_info *info)
1886 {
1887         unsigned int i;
1888
1889         for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1890                 struct netfront_queue *queue = &info->queues[i];
1891
1892                 if (netif_running(info->netdev))
1893                         napi_disable(&queue->napi);
1894                 netif_napi_del(&queue->napi);
1895         }
1896
1897         kfree(info->queues);
1898         info->queues = NULL;
1899 }
1900
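/* Allocate and initialise *num_queues queues.  On partial failure the count
 * is reduced to the number actually created; zero queues is an error.
 */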
1901 static int xennet_create_queues(struct netfront_info *info,
1902                                 unsigned int *num_queues)
1903 {
1904         unsigned int i;
1905         int ret;
1906
1907         info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1908                                GFP_KERNEL);
1909         if (!info->queues)
1910                 return -ENOMEM;
1911
1912         for (i = 0; i < *num_queues; i++) {
1913                 struct netfront_queue *queue = &info->queues[i];
1914
1915                 queue->id = i;
1916                 queue->info = info;
1917
1918                 ret = xennet_init_queue(queue);
1919                 if (ret < 0) {
1920                         dev_warn(&info->xbdev->dev,
1921                                  "only created %u queues\n", i);
1922                         *num_queues = i;
1923                         break;
1924                 }
1925
1926                 netif_napi_add(queue->info->netdev, &queue->napi,
1927                                xennet_poll, 64);
1928                 if (netif_running(info->netdev))
1929                         napi_enable(&queue->napi);
1930         }
1931
1932         netif_set_real_num_tx_queues(info->netdev, *num_queues);
1933
1934         if (*num_queues == 0) {
1935                 dev_err(&info->xbdev->dev, "no queues\n");
1936                 return -EINVAL;
1937         }
1938         return 0;
1939 }
1940
1941 /* Common code used when first setting up, and when resuming. */
1942 static int talk_to_netback(struct xenbus_device *dev,
1943                            struct netfront_info *info)
1944 {
1945         const char *message;
1946         struct xenbus_transaction xbt;
1947         int err;
1948         unsigned int feature_split_evtchn;
1949         unsigned int i = 0;
1950         unsigned int max_queues = 0;
1951         struct netfront_queue *queue = NULL;
1952         unsigned int num_queues = 1;
1953
1954         info->netdev->irq = 0;
1955
1956         /* Check if backend supports multiple queues */
1957         err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1958                            "multi-queue-max-queues", "%u", &max_queues);
1959         if (err < 0)
1960                 max_queues = 1;
1961         num_queues = min(max_queues, xennet_max_queues);
1962
1963         /* Check feature-split-event-channels */
1964         err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1965                            "feature-split-event-channels", "%u",
1966                            &feature_split_evtchn);
1967         if (err < 0)
1968                 feature_split_evtchn = 0;
1969
1970         /* Read mac addr. */
1971         err = xen_net_read_mac(dev, info->netdev->dev_addr);
1972         if (err) {
1973                 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1974                 goto out_unlocked;
1975         }
1976
1977         rtnl_lock();
1978         if (info->queues)
1979                 xennet_destroy_queues(info);
1980
1981         /* For the case of a reconnect reset the "broken" indicator. */
1982         info->broken = false;
1983
1984         err = xennet_create_queues(info, &num_queues);
1985         if (err < 0) {
1986                 xenbus_dev_fatal(dev, err, "creating queues");
1987                 kfree(info->queues);
1988                 info->queues = NULL;
1989                 goto out;
1990         }
1991         rtnl_unlock();
1992
1993         /* Create shared ring, alloc event channel -- for each queue */
1994         for (i = 0; i < num_queues; ++i) {
1995                 queue = &info->queues[i];
1996                 err = setup_netfront(dev, queue, feature_split_evtchn);
1997                 if (err)
1998                         goto destroy_ring;
1999         }
2000
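        /* All queue state is in place; now publish it to xenstore in a
         * single transaction, restarting from here if the transaction
         * ends with -EAGAIN.
         */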
2001 again:
2002         err = xenbus_transaction_start(&xbt);
2003         if (err) {
2004                 xenbus_dev_fatal(dev, err, "starting transaction");
2005                 goto destroy_ring;
2006         }
2007
2008         if (xenbus_exists(XBT_NIL,
2009                           info->xbdev->otherend, "multi-queue-max-queues")) {
2010                 /* Write the number of queues */
2011                 err = xenbus_printf(xbt, dev->nodename,
2012                                     "multi-queue-num-queues", "%u", num_queues);
2013                 if (err) {
2014                         message = "writing multi-queue-num-queues";
2015                         goto abort_transaction_no_dev_fatal;
2016                 }
2017         }
2018
2019         if (num_queues == 1) {
2020                 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2021                 if (err)
2022                         goto abort_transaction_no_dev_fatal;
2023         } else {
2024                 /* Write the keys for each queue */
2025                 for (i = 0; i < num_queues; ++i) {
2026                         queue = &info->queues[i];
2027                         err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2028                         if (err)
2029                                 goto abort_transaction_no_dev_fatal;
2030                 }
2031         }
2032
2033         /* The remaining keys are not queue-specific */
2034         err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2035                             1);
2036         if (err) {
2037                 message = "writing request-rx-copy";
2038                 goto abort_transaction;
2039         }
2040
2041         err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2042         if (err) {
2043                 message = "writing feature-rx-notify";
2044                 goto abort_transaction;
2045         }
2046
2047         err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2048         if (err) {
2049                 message = "writing feature-sg";
2050                 goto abort_transaction;
2051         }
2052
2053         err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2054         if (err) {
2055                 message = "writing feature-gso-tcpv4";
2056                 goto abort_transaction;
2057         }
2058
2059         err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2060         if (err) {
2061                 message = "writing feature-gso-tcpv6";
2062                 goto abort_transaction;
2063         }
2064
2065         err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2066                            "1");
2067         if (err) {
2068                 message = "writing feature-ipv6-csum-offload";
2069                 goto abort_transaction;
2070         }
2071
2072         err = xenbus_transaction_end(xbt, 0);
2073         if (err) {
2074                 if (err == -EAGAIN)
2075                         goto again;
2076                 xenbus_dev_fatal(dev, err, "completing transaction");
2077                 goto destroy_ring;
2078         }
2079
2080         return 0;
2081
2082  abort_transaction:
2083         xenbus_dev_fatal(dev, err, "%s", message);
2084 abort_transaction_no_dev_fatal:
2085         xenbus_transaction_end(xbt, 1);
2086  destroy_ring:
2087         xennet_disconnect_backend(info);
2088         rtnl_lock();
2089         xennet_destroy_queues(info);
2090  out:
2091         rtnl_unlock();
2092 out_unlocked:
2093         device_unregister(&dev->dev);
2094         return err;
2095 }
2096
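/* (Re)establish the connection: require the rx-copy receive path, redo the
 * full handshake with the backend, register the net_device on first connect,
 * and kick every queue so any stale packets get retransmitted.
 */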
2097 static int xennet_connect(struct net_device *dev)
2098 {
2099         struct netfront_info *np = netdev_priv(dev);
2100         unsigned int num_queues = 0;
2101         int err;
2102         unsigned int feature_rx_copy;
2103         unsigned int j = 0;
2104         struct netfront_queue *queue = NULL;
2105
2106         err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
2107                            "feature-rx-copy", "%u", &feature_rx_copy);
2108         if (err != 1)
2109                 feature_rx_copy = 0;
2110
2111         if (!feature_rx_copy) {
2112                 dev_info(&dev->dev,
2113                          "backend does not support copying receive path\n");
2114                 return -ENODEV;
2115         }
2116
2117         err = talk_to_netback(np->xbdev, np);
2118         if (err)
2119                 return err;
2120
2121         /* talk_to_netback() sets the correct number of queues */
2122         num_queues = dev->real_num_tx_queues;
2123
2124         if (dev->reg_state == NETREG_UNINITIALIZED) {
2125                 err = register_netdev(dev);
2126                 if (err) {
2127                         pr_warn("%s: register_netdev err=%d\n", __func__, err);
2128                         device_unregister(&np->xbdev->dev);
2129                         return err;
2130                 }
2131         }
2132
2133         rtnl_lock();
2134         netdev_update_features(dev);
2135         rtnl_unlock();
2136
2137         /*
2138          * All public and private state should now be sane.  Get
2139          * ready to start sending and receiving packets and give the driver
2140          * domain a kick because we've probably just requeued some
2141          * packets.
2142          */
2143         netif_tx_lock_bh(np->netdev);
2144         netif_device_attach(np->netdev);
2145         netif_tx_unlock_bh(np->netdev);
2146
2147         netif_carrier_on(np->netdev);
2148         for (j = 0; j < num_queues; ++j) {
2149                 queue = &np->queues[j];
2150
2151                 notify_remote_via_irq(queue->tx_irq);
2152                 if (queue->tx_irq != queue->rx_irq)
2153                         notify_remote_via_irq(queue->rx_irq);
2154
2155                 spin_lock_irq(&queue->tx_lock);
2156                 xennet_tx_buf_gc(queue);
2157                 spin_unlock_irq(&queue->tx_lock);
2158
2159                 spin_lock_bh(&queue->rx_lock);
2160                 xennet_alloc_rx_buffers(queue);
2161                 spin_unlock_bh(&queue->rx_lock);
2162         }
2163
2164         return 0;
2165 }
2166
2167 /**
2168  * Callback received when the backend's state changes.
2169  */
2170 static void netback_changed(struct xenbus_device *dev,
2171                             enum xenbus_state backend_state)
2172 {
2173         struct netfront_info *np = dev_get_drvdata(&dev->dev);
2174         struct net_device *netdev = np->netdev;
2175
2176         dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2177
2178         wake_up_all(&module_wq);
2179
2180         switch (backend_state) {
2181         case XenbusStateInitialising:
2182         case XenbusStateInitialised:
2183         case XenbusStateReconfiguring:
2184         case XenbusStateReconfigured:
2185         case XenbusStateUnknown:
2186                 break;
2187
2188         case XenbusStateInitWait:
2189                 if (dev->state != XenbusStateInitialising)
2190                         break;
2191                 if (xennet_connect(netdev) != 0)
2192                         break;
2193                 xenbus_switch_state(dev, XenbusStateConnected);
2194                 break;
2195
2196         case XenbusStateConnected:
2197                 netdev_notify_peers(netdev);
2198                 break;
2199
2200         case XenbusStateClosed:
2201                 if (dev->state == XenbusStateClosed)
2202                         break;
2203                 /* Missed the backend's CLOSING state -- fallthrough */
2204         case XenbusStateClosing:
2205                 xenbus_frontend_closed(dev);
2206                 break;
2207         }
2208 }
2209
2210 static const struct xennet_stat {
2211         char name[ETH_GSTRING_LEN];
2212         u16 offset;
2213 } xennet_stats[] = {
2214         {
2215                 "rx_gso_checksum_fixup",
2216                 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2217         },
2218 };
2219
2220 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2221 {
2222         switch (string_set) {
2223         case ETH_SS_STATS:
2224                 return ARRAY_SIZE(xennet_stats);
2225         default:
2226                 return -EINVAL;
2227         }
2228 }
2229
2230 static void xennet_get_ethtool_stats(struct net_device *dev,
2231                                      struct ethtool_stats *stats, u64 *data)
2232 {
2233         void *np = netdev_priv(dev);
2234         int i;
2235
2236         for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2237                 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2238 }
2239
2240 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2241 {
2242         int i;
2243
2244         switch (stringset) {
2245         case ETH_SS_STATS:
2246                 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2247                         memcpy(data + i * ETH_GSTRING_LEN,
2248                                xennet_stats[i].name, ETH_GSTRING_LEN);
2249                 break;
2250         }
2251 }
2252
2253 static const struct ethtool_ops xennet_ethtool_ops =
2254 {
2255         .get_link = ethtool_op_get_link,
2256
2257         .get_sset_count = xennet_get_sset_count,
2258         .get_ethtool_stats = xennet_get_ethtool_stats,
2259         .get_strings = xennet_get_strings,
2260 };
2261
2262 #ifdef CONFIG_SYSFS
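/* Legacy rxbuf_{min,max,cur} sysfs attributes.  The ring size is fixed, so
 * reads report NET_RX_RING_SIZE and writes are validated but ignored.
 */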
2263 static ssize_t show_rxbuf(struct device *dev,
2264                           struct device_attribute *attr, char *buf)
2265 {
2266         return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2267 }
2268
2269 static ssize_t store_rxbuf(struct device *dev,
2270                            struct device_attribute *attr,
2271                            const char *buf, size_t len)
2272 {
2273         char *endp;
2274         unsigned long target;
2275
2276         if (!capable(CAP_NET_ADMIN))
2277                 return -EPERM;
2278
2279         target = simple_strtoul(buf, &endp, 0);
2280         if (endp == buf)
2281                 return -EBADMSG;
2282
2283         /* rxbuf_min and rxbuf_max are no longer configurable. */
2284
2285         return len;
2286 }
2287
2288 static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2289 static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2290 static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
2291
2292 static struct attribute *xennet_dev_attrs[] = {
2293         &dev_attr_rxbuf_min.attr,
2294         &dev_attr_rxbuf_max.attr,
2295         &dev_attr_rxbuf_cur.attr,
2296         NULL
2297 };
2298
2299 static const struct attribute_group xennet_dev_group = {
2300         .attrs = xennet_dev_attrs
2301 };
2302 #endif /* CONFIG_SYSFS */
2303
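/* Walk the backend down to the Closed state: request Closing and then
 * Closed, waiting (with retries) for the backend to follow each step.
 */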
2304 static void xennet_bus_close(struct xenbus_device *dev)
2305 {
2306         int ret;
2307
2308         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2309                 return;
2310         do {
2311                 xenbus_switch_state(dev, XenbusStateClosing);
2312                 ret = wait_event_timeout(module_wq,
2313                                    xenbus_read_driver_state(dev->otherend) ==
2314                                    XenbusStateClosing ||
2315                                    xenbus_read_driver_state(dev->otherend) ==
2316                                    XenbusStateClosed ||
2317                                    xenbus_read_driver_state(dev->otherend) ==
2318                                    XenbusStateUnknown,
2319                                    XENNET_TIMEOUT);
2320         } while (!ret);
2321
2322         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2323                 return;
2324
2325         do {
2326                 xenbus_switch_state(dev, XenbusStateClosed);
2327                 ret = wait_event_timeout(module_wq,
2328                                    xenbus_read_driver_state(dev->otherend) ==
2329                                    XenbusStateClosed ||
2330                                    xenbus_read_driver_state(dev->otherend) ==
2331                                    XenbusStateUnknown,
2332                                    XENNET_TIMEOUT);
2333         } while (!ret);
2334 }
2335
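/* Device teardown: close the xenbus connection, disconnect from the backend,
 * and release the queues and the net_device itself.
 */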
2336 static int xennet_remove(struct xenbus_device *dev)
2337 {
2338         struct netfront_info *info = dev_get_drvdata(&dev->dev);
2339
2340         xennet_bus_close(dev);
2341         xennet_disconnect_backend(info);
2342
2343         if (info->netdev->reg_state == NETREG_REGISTERED)
2344                 unregister_netdev(info->netdev);
2345
2346         if (info->queues) {
2347                 rtnl_lock();
2348                 xennet_destroy_queues(info);
2349                 rtnl_unlock();
2350         }
2351         xennet_free_netdev(info->netdev);
2352
2353         return 0;
2354 }
2355
2356 static const struct xenbus_device_id netfront_ids[] = {
2357         { "vif" },
2358         { "" }
2359 };
2360
2361 static struct xenbus_driver netfront_driver = {
2362         .ids = netfront_ids,
2363         .probe = netfront_probe,
2364         .remove = xennet_remove,
2365         .resume = netfront_resume,
2366         .otherend_changed = netback_changed,
2367 };
2368
2369 static int __init netif_init(void)
2370 {
2371         if (!xen_domain())
2372                 return -ENODEV;
2373
2374         if (!xen_has_pv_nic_devices())
2375                 return -ENODEV;
2376
2377         pr_info("Initialising Xen virtual ethernet driver\n");
2378
2379         /* Allow as many queues as there are CPUs if user has not
2380          * specified a value.
2381          */
2382         if (xennet_max_queues == 0)
2383                 xennet_max_queues = num_online_cpus();
2384
2385         return xenbus_register_frontend(&netfront_driver);
2386 }
2387 module_init(netif_init);
2388
2389
2390 static void __exit netif_exit(void)
2391 {
2392         xenbus_unregister_driver(&netfront_driver);
2393 }
2394 module_exit(netif_exit);
2395
2396 MODULE_DESCRIPTION("Xen virtual network device frontend");
2397 MODULE_LICENSE("GPL");
2398 MODULE_ALIAS("xen:vif");
2399 MODULE_ALIAS("xennet");