GNU Linux-libre 4.14.324-gnu1
drivers/net/xen-netback/netback.c
1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  *  drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34
35 #include "common.h"
36
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40 #include <linux/highmem.h>
41
42 #include <net/tcp.h>
43
44 #include <xen/xen.h>
45 #include <xen/events.h>
46 #include <xen/interface/memory.h>
47 #include <xen/page.h>
48
49 #include <asm/xen/hypercall.h>
50
51 /* Provide an option to disable split event channels at load time as
52  * event channels are a limited resource. Split event channels are
53  * enabled by default.
54  */
55 bool separate_tx_rx_irq = true;
56 module_param(separate_tx_rx_irq, bool, 0644);
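/* Example (assuming the backend is built as the xen-netback module):
 *   modprobe xen-netback separate_tx_rx_irq=0
 * makes each queue share one event channel for Tx and Rx instead of using two.
 */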
57
58 /* The time that packets can stay on the guest Rx internal queue
59  * before they are dropped.
60  */
61 unsigned int rx_drain_timeout_msecs = 10000;
62 module_param(rx_drain_timeout_msecs, uint, 0444);
63
64 /* The length of time before the frontend is considered unresponsive
65  * because it isn't providing Rx slots.
66  */
67 unsigned int rx_stall_timeout_msecs = 60000;
68 module_param(rx_stall_timeout_msecs, uint, 0444);
69
70 #define MAX_QUEUES_DEFAULT 8
71 unsigned int xenvif_max_queues;
72 module_param_named(max_queues, xenvif_max_queues, uint, 0644);
73 MODULE_PARM_DESC(max_queues,
74                  "Maximum number of queues per virtual interface");
75
76 /*
77  * This is the maximum number of slots a skb can have. If a guest sends a skb
78  * which exceeds this limit, it is considered malicious.
79  */
80 #define FATAL_SKB_SLOTS_DEFAULT 20
81 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
82 module_param(fatal_skb_slots, uint, 0444);
83
84 /* The amount to copy out of the first guest Tx slot into the skb's
85  * linear area.  If the first slot has more data, it will be mapped
86  * and put into the first frag.
87  *
88  * This is sized to avoid pulling headers from the frags for most
89  * TCP/IP packets.
90  */
91 #define XEN_NETBACK_TX_COPY_LEN 128
92
93 /* This is the maximum number of flows in the hash cache. */
94 #define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
95 unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
96 module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
97 MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
98
99 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
100                                u8 status);
101
102 static void make_tx_response(struct xenvif_queue *queue,
103                              struct xen_netif_tx_request *txp,
104                              unsigned int extra_count,
105                              s8       st);
106 static void push_tx_responses(struct xenvif_queue *queue);
107
108 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
109
110 static inline int tx_work_todo(struct xenvif_queue *queue);
111
112 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
113                                        u16 idx)
114 {
115         return page_to_pfn(queue->mmap_pages[idx]);
116 }
117
118 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
119                                          u16 idx)
120 {
121         return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
122 }
123
124 #define callback_param(vif, pending_idx) \
125         (vif->pending_tx_info[pending_idx].callback_struct)
126
127 /* Find the containing queue structure from a pointer into the pending_tx_info array
128  */
129 static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
130 {
131         u16 pending_idx = ubuf->desc;
132         struct pending_tx_info *temp =
133                 container_of(ubuf, struct pending_tx_info, callback_struct);
134         return container_of(temp - pending_idx,
135                             struct xenvif_queue,
136                             pending_tx_info[0]);
137 }
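/* In ubuf_to_queue() above, ubuf->desc is the slot's pending index, so stepping
 * the pending_tx_info pointer back by pending_idx elements reaches element 0 of
 * the array, from which container_of() recovers the owning queue.
 */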
138
139 static u16 frag_get_pending_idx(skb_frag_t *frag)
140 {
141         return (u16)frag->page_offset;
142 }
143
144 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
145 {
146         frag->page_offset = pending_idx;
147 }
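/* Until the grant map completes, a frag's page_offset field is borrowed to
 * carry the slot's pending index; xenvif_fill_frags() later overwrites it with
 * the real page, offset and size.
 */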
148
149 static inline pending_ring_idx_t pending_index(unsigned i)
150 {
151         return i & (MAX_PENDING_REQS-1);
152 }
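/* The AND above acts as a cheap modulo; it relies on MAX_PENDING_REQS being a
 * power of two.
 */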
153
154 void xenvif_kick_thread(struct xenvif_queue *queue)
155 {
156         wake_up(&queue->wq);
157 }
158
159 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
160 {
161         int more_to_do;
162
163         RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
164
165         if (more_to_do)
166                 napi_schedule(&queue->napi);
167         else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
168                                      &queue->eoi_pending) &
169                  (NETBK_TX_EOI | NETBK_COMMON_EOI))
170                 xen_irq_lateeoi(queue->tx_irq, 0);
171 }
172
173 static void tx_add_credit(struct xenvif_queue *queue)
174 {
175         unsigned long max_burst, max_credit;
176
177         /*
178          * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
179          * Otherwise the interface can seize up due to insufficient credit.
180          */
181         max_burst = max(131072UL, queue->credit_bytes);
182
183         /* Take care that adding a new chunk of credit doesn't wrap to zero. */
184         max_credit = queue->remaining_credit + queue->credit_bytes;
185         if (max_credit < queue->remaining_credit)
186                 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
187
188         queue->remaining_credit = min(max_credit, max_burst);
189         queue->rate_limited = false;
190 }
191
192 void xenvif_tx_credit_callback(unsigned long data)
193 {
194         struct xenvif_queue *queue = (struct xenvif_queue *)data;
195         tx_add_credit(queue);
196         xenvif_napi_schedule_or_enable_events(queue);
197 }
198
199 static void xenvif_tx_err(struct xenvif_queue *queue,
200                           struct xen_netif_tx_request *txp,
201                           unsigned int extra_count, RING_IDX end)
202 {
203         RING_IDX cons = queue->tx.req_cons;
204         unsigned long flags;
205
206         do {
207                 spin_lock_irqsave(&queue->response_lock, flags);
208                 make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
209                 push_tx_responses(queue);
210                 spin_unlock_irqrestore(&queue->response_lock, flags);
211                 if (cons == end)
212                         break;
213                 RING_COPY_REQUEST(&queue->tx, cons++, txp);
214                 extra_count = 0; /* only the first frag can have extras */
215         } while (1);
216         queue->tx.req_cons = cons;
217 }
218
219 static void xenvif_fatal_tx_err(struct xenvif *vif)
220 {
221         netdev_err(vif->dev, "fatal error; disabling device\n");
222         vif->disabled = true;
223         /* Disable the vif from queue 0's kthread */
224         if (vif->num_queues)
225                 xenvif_kick_thread(&vif->queues[0]);
226 }
227
228 static int xenvif_count_requests(struct xenvif_queue *queue,
229                                  struct xen_netif_tx_request *first,
230                                  unsigned int extra_count,
231                                  struct xen_netif_tx_request *txp,
232                                  int work_to_do)
233 {
234         RING_IDX cons = queue->tx.req_cons;
235         int slots = 0;
236         int drop_err = 0;
237         int more_data;
238
239         if (!(first->flags & XEN_NETTXF_more_data))
240                 return 0;
241
242         do {
243                 struct xen_netif_tx_request dropped_tx = { 0 };
244
245                 if (slots >= work_to_do) {
246                         netdev_err(queue->vif->dev,
247                                    "Asked for %d slots but exceeds this limit\n",
248                                    work_to_do);
249                         xenvif_fatal_tx_err(queue->vif);
250                         return -ENODATA;
251                 }
252
253                 /* This guest is really using too many slots and is
254                  * considered malicious.
255                  */
256                 if (unlikely(slots >= fatal_skb_slots)) {
257                         netdev_err(queue->vif->dev,
258                                    "Malicious frontend using %d slots, threshold %u\n",
259                                    slots, fatal_skb_slots);
260                         xenvif_fatal_tx_err(queue->vif);
261                         return -E2BIG;
262                 }
263
264                 /* The Xen network protocol had an implicit dependency on
265                  * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
266                  * the historical MAX_SKB_FRAGS value 18 to honor the
267                  * same behavior as before. Any packet using more than
268                  * 18 slots but less than fatal_skb_slots slots is
269                  * dropped.
270                  */
271                 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
272                         if (net_ratelimit())
273                                 netdev_dbg(queue->vif->dev,
274                                            "Too many slots (%d) exceeding limit (%d), dropping packet\n",
275                                            slots, XEN_NETBK_LEGACY_SLOTS_MAX);
276                         drop_err = -E2BIG;
277                 }
278
279                 if (drop_err)
280                         txp = &dropped_tx;
281
282                 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
283
284                 /* If the guest submitted a frame >= 64 KiB then
285                  * first->size overflowed and following slots will
286                  * appear to be larger than the frame.
287                  *
288                  * This cannot be a fatal error as there are buggy
289                  * frontends that do this.
290                  *
291                  * Consume all slots and drop the packet.
292                  */
293                 if (!drop_err && txp->size > first->size) {
294                         if (net_ratelimit())
295                                 netdev_dbg(queue->vif->dev,
296                                            "Invalid tx request, slot size %u > remaining size %u\n",
297                                            txp->size, first->size);
298                         drop_err = -EIO;
299                 }
300
301                 first->size -= txp->size;
302                 slots++;
303
304                 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
305                         netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
306                                  txp->offset, txp->size);
307                         xenvif_fatal_tx_err(queue->vif);
308                         return -EINVAL;
309                 }
310
311                 more_data = txp->flags & XEN_NETTXF_more_data;
312
313                 if (!drop_err)
314                         txp++;
315
316         } while (more_data);
317
318         if (drop_err) {
319                 xenvif_tx_err(queue, first, extra_count, cons + slots);
320                 return drop_err;
321         }
322
323         return slots;
324 }
325
326
327 struct xenvif_tx_cb {
328         u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
329         u8 copy_count;
330         u32 split_mask;
331 };
332
333 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
334 #define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
335 #define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
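/* The control block records, for each packet, which pending slots were
 * consumed by grant copies into the linear area, plus a bitmask of copies that
 * had to be split across a local page boundary. The BUILD_BUG_ON() in
 * xenvif_alloc_skb() ensures this still fits in skb->cb.
 */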
336
337 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
338                                            u16 pending_idx,
339                                            struct xen_netif_tx_request *txp,
340                                            unsigned int extra_count,
341                                            struct gnttab_map_grant_ref *mop)
342 {
343         queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
344         gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
345                           GNTMAP_host_map | GNTMAP_readonly,
346                           txp->gref, queue->vif->domid);
347
348         memcpy(&queue->pending_tx_info[pending_idx].req, txp,
349                sizeof(*txp));
350         queue->pending_tx_info[pending_idx].extra_count = extra_count;
351 }
352
353 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
354 {
355         struct sk_buff *skb =
356                 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
357                           GFP_ATOMIC | __GFP_NOWARN);
358
359         BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
360         if (unlikely(skb == NULL))
361                 return NULL;
362
363         /* Packets passed to netif_rx() must have some headroom. */
364         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
365
366         /* Initialize it here to avoid later surprises */
367         skb_shinfo(skb)->destructor_arg = NULL;
368
369         return skb;
370 }
371
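/* Build the grant operations for one packet: grant copies for the first
 * data_len bytes into the skb's linear area, then grant maps for the remaining
 * slots into frags, spilling into the pre-allocated nskb's frag_list when more
 * than MAX_SKB_FRAGS slots are left over.
 */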
372 static void xenvif_get_requests(struct xenvif_queue *queue,
373                                 struct sk_buff *skb,
374                                 struct xen_netif_tx_request *first,
375                                 struct xen_netif_tx_request *txfrags,
376                                 unsigned *copy_ops,
377                                 unsigned *map_ops,
378                                 unsigned int frag_overflow,
379                                 struct sk_buff *nskb,
380                                 unsigned int extra_count,
381                                 unsigned int data_len)
382 {
383         struct skb_shared_info *shinfo = skb_shinfo(skb);
384         skb_frag_t *frags = shinfo->frags;
385         u16 pending_idx;
386         pending_ring_idx_t index;
387         unsigned int nr_slots;
388         struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
389         struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
390         struct xen_netif_tx_request *txp = first;
391
392         nr_slots = shinfo->nr_frags + frag_overflow + 1;
393
394         copy_count(skb) = 0;
395         XENVIF_TX_CB(skb)->split_mask = 0;
396
397         /* Create copy ops for exactly data_len bytes into the skb head. */
398         __skb_put(skb, data_len);
399         while (data_len > 0) {
400                 int amount = data_len > txp->size ? txp->size : data_len;
401                 bool split = false;
402
403                 cop->source.u.ref = txp->gref;
404                 cop->source.domid = queue->vif->domid;
405                 cop->source.offset = txp->offset;
406
407                 cop->dest.domid = DOMID_SELF;
408                 cop->dest.offset = (offset_in_page(skb->data +
409                                                    skb_headlen(skb) -
410                                                    data_len)) & ~XEN_PAGE_MASK;
411                 cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
412                                                - data_len);
413
414                 /* Don't cross local page boundary! */
415                 if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
416                         amount = XEN_PAGE_SIZE - cop->dest.offset;
417                         XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
418                         split = true;
419                 }
420
421                 cop->len = amount;
422                 cop->flags = GNTCOPY_source_gref;
423
424                 index = pending_index(queue->pending_cons);
425                 pending_idx = queue->pending_ring[index];
426                 callback_param(queue, pending_idx).ctx = NULL;
427                 copy_pending_idx(skb, copy_count(skb)) = pending_idx;
428                 if (!split)
429                         copy_count(skb)++;
430
431                 cop++;
432                 data_len -= amount;
433
434                 if (amount == txp->size) {
435                         /* The copy op covered the full tx_request */
436
437                         memcpy(&queue->pending_tx_info[pending_idx].req,
438                                txp, sizeof(*txp));
439                         queue->pending_tx_info[pending_idx].extra_count =
440                                 (txp == first) ? extra_count : 0;
441
442                         if (txp == first)
443                                 txp = txfrags;
444                         else
445                                 txp++;
446                         queue->pending_cons++;
447                         nr_slots--;
448                 } else {
449                         /* The copy op partially covered the tx_request.
450                          * The remainder will be mapped or copied in the next
451                          * iteration.
452                          */
453                         txp->offset += amount;
454                         txp->size -= amount;
455                 }
456         }
457
458         for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
459              shinfo->nr_frags++, gop++, nr_slots--) {
460                 index = pending_index(queue->pending_cons++);
461                 pending_idx = queue->pending_ring[index];
462                 xenvif_tx_create_map_op(queue, pending_idx, txp,
463                                         txp == first ? extra_count : 0, gop);
464                 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
465
466                 if (txp == first)
467                         txp = txfrags;
468                 else
469                         txp++;
470         }
471
472         if (nr_slots > 0) {
473
474                 shinfo = skb_shinfo(nskb);
475                 frags = shinfo->frags;
476
477                 for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
478                      shinfo->nr_frags++, txp++, gop++) {
479                         index = pending_index(queue->pending_cons++);
480                         pending_idx = queue->pending_ring[index];
481                         xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
482                                                 gop);
483                         frag_set_pending_idx(&frags[shinfo->nr_frags],
484                                              pending_idx);
485                 }
486
487                 skb_shinfo(skb)->frag_list = nskb;
488         } else if (nskb) {
489                 /* A frag_list skb was allocated but it is no longer needed
490                  * because enough slots were converted to copy ops above.
491                  */
492                 kfree_skb(nskb);
493         }
494
495         (*copy_ops) = cop - queue->tx_copy_ops;
496         (*map_ops) = gop - queue->tx_map_ops;
497 }
498
499 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
500                                            u16 pending_idx,
501                                            grant_handle_t handle)
502 {
503         if (unlikely(queue->grant_tx_handle[pending_idx] !=
504                      NETBACK_INVALID_HANDLE)) {
505                 netdev_err(queue->vif->dev,
506                            "Trying to overwrite active handle! pending_idx: 0x%x\n",
507                            pending_idx);
508                 BUG();
509         }
510         queue->grant_tx_handle[pending_idx] = handle;
511 }
512
513 static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
514                                              u16 pending_idx)
515 {
516         if (unlikely(queue->grant_tx_handle[pending_idx] ==
517                      NETBACK_INVALID_HANDLE)) {
518                 netdev_err(queue->vif->dev,
519                            "Trying to unmap invalid handle! pending_idx: 0x%x\n",
520                            pending_idx);
521                 BUG();
522         }
523         queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
524 }
525
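/* Check the results of the grant copy and grant map operations issued for this
 * skb (and its frag_list skb, if any). Slots whose operations succeeded are
 * released or have their handles recorded; on any failure the already-mapped
 * frags are unmapped and an error is returned so the packet can be dropped.
 */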
526 static int xenvif_tx_check_gop(struct xenvif_queue *queue,
527                                struct sk_buff *skb,
528                                struct gnttab_map_grant_ref **gopp_map,
529                                struct gnttab_copy **gopp_copy)
530 {
531         struct gnttab_map_grant_ref *gop_map = *gopp_map;
532         u16 pending_idx;
533         /* This always points to the shinfo of the skb being checked, which
534          * could be either the first or the one on the frag_list
535          */
536         struct skb_shared_info *shinfo = skb_shinfo(skb);
537         /* If this is non-NULL, we are currently checking the frag_list skb, and
538          * this points to the shinfo of the first one
539          */
540         struct skb_shared_info *first_shinfo = NULL;
541         int nr_frags = shinfo->nr_frags;
542         const bool sharedslot = nr_frags &&
543                                 frag_get_pending_idx(&shinfo->frags[0]) ==
544                                     copy_pending_idx(skb, copy_count(skb) - 1);
545         int i, err = 0;
546
547         for (i = 0; i < copy_count(skb); i++) {
548                 int newerr;
549
550                 /* Check status of header. */
551                 pending_idx = copy_pending_idx(skb, i);
552
553                 newerr = (*gopp_copy)->status;
554
555                 /* Split copies need to be handled together. */
556                 if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
557                         (*gopp_copy)++;
558                         if (!newerr)
559                                 newerr = (*gopp_copy)->status;
560                 }
561                 if (likely(!newerr)) {
562                         /* The first frag might still have this slot mapped */
563                         if (i < copy_count(skb) - 1 || !sharedslot)
564                                 xenvif_idx_release(queue, pending_idx,
565                                                    XEN_NETIF_RSP_OKAY);
566                 } else {
567                         err = newerr;
568                         if (net_ratelimit())
569                                 netdev_dbg(queue->vif->dev,
570                                            "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
571                                            (*gopp_copy)->status,
572                                            pending_idx,
573                                            (*gopp_copy)->source.u.ref);
574                         /* The first frag might still have this slot mapped */
575                         if (i < copy_count(skb) - 1 || !sharedslot)
576                                 xenvif_idx_release(queue, pending_idx,
577                                                    XEN_NETIF_RSP_ERROR);
578                 }
579                 (*gopp_copy)++;
580         }
581
582 check_frags:
583         for (i = 0; i < nr_frags; i++, gop_map++) {
584                 int j, newerr;
585
586                 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
587
588                 /* Check error status: if okay then remember grant handle. */
589                 newerr = gop_map->status;
590
591                 if (likely(!newerr)) {
592                         xenvif_grant_handle_set(queue,
593                                                 pending_idx,
594                                                 gop_map->handle);
595                         /* Had a previous error? Invalidate this fragment. */
596                         if (unlikely(err)) {
597                                 xenvif_idx_unmap(queue, pending_idx);
598                                 /* If the mapping of the first frag was OK, but
599                                  * the header's copy failed, and they are
600                                  * sharing a slot, send an error
601                                  */
602                                 if (i == 0 && !first_shinfo && sharedslot)
603                                         xenvif_idx_release(queue, pending_idx,
604                                                            XEN_NETIF_RSP_ERROR);
605                                 else
606                                         xenvif_idx_release(queue, pending_idx,
607                                                            XEN_NETIF_RSP_OKAY);
608                         }
609                         continue;
610                 }
611
612                 /* Error on this fragment: respond to client with an error. */
613                 if (net_ratelimit())
614                         netdev_dbg(queue->vif->dev,
615                                    "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
616                                    i,
617                                    gop_map->status,
618                                    pending_idx,
619                                    gop_map->ref);
620
621                 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
622
623                 /* Not the first error? Preceding frags already invalidated. */
624                 if (err)
625                         continue;
626
627                 /* Invalidate preceding fragments of this skb. */
628                 for (j = 0; j < i; j++) {
629                         pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
630                         xenvif_idx_unmap(queue, pending_idx);
631                         xenvif_idx_release(queue, pending_idx,
632                                            XEN_NETIF_RSP_OKAY);
633                 }
634
635                 /* And if we found the error while checking the frag_list, unmap
636                  * the first skb's frags
637                  */
638                 if (first_shinfo) {
639                         for (j = 0; j < first_shinfo->nr_frags; j++) {
640                                 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
641                                 xenvif_idx_unmap(queue, pending_idx);
642                                 xenvif_idx_release(queue, pending_idx,
643                                                    XEN_NETIF_RSP_OKAY);
644                         }
645                 }
646
647                 /* Remember the error: invalidate all subsequent fragments. */
648                 err = newerr;
649         }
650
651         if (skb_has_frag_list(skb) && !first_shinfo) {
652                 first_shinfo = skb_shinfo(skb);
653                 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
654                 nr_frags = shinfo->nr_frags;
655
656                 goto check_frags;
657         }
658
659         *gopp_map = gop_map;
660         return err;
661 }
662
663 static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
664 {
665         struct skb_shared_info *shinfo = skb_shinfo(skb);
666         int nr_frags = shinfo->nr_frags;
667         int i;
668         u16 prev_pending_idx = INVALID_PENDING_IDX;
669
670         for (i = 0; i < nr_frags; i++) {
671                 skb_frag_t *frag = shinfo->frags + i;
672                 struct xen_netif_tx_request *txp;
673                 struct page *page;
674                 u16 pending_idx;
675
676                 pending_idx = frag_get_pending_idx(frag);
677
678                 /* If this is not the first frag, chain it to the previous one */
679                 if (prev_pending_idx == INVALID_PENDING_IDX)
680                         skb_shinfo(skb)->destructor_arg =
681                                 &callback_param(queue, pending_idx);
682                 else
683                         callback_param(queue, prev_pending_idx).ctx =
684                                 &callback_param(queue, pending_idx);
685
686                 callback_param(queue, pending_idx).ctx = NULL;
687                 prev_pending_idx = pending_idx;
688
689                 txp = &queue->pending_tx_info[pending_idx].req;
690                 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
691                 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
692                 skb->len += txp->size;
693                 skb->data_len += txp->size;
694                 skb->truesize += txp->size;
695
696                 /* Take an extra reference to offset the network stack's put_page */
697                 get_page(queue->mmap_pages[pending_idx]);
698         }
699 }
700
701 static int xenvif_get_extras(struct xenvif_queue *queue,
702                              struct xen_netif_extra_info *extras,
703                              unsigned int *extra_count,
704                              int work_to_do)
705 {
706         struct xen_netif_extra_info extra;
707         RING_IDX cons = queue->tx.req_cons;
708
709         do {
710                 if (unlikely(work_to_do-- <= 0)) {
711                         netdev_err(queue->vif->dev, "Missing extra info\n");
712                         xenvif_fatal_tx_err(queue->vif);
713                         return -EBADR;
714                 }
715
716                 RING_COPY_REQUEST(&queue->tx, cons, &extra);
717
718                 queue->tx.req_cons = ++cons;
719                 (*extra_count)++;
720
721                 if (unlikely(!extra.type ||
722                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
723                         netdev_err(queue->vif->dev,
724                                    "Invalid extra type: %d\n", extra.type);
725                         xenvif_fatal_tx_err(queue->vif);
726                         return -EINVAL;
727                 }
728
729                 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
730         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
731
732         return work_to_do;
733 }
734
735 static int xenvif_set_skb_gso(struct xenvif *vif,
736                               struct sk_buff *skb,
737                               struct xen_netif_extra_info *gso)
738 {
739         if (!gso->u.gso.size) {
740                 netdev_err(vif->dev, "GSO size must not be zero.\n");
741                 xenvif_fatal_tx_err(vif);
742                 return -EINVAL;
743         }
744
745         switch (gso->u.gso.type) {
746         case XEN_NETIF_GSO_TYPE_TCPV4:
747                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
748                 break;
749         case XEN_NETIF_GSO_TYPE_TCPV6:
750                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
751                 break;
752         default:
753                 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
754                 xenvif_fatal_tx_err(vif);
755                 return -EINVAL;
756         }
757
758         skb_shinfo(skb)->gso_size = gso->u.gso.size;
759         /* gso_segs will be calculated later */
760
761         return 0;
762 }
763
764 static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
765 {
766         bool recalculate_partial_csum = false;
767
768         /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
769          * peers can fail to set NETRXF_csum_blank when sending a GSO
770          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
771          * recalculate the partial checksum.
772          */
773         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
774                 queue->stats.rx_gso_checksum_fixup++;
775                 skb->ip_summed = CHECKSUM_PARTIAL;
776                 recalculate_partial_csum = true;
777         }
778
779         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
780         if (skb->ip_summed != CHECKSUM_PARTIAL)
781                 return 0;
782
783         return skb_checksum_setup(skb, recalculate_partial_csum);
784 }
785
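/* Return true if sending 'size' bytes now would exceed the queue's remaining
 * credit. Credit is replenished once per credit_usec window; when the packet
 * must wait, credit_timeout is armed so transmission resumes at the start of
 * the next window.
 */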
786 static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
787 {
788         u64 now = get_jiffies_64();
789         u64 next_credit = queue->credit_window_start +
790                 msecs_to_jiffies(queue->credit_usec / 1000);
791
792         /* Timer could already be pending in rare cases. */
793         if (timer_pending(&queue->credit_timeout)) {
794                 queue->rate_limited = true;
795                 return true;
796         }
797
798         /* Passed the point where we can replenish credit? */
799         if (time_after_eq64(now, next_credit)) {
800                 queue->credit_window_start = now;
801                 tx_add_credit(queue);
802         }
803
804         /* Still too big to send right now? Set a callback. */
805         if (size > queue->remaining_credit) {
806                 queue->credit_timeout.data     =
807                         (unsigned long)queue;
808                 mod_timer(&queue->credit_timeout,
809                           next_credit);
810                 queue->credit_window_start = next_credit;
811                 queue->rate_limited = true;
812
813                 return true;
814         }
815
816         return false;
817 }
818
819 /* No locking is required in xenvif_mcast_add/del() as they are
820  * only ever invoked from NAPI poll. An RCU list is used because
821  * xenvif_mcast_match() is called asynchronously, during start_xmit.
822  */
823
824 static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
825 {
826         struct xenvif_mcast_addr *mcast;
827
828         if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
829                 if (net_ratelimit())
830                         netdev_err(vif->dev,
831                                    "Too many multicast addresses\n");
832                 return -ENOSPC;
833         }
834
835         mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
836         if (!mcast)
837                 return -ENOMEM;
838
839         ether_addr_copy(mcast->addr, addr);
840         list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
841         vif->fe_mcast_count++;
842
843         return 0;
844 }
845
846 static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
847 {
848         struct xenvif_mcast_addr *mcast;
849
850         list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
851                 if (ether_addr_equal(addr, mcast->addr)) {
852                         --vif->fe_mcast_count;
853                         list_del_rcu(&mcast->entry);
854                         kfree_rcu(mcast, rcu);
855                         break;
856                 }
857         }
858 }
859
860 bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
861 {
862         struct xenvif_mcast_addr *mcast;
863
864         rcu_read_lock();
865         list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
866                 if (ether_addr_equal(addr, mcast->addr)) {
867                         rcu_read_unlock();
868                         return true;
869                 }
870         }
871         rcu_read_unlock();
872
873         return false;
874 }
875
876 void xenvif_mcast_addr_list_free(struct xenvif *vif)
877 {
878         /* No need for locking or RCU here. NAPI poll and TX queue
879          * are stopped.
880          */
881         while (!list_empty(&vif->fe_mcast_addr)) {
882                 struct xenvif_mcast_addr *mcast;
883
884                 mcast = list_first_entry(&vif->fe_mcast_addr,
885                                          struct xenvif_mcast_addr,
886                                          entry);
887                 --vif->fe_mcast_count;
888                 list_del(&mcast->entry);
889                 kfree(mcast);
890         }
891 }
892
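/* Pull up to 'budget' packets off the shared TX ring, validate each request
 * chain and queue the corresponding grant copy/map operations. Stops early if
 * either operation array fills up; the operations are only submitted to the
 * hypervisor later, from xenvif_tx_action().
 */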
893 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
894                                      int budget,
895                                      unsigned *copy_ops,
896                                      unsigned *map_ops)
897 {
898         struct sk_buff *skb, *nskb;
899         int ret;
900         unsigned int frag_overflow;
901
902         while (skb_queue_len(&queue->tx_queue) < budget) {
903                 struct xen_netif_tx_request txreq;
904                 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
905                 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
906                 unsigned int extra_count;
907                 u16 pending_idx;
908                 RING_IDX idx;
909                 int work_to_do;
910                 unsigned int data_len;
911                 pending_ring_idx_t index;
912
913                 if (queue->tx.sring->req_prod - queue->tx.req_cons >
914                     XEN_NETIF_TX_RING_SIZE) {
915                         netdev_err(queue->vif->dev,
916                                    "Impossible number of requests. "
917                                    "req_prod %d, req_cons %d, size %ld\n",
918                                    queue->tx.sring->req_prod, queue->tx.req_cons,
919                                    XEN_NETIF_TX_RING_SIZE);
920                         xenvif_fatal_tx_err(queue->vif);
921                         break;
922                 }
923
924                 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
925                 if (!work_to_do)
926                         break;
927
928                 idx = queue->tx.req_cons;
929                 rmb(); /* Ensure that we see the request before we copy it. */
930                 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
931
932                 /* Credit-based scheduling. */
933                 if (txreq.size > queue->remaining_credit &&
934                     tx_credit_exceeded(queue, txreq.size))
935                         break;
936
937                 queue->remaining_credit -= txreq.size;
938
939                 work_to_do--;
940                 queue->tx.req_cons = ++idx;
941
942                 memset(extras, 0, sizeof(extras));
943                 extra_count = 0;
944                 if (txreq.flags & XEN_NETTXF_extra_info) {
945                         work_to_do = xenvif_get_extras(queue, extras,
946                                                        &extra_count,
947                                                        work_to_do);
948                         idx = queue->tx.req_cons;
949                         if (unlikely(work_to_do < 0))
950                                 break;
951                 }
952
953                 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
954                         struct xen_netif_extra_info *extra;
955
956                         extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
957                         ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
958
959                         make_tx_response(queue, &txreq, extra_count,
960                                          (ret == 0) ?
961                                          XEN_NETIF_RSP_OKAY :
962                                          XEN_NETIF_RSP_ERROR);
963                         push_tx_responses(queue);
964                         continue;
965                 }
966
967                 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
968                         struct xen_netif_extra_info *extra;
969
970                         extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
971                         xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
972
973                         make_tx_response(queue, &txreq, extra_count,
974                                          XEN_NETIF_RSP_OKAY);
975                         push_tx_responses(queue);
976                         continue;
977                 }
978
979                 data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
980                         XEN_NETBACK_TX_COPY_LEN : txreq.size;
981
982                 ret = xenvif_count_requests(queue, &txreq, extra_count,
983                                             txfrags, work_to_do);
984
985                 if (unlikely(ret < 0))
986                         break;
987
988                 idx += ret;
989
990                 if (unlikely(txreq.size < ETH_HLEN)) {
991                         netdev_dbg(queue->vif->dev,
992                                    "Bad packet size: %d\n", txreq.size);
993                         xenvif_tx_err(queue, &txreq, extra_count, idx);
994                         break;
995                 }
996
997                 /* No crossing a page boundary, as the payload mustn't fragment. */
998                 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
999                         netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
1000                                    txreq.offset, txreq.size);
1001                         xenvif_fatal_tx_err(queue->vif);
1002                         break;
1003                 }
1004
1005                 index = pending_index(queue->pending_cons);
1006                 pending_idx = queue->pending_ring[index];
1007
1008                 if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
1009                         data_len = txreq.size;
1010
1011                 skb = xenvif_alloc_skb(data_len);
1012                 if (unlikely(skb == NULL)) {
1013                         netdev_dbg(queue->vif->dev,
1014                                    "Can't allocate a skb in start_xmit.\n");
1015                         xenvif_tx_err(queue, &txreq, extra_count, idx);
1016                         break;
1017                 }
1018
1019                 skb_shinfo(skb)->nr_frags = ret;
1020                 /* At this point shinfo->nr_frags is in fact the number of
1021                  * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1022                  */
1023                 frag_overflow = 0;
1024                 nskb = NULL;
1025                 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1026                         frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1027                         BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1028                         skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1029                         nskb = xenvif_alloc_skb(0);
1030                         if (unlikely(nskb == NULL)) {
1031                                 skb_shinfo(skb)->nr_frags = 0;
1032                                 kfree_skb(skb);
1033                                 xenvif_tx_err(queue, &txreq, extra_count, idx);
1034                                 if (net_ratelimit())
1035                                         netdev_err(queue->vif->dev,
1036                                                    "Can't allocate the frag_list skb.\n");
1037                                 break;
1038                         }
1039                 }
1040
1041                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1042                         struct xen_netif_extra_info *gso;
1043                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1044
1045                         if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1046                                 /* Failure in xenvif_set_skb_gso is fatal. */
1047                                 skb_shinfo(skb)->nr_frags = 0;
1048                                 kfree_skb(skb);
1049                                 kfree_skb(nskb);
1050                                 break;
1051                         }
1052                 }
1053
1054                 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
1055                         struct xen_netif_extra_info *extra;
1056                         enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
1057
1058                         extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
1059
1060                         switch (extra->u.hash.type) {
1061                         case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
1062                         case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
1063                                 type = PKT_HASH_TYPE_L3;
1064                                 break;
1065
1066                         case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
1067                         case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
1068                                 type = PKT_HASH_TYPE_L4;
1069                                 break;
1070
1071                         default:
1072                                 break;
1073                         }
1074
1075                         if (type != PKT_HASH_TYPE_NONE)
1076                                 skb_set_hash(skb,
1077                                              *(u32 *)extra->u.hash.value,
1078                                              type);
1079                 }
1080
1081                 xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
1082                                     map_ops, frag_overflow, nskb, extra_count,
1083                                     data_len);
1084
1085                 __skb_queue_tail(&queue->tx_queue, skb);
1086
1087                 queue->tx.req_cons = idx;
1088
1089                 if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
1090                     (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1091                         break;
1092         }
1093
1094         return;
1095 }
1096
1097 /* Consolidate an skb with a frag_list into a brand new one with local pages on
1098  * its frags. Returns 0 or -ENOMEM if new pages can't be allocated.
1099  */
1100 static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1101 {
1102         unsigned int offset = skb_headlen(skb);
1103         skb_frag_t frags[MAX_SKB_FRAGS];
1104         int i, f;
1105         struct ubuf_info *uarg;
1106         struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1107
1108         queue->stats.tx_zerocopy_sent += 2;
1109         queue->stats.tx_frag_overflow++;
1110
1111         xenvif_fill_frags(queue, nskb);
1112         /* Subtract the frags' size; we will correct it later */
1113         skb->truesize -= skb->data_len;
1114         skb->len += nskb->len;
1115         skb->data_len += nskb->len;
1116
1117         /* create a brand new frags array and coalesce there */
1118         for (i = 0; offset < skb->len; i++) {
1119                 struct page *page;
1120                 unsigned int len;
1121
1122                 BUG_ON(i >= MAX_SKB_FRAGS);
1123                 page = alloc_page(GFP_ATOMIC);
1124                 if (!page) {
1125                         int j;
1126                         skb->truesize += skb->data_len;
1127                         for (j = 0; j < i; j++)
1128                                 put_page(frags[j].page.p);
1129                         return -ENOMEM;
1130                 }
1131
1132                 if (offset + PAGE_SIZE < skb->len)
1133                         len = PAGE_SIZE;
1134                 else
1135                         len = skb->len - offset;
1136                 if (skb_copy_bits(skb, offset, page_address(page), len))
1137                         BUG();
1138
1139                 offset += len;
1140                 frags[i].page.p = page;
1141                 frags[i].page_offset = 0;
1142                 skb_frag_size_set(&frags[i], len);
1143         }
1144
1145         /* Release all the original (foreign) frags. */
1146         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1147                 skb_frag_unref(skb, f);
1148         uarg = skb_shinfo(skb)->destructor_arg;
1149         /* increase inflight counter to offset decrement in callback */
1150         atomic_inc(&queue->inflight_packets);
1151         uarg->callback(uarg, true);
1152         skb_shinfo(skb)->destructor_arg = NULL;
1153
1154         /* Fill the skb with the new (local) frags. */
1155         memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1156         skb_shinfo(skb)->nr_frags = i;
1157         skb->truesize += i * PAGE_SIZE;
1158
1159         return 0;
1160 }
1161
1162 static int xenvif_tx_submit(struct xenvif_queue *queue)
1163 {
1164         struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1165         struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1166         struct sk_buff *skb;
1167         int work_done = 0;
1168
1169         while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1170                 struct xen_netif_tx_request *txp;
1171                 u16 pending_idx;
1172
1173                 pending_idx = copy_pending_idx(skb, 0);
1174                 txp = &queue->pending_tx_info[pending_idx].req;
1175
1176                 /* Check the remap error code. */
1177                 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1178                         /* If there was an error, xenvif_tx_check_gop is
1179                          * expected to release all the frags which were mapped,
1180                          * so kfree_skb shouldn't do it again
1181                          */
1182                         skb_shinfo(skb)->nr_frags = 0;
1183                         if (skb_has_frag_list(skb)) {
1184                                 struct sk_buff *nskb =
1185                                                 skb_shinfo(skb)->frag_list;
1186                                 skb_shinfo(nskb)->nr_frags = 0;
1187                         }
1188                         kfree_skb(skb);
1189                         continue;
1190                 }
1191
1192                 if (txp->flags & XEN_NETTXF_csum_blank)
1193                         skb->ip_summed = CHECKSUM_PARTIAL;
1194                 else if (txp->flags & XEN_NETTXF_data_validated)
1195                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1196
1197                 xenvif_fill_frags(queue, skb);
1198
1199                 if (unlikely(skb_has_frag_list(skb))) {
1200                         struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1201                         xenvif_skb_zerocopy_prepare(queue, nskb);
1202                         if (xenvif_handle_frag_list(queue, skb)) {
1203                                 if (net_ratelimit())
1204                                         netdev_err(queue->vif->dev,
1205                                                    "Not enough memory to consolidate frag_list!\n");
1206                                 xenvif_skb_zerocopy_prepare(queue, skb);
1207                                 kfree_skb(skb);
1208                                 continue;
1209                         }
1210                         /* Copied all the bits from the frag list -- free it. */
1211                         skb_frag_list_init(skb);
1212                         kfree_skb(nskb);
1213                 }
1214
1215                 skb->dev      = queue->vif->dev;
1216                 skb->protocol = eth_type_trans(skb, skb->dev);
1217                 skb_reset_network_header(skb);
1218
1219                 if (checksum_setup(queue, skb)) {
1220                         netdev_dbg(queue->vif->dev,
1221                                    "Can't setup checksum in net_tx_action\n");
1222                         /* We have to set this flag to trigger the callback */
1223                         if (skb_shinfo(skb)->destructor_arg)
1224                                 xenvif_skb_zerocopy_prepare(queue, skb);
1225                         kfree_skb(skb);
1226                         continue;
1227                 }
1228
1229                 skb_probe_transport_header(skb, 0);
1230
1231                 /* If the packet is GSO then we will have just set up the
1232                  * transport header offset in checksum_setup so it's now
1233                  * straightforward to calculate gso_segs.
1234                  */
1235                 if (skb_is_gso(skb)) {
1236                         int mss = skb_shinfo(skb)->gso_size;
1237                         int hdrlen = skb_transport_header(skb) -
1238                                 skb_mac_header(skb) +
1239                                 tcp_hdrlen(skb);
1240
1241                         skb_shinfo(skb)->gso_segs =
1242                                 DIV_ROUND_UP(skb->len - hdrlen, mss);
1243                 }
1244
1245                 queue->stats.rx_bytes += skb->len;
1246                 queue->stats.rx_packets++;
1247
1248                 work_done++;
1249
1250                 /* Set this flag right before netif_receive_skb, otherwise
1251                  * someone might think this packet has already left netback, and
1252                  * do a skb_copy_ubufs while we are still in control of the
1253                  * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
1254                  */
1255                 if (skb_shinfo(skb)->destructor_arg) {
1256                         xenvif_skb_zerocopy_prepare(queue, skb);
1257                         queue->stats.tx_zerocopy_sent++;
1258                 }
1259
1260                 netif_receive_skb(skb);
1261         }
1262
1263         return work_done;
1264 }
1265
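/* Callback invoked by the network stack when it is done with a zerocopy skb.
 * Each ubuf_info in the chain (linked through ->ctx) corresponds to a mapped
 * slot; its pending index is queued on the dealloc ring for the dealloc task
 * to unmap and release.
 */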
1266 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1267 {
1268         unsigned long flags;
1269         pending_ring_idx_t index;
1270         struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1271
1272         /* This is the only place where we grab this lock, to protect callbacks
1273          * from each other.
1274          */
1275         spin_lock_irqsave(&queue->callback_lock, flags);
1276         do {
1277                 u16 pending_idx = ubuf->desc;
1278                 ubuf = (struct ubuf_info *) ubuf->ctx;
1279                 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1280                         MAX_PENDING_REQS);
1281                 index = pending_index(queue->dealloc_prod);
1282                 queue->dealloc_ring[index] = pending_idx;
1283                 /* Sync with xenvif_tx_dealloc_action:
1284                  * insert idx then incr producer.
1285                  */
1286                 smp_wmb();
1287                 queue->dealloc_prod++;
1288         } while (ubuf);
1289         spin_unlock_irqrestore(&queue->callback_lock, flags);
1290
1291         if (likely(zerocopy_success))
1292                 queue->stats.tx_zerocopy_success++;
1293         else
1294                 queue->stats.tx_zerocopy_fail++;
1295         xenvif_skb_zerocopy_complete(queue);
1296 }
1297
1298 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1299 {
1300         struct gnttab_unmap_grant_ref *gop;
1301         pending_ring_idx_t dc, dp;
1302         u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1303         unsigned int i = 0;
1304
1305         dc = queue->dealloc_cons;
1306         gop = queue->tx_unmap_ops;
1307
1308         /* Free up any grants we have finished using */
1309         do {
1310                 dp = queue->dealloc_prod;
1311
1312                 /* Ensure we see all indices enqueued by all
1313                  * xenvif_zerocopy_callback() invocations.
1314                  */
1315                 smp_rmb();
1316
1317                 while (dc != dp) {
1318                         BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1319                         pending_idx =
1320                                 queue->dealloc_ring[pending_index(dc++)];
1321
1322                         pending_idx_release[gop - queue->tx_unmap_ops] =
1323                                 pending_idx;
1324                         queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1325                                 queue->mmap_pages[pending_idx];
1326                         gnttab_set_unmap_op(gop,
1327                                             idx_to_kaddr(queue, pending_idx),
1328                                             GNTMAP_host_map,
1329                                             queue->grant_tx_handle[pending_idx]);
1330                         xenvif_grant_handle_reset(queue, pending_idx);
1331                         ++gop;
1332                 }
1333
1334         } while (dp != queue->dealloc_prod);
1335
1336         queue->dealloc_cons = dc;
1337
1338         if (gop - queue->tx_unmap_ops > 0) {
1339                 int ret;
1340                 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1341                                         NULL,
1342                                         queue->pages_to_unmap,
1343                                         gop - queue->tx_unmap_ops);
1344                 if (ret) {
1345                         netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1346                                    gop - queue->tx_unmap_ops, ret);
1347                         for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1348                                 if (gop[i].status != GNTST_okay)
1349                                         netdev_err(queue->vif->dev,
1350                                                    " host_addr: 0x%llx handle: 0x%x status: %d\n",
1351                                                    gop[i].host_addr,
1352                                                    gop[i].handle,
1353                                                    gop[i].status);
1354                         }
1355                         BUG();
1356                 }
1357         }
1358
1359         for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1360                 xenvif_idx_release(queue, pending_idx_release[i],
1361                                    XEN_NETIF_RSP_OKAY);
1362 }
1363
1364
1365 /* Called after netfront has transmitted */
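/* Build grant copy and map operations for the pending Tx requests with
 * xenvif_tx_build_gops(), execute them as batched grant table operations,
 * then hand the assembled skbs to the network stack in xenvif_tx_submit().
 * Returns the number of packets completed in this pass.
 */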
1366 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1367 {
1368         unsigned nr_mops = 0, nr_cops = 0;
1369         int work_done, ret;
1370
1371         if (unlikely(!tx_work_todo(queue)))
1372                 return 0;
1373
1374         xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1375
1376         if (nr_cops == 0)
1377                 return 0;
1378
1379         gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1380         if (nr_mops != 0) {
1381                 ret = gnttab_map_refs(queue->tx_map_ops,
1382                                       NULL,
1383                                       queue->pages_to_map,
1384                                       nr_mops);
1385                 if (ret) {
1386                         unsigned int i;
1387
1388                         netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
1389                                    nr_mops, ret);
1390                         for (i = 0; i < nr_mops; ++i)
1391                                 WARN_ON_ONCE(queue->tx_map_ops[i].status ==
1392                                              GNTST_okay);
1393                 }
1394         }
1395
1396         work_done = xenvif_tx_submit(queue);
1397
1398         return work_done;
1399 }
1400
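/* Complete a pending Tx slot: generate a Tx response with the given status
 * and recycle @pending_idx onto the pending ring so it can back a new
 * request.  response_lock serialises concurrent completions on this queue.
 */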
1401 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1402                                u8 status)
1403 {
1404         struct pending_tx_info *pending_tx_info;
1405         pending_ring_idx_t index;
1406         unsigned long flags;
1407
1408         pending_tx_info = &queue->pending_tx_info[pending_idx];
1409
1410         spin_lock_irqsave(&queue->response_lock, flags);
1411
1412         make_tx_response(queue, &pending_tx_info->req,
1413                          pending_tx_info->extra_count, status);
1414
1415                 /* Release the pending index before pushing the Tx response so
1416                  * it's available before a new Tx request is pushed by the
1417          * frontend.
1418          */
1419         index = pending_index(queue->pending_prod++);
1420         queue->pending_ring[index] = pending_idx;
1421
1422         push_tx_responses(queue);
1423
1424         spin_unlock_irqrestore(&queue->response_lock, flags);
1425 }
1426
1427
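/* Produce a Tx response for @txp with status @st, plus XEN_NETIF_RSP_NULL
 * responses for any extra slots the request consumed.  Only the private
 * response producer is advanced; nothing reaches the frontend until
 * push_tx_responses() is called.
 */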
1428 static void make_tx_response(struct xenvif_queue *queue,
1429                              struct xen_netif_tx_request *txp,
1430                              unsigned int extra_count,
1431                              s8       st)
1432 {
1433         RING_IDX i = queue->tx.rsp_prod_pvt;
1434         struct xen_netif_tx_response *resp;
1435
1436         resp = RING_GET_RESPONSE(&queue->tx, i);
1437         resp->id     = txp->id;
1438         resp->status = st;
1439
1440         while (extra_count-- != 0)
1441                 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1442
1443         queue->tx.rsp_prod_pvt = ++i;
1444 }
1445
1446 static void push_tx_responses(struct xenvif_queue *queue)
1447 {
1448         int notify;
1449
1450         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1451         if (notify)
1452                 notify_remote_via_irq(queue->tx_irq);
1453 }
1454
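/* Synchronously unmap the single grant backing @pending_idx and clear the
 * stored grant handle.  A failure to unmap is treated as fatal.
 */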
1455 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1456 {
1457         int ret;
1458         struct gnttab_unmap_grant_ref tx_unmap_op;
1459
1460         gnttab_set_unmap_op(&tx_unmap_op,
1461                             idx_to_kaddr(queue, pending_idx),
1462                             GNTMAP_host_map,
1463                             queue->grant_tx_handle[pending_idx]);
1464         xenvif_grant_handle_reset(queue, pending_idx);
1465
1466         ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1467                                 &queue->mmap_pages[pending_idx], 1);
1468         if (ret) {
1469                 netdev_err(queue->vif->dev,
1470                            "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1471                            ret,
1472                            pending_idx,
1473                            tx_unmap_op.host_addr,
1474                            tx_unmap_op.handle,
1475                            tx_unmap_op.status);
1476                 BUG();
1477         }
1478 }
1479
1480 static inline int tx_work_todo(struct xenvif_queue *queue)
1481 {
1482         if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1483                 return 1;
1484
1485         return 0;
1486 }
1487
1488 static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1489 {
1490         return queue->dealloc_cons != queue->dealloc_prod;
1491 }
1492
1493 void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1494 {
1495         if (queue->tx.sring)
1496                 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1497                                         queue->tx.sring);
1498         if (queue->rx.sring)
1499                 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1500                                         queue->rx.sring);
1501 }
1502
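/* Map the Tx and Rx shared rings granted by the frontend into the backend
 * address space and initialise the corresponding back rings.  On failure,
 * any ring mapped so far is torn down via xenvif_unmap_frontend_data_rings().
 */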
1503 int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1504                                    grant_ref_t tx_ring_ref,
1505                                    grant_ref_t rx_ring_ref)
1506 {
1507         void *addr;
1508         struct xen_netif_tx_sring *txs;
1509         struct xen_netif_rx_sring *rxs;
1510
1511         int err = -ENOMEM;
1512
1513         err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1514                                      &tx_ring_ref, 1, &addr);
1515         if (err)
1516                 goto err;
1517
1518         txs = (struct xen_netif_tx_sring *)addr;
1519         BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1520
1521         err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1522                                      &rx_ring_ref, 1, &addr);
1523         if (err)
1524                 goto err;
1525
1526         rxs = (struct xen_netif_rx_sring *)addr;
1527         BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1528
1529         return 0;
1530
1531 err:
1532         xenvif_unmap_frontend_data_rings(queue);
1533         return err;
1534 }
1535
1536 static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1537 {
1538         /* Dealloc thread must remain running until all inflight
1539          * packets complete.
1540          */
1541         return kthread_should_stop() &&
1542                 !atomic_read(&queue->inflight_packets);
1543 }
1544
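/* Per-queue kernel thread that unmaps grants for completed zerocopy skbs.
 * It sleeps until there is dealloc work to do or it is asked to stop (which
 * also requires all in-flight packets to have completed), and makes a final
 * pass on exit so nothing is left mapped.
 */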
1545 int xenvif_dealloc_kthread(void *data)
1546 {
1547         struct xenvif_queue *queue = data;
1548
1549         for (;;) {
1550                 wait_event_interruptible(queue->dealloc_wq,
1551                                          tx_dealloc_work_todo(queue) ||
1552                                          xenvif_dealloc_kthread_should_stop(queue));
1553                 if (xenvif_dealloc_kthread_should_stop(queue))
1554                         break;
1555
1556                 xenvif_tx_dealloc_action(queue);
1557                 cond_resched();
1558         }
1559
1560         /* Unmap anything remaining */
1561         if (tx_dealloc_work_todo(queue))
1562                 xenvif_tx_dealloc_action(queue);
1563
1564         return 0;
1565 }
1566
1567 static void make_ctrl_response(struct xenvif *vif,
1568                                const struct xen_netif_ctrl_request *req,
1569                                u32 status, u32 data)
1570 {
1571         RING_IDX idx = vif->ctrl.rsp_prod_pvt;
1572         struct xen_netif_ctrl_response rsp = {
1573                 .id = req->id,
1574                 .type = req->type,
1575                 .status = status,
1576                 .data = data,
1577         };
1578
1579         *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
1580         vif->ctrl.rsp_prod_pvt = ++idx;
1581 }
1582
1583 static void push_ctrl_response(struct xenvif *vif)
1584 {
1585         int notify;
1586
1587         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
1588         if (notify)
1589                 notify_remote_via_irq(vif->ctrl_irq);
1590 }
1591
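/* Dispatch a single control-ring request to the matching hash configuration
 * handler.  Unknown request types are answered with NOT_SUPPORTED; every
 * request gets exactly one response, which is pushed immediately.
 */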
1592 static void process_ctrl_request(struct xenvif *vif,
1593                                  const struct xen_netif_ctrl_request *req)
1594 {
1595         u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
1596         u32 data = 0;
1597
1598         switch (req->type) {
1599         case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
1600                 status = xenvif_set_hash_alg(vif, req->data[0]);
1601                 break;
1602
1603         case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
1604                 status = xenvif_get_hash_flags(vif, &data);
1605                 break;
1606
1607         case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
1608                 status = xenvif_set_hash_flags(vif, req->data[0]);
1609                 break;
1610
1611         case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
1612                 status = xenvif_set_hash_key(vif, req->data[0],
1613                                              req->data[1]);
1614                 break;
1615
1616         case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
1617                 status = XEN_NETIF_CTRL_STATUS_SUCCESS;
1618                 data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
1619                 break;
1620
1621         case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
1622                 status = xenvif_set_hash_mapping_size(vif,
1623                                                       req->data[0]);
1624                 break;
1625
1626         case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
1627                 status = xenvif_set_hash_mapping(vif, req->data[0],
1628                                                  req->data[1],
1629                                                  req->data[2]);
1630                 break;
1631
1632         default:
1633                 break;
1634         }
1635
1636         make_ctrl_response(vif, req, status, data);
1637         push_ctrl_response(vif);
1638 }
1639
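/* Drain the control ring.  Each request is copied out of the shared ring
 * with RING_COPY_REQUEST before being processed, so the frontend cannot
 * change it while it is being handled.  req_event is advanced so that a new
 * request raises another event once the ring has been emptied.
 */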
1640 static void xenvif_ctrl_action(struct xenvif *vif)
1641 {
1642         for (;;) {
1643                 RING_IDX req_prod, req_cons;
1644
1645                 req_prod = vif->ctrl.sring->req_prod;
1646                 req_cons = vif->ctrl.req_cons;
1647
1648                 /* Make sure we can see requests before we process them. */
1649                 rmb();
1650
1651                 if (req_cons == req_prod)
1652                         break;
1653
1654                 while (req_cons != req_prod) {
1655                         struct xen_netif_ctrl_request req;
1656
1657                         RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
1658                         req_cons++;
1659
1660                         process_ctrl_request(vif, &req);
1661                 }
1662
1663                 vif->ctrl.req_cons = req_cons;
1664                 vif->ctrl.sring->req_event = req_cons + 1;
1665         }
1666 }
1667
1668 static bool xenvif_ctrl_work_todo(struct xenvif *vif)
1669 {
1670         if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
1671                 return 1;
1672
1673         return 0;
1674 }
1675
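/* Interrupt handler for the control event channel: process any pending
 * control requests, then signal end-of-interrupt.  The EOI keeps
 * XEN_EOI_FLAG_SPURIOUS if the event fired with no work to do.
 */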
1676 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
1677 {
1678         struct xenvif *vif = data;
1679         unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
1680
1681         while (xenvif_ctrl_work_todo(vif)) {
1682                 xenvif_ctrl_action(vif);
1683                 eoi_flag = 0;
1684         }
1685
1686         xen_irq_lateeoi(irq, eoi_flag);
1687
1688         return IRQ_HANDLED;
1689 }
1690
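/* Module initialisation: bail out when not running in a Xen domain, cap the
 * default number of queues at min(MAX_QUEUES_DEFAULT, online CPUs), clamp
 * fatal_skb_slots to at least XEN_NETBK_LEGACY_SLOTS_MAX, register the
 * xenbus backend and, with CONFIG_DEBUG_FS, create the "xen-netback"
 * debugfs directory.
 */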
1691 static int __init netback_init(void)
1692 {
1693         int rc = 0;
1694
1695         if (!xen_domain())
1696                 return -ENODEV;
1697
1698         /* Allow as many queues as there are CPUs, but at most
1699          * MAX_QUEUES_DEFAULT (8), if the user has not specified a value.
1700          */
1701         if (xenvif_max_queues == 0)
1702                 xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
1703                                           num_online_cpus());
1704
1705         if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1706                 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1707                         fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1708                 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1709         }
1710
1711         rc = xenvif_xenbus_init();
1712         if (rc)
1713                 goto failed_init;
1714
1715 #ifdef CONFIG_DEBUG_FS
1716         xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
1717         if (IS_ERR_OR_NULL(xen_netback_dbg_root))
1718                 pr_warn("Init of debugfs returned %ld!\n",
1719                         PTR_ERR(xen_netback_dbg_root));
1720 #endif /* CONFIG_DEBUG_FS */
1721
1722         return 0;
1723
1724 failed_init:
1725         return rc;
1726 }
1727
1728 module_init(netback_init);
1729
1730 static void __exit netback_fini(void)
1731 {
1732 #ifdef CONFIG_DEBUG_FS
1733         if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
1734                 debugfs_remove_recursive(xen_netback_dbg_root);
1735 #endif /* CONFIG_DEBUG_FS */
1736         xenvif_xenbus_fini();
1737 }
1738 module_exit(netback_fini);
1739
1740 MODULE_LICENSE("Dual BSD/GPL");
1741 MODULE_ALIAS("xen-backend:vif");