/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

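/* Note: XENVIF_QUEUE_LENGTH is used as the netdev tx_queue_len and
 * XENVIF_NAPI_WEIGHT as the NAPI poll budget for the guest TX ring;
 * see xenvif_alloc() and xenvif_connect_data() below.
 */
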
/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core network code calls into
 * xenvif_zerocopy_callback, which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}

int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
	if (rc)
		napi_schedule(&queue->napi);
	return rc;
}

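/* Event-channel interrupts are acknowledged lazily: each handler sets a
 * bit in eoi_pending and only issues an EOI immediately when it found no
 * work, passing XEN_EOI_FLAG_SPURIOUS so the lateeoi core can throttle a
 * frontend that raises events without posting requests. The EOI for a
 * productive event is issued later, once the work has been processed.
 */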
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_tx_interrupt(queue)) {
		atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue; pretend there is nothing to do for it so
	 * that it is descheduled from NAPI. The interface will be
	 * turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = xenvif_have_rx_work(queue, false);
	if (rc)
		xenvif_kick_thread(queue);
	return rc;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_rx_interrupt(queue)) {
		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;
	bool has_tx, has_rx;

	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
	WARN(old, "Interrupt while EOI pending\n");

	has_tx = xenvif_handle_tx_interrupt(queue);
	has_rx = xenvif_handle_rx_interrupt(queue);

	if (!has_rx && !has_tx) {
		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;

	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;

	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

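/* Select the transmit queue for an skb. If the frontend has configured
 * a hash algorithm via the control ring, the hash is computed here and
 * run through the frontend-supplied mapping table; two copies of that
 * table are kept and hash.mapping_sel selects the active one, so a
 * table update does not race with transmit. Otherwise fall back to the
 * core's selection across real_num_tx_queues.
 */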
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev,
			       select_queue_fallback_t fallback)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

	/* If queues are not set up internally, always return 0 as the
	 * packet is going to be dropped anyway.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}

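/* The backend transmit path: packets sent by the host towards the guest
 * are appended to the chosen queue's internal Rx queue, and the
 * guest-Rx kthread is kicked to push them across the shared ring.
 * cb->expires bounds how long a packet may sit in that queue before the
 * drain logic discards it.
 */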
static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

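/* Aggregate the per-queue counters into the netdev stats. No locking is
 * taken, so the result is a best-effort snapshot of the counters.
 */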
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent)
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success)
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. Ideally the
	 * frontend is built with the same MAX_SKB_FRAGS value.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

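/* Create and register the netdev for a new vif. Called from the xenbus
 * probe path; the queues themselves are allocated later, once the
 * frontend has negotiated how many it wants.
 */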
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

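/* Per-queue initialisation: credit-based rate-limiting state, the
 * internal packet queues, the pending-ring indices, and the grant pages
 * used for mapping frontend TX buffers.
 */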
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, these pages will consume real memory,
	 * so ballooning should be enabled. The long-term solution would be
	 * to use a pool of valid page descriptors with no dependency on
	 * ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

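/* Bring the carrier up once the frontend is connected. If the guest
 * cannot handle scatter-gather, the MTU is clamped back to a single
 * Ethernet frame before the features are re-evaluated.
 */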
void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

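/* Map the control ring shared with the frontend and bind its event
 * channel. The control ring carries hash configuration requests, so the
 * hash state is initialised here as well.
 */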
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
				vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

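/* Connect a queue to the frontend's data rings: map the TX/RX shared
 * rings, bind either a common or a split pair of event channels, and
 * start the per-queue guest-Rx and dealloc kthreads. The IRQs stay
 * disabled until xenvif_up() enables them.
 */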
int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	queue->stalled = true;

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;
	get_task_struct(task);

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_data_rings(queue);
	netif_napi_del(&queue->napi);
err:
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

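/* The reverse of xenvif_connect_data(): drop the carrier, then stop the
 * kthreads, NAPI instances, and event channels of every queue before
 * unmapping the shared rings.
 */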
void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		netif_napi_del(&queue->napi);

		if (queue->task) {
			kthread_stop(queue->task);
			put_task_struct(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_data_rings(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

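/* Tear down the control ring in the reverse order of
 * xenvif_connect_ctrl(): release the hash state and the event channel,
 * then unmap the shared ring.
 */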
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}