 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/route.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN 64
#define RETRY_US_LO 5000
#define RETRY_US_HI 10000
#define RETRY_MAX 2000 /* >10 sec */
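/* With usleep_range(RETRY_US_LO, RETRY_US_HI) between attempts, RETRY_MAX
 * retries wait at least 2000 * 5 ms = 10 s (and up to roughly 20 s) before
 * giving up on draining the ring.
 */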
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

/* Macros to define the context of vf registration */
#define VF_REG_IN_PROBE 1
#define VF_REG_IN_NOTIFIER 2

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
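/* Derived once in netvsc_drv_init() as ring_size * PAGE_SIZE, after
 * ring_size has been raised to at least RING_SIZE_MIN.
 */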
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR;
static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

static void netvsc_change_rx_flags(struct net_device *net, int change)
struct net_device_context *ndev_ctx = netdev_priv(net);
struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
if (change & IFF_PROMISC) {
inc = (net->flags & IFF_PROMISC) ? 1 : -1;
dev_set_promiscuity(vf_netdev, inc);
if (change & IFF_ALLMULTI) {
inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
dev_set_allmulti(vf_netdev, inc);

static void netvsc_set_rx_mode(struct net_device *net)
struct net_device_context *ndev_ctx = netdev_priv(net);
struct net_device *vf_netdev;
struct netvsc_device *nvdev;
vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
dev_uc_sync(vf_netdev, net);
dev_mc_sync(vf_netdev, net);
nvdev = rcu_dereference(ndev_ctx->nvdev);
rndis_filter_update(nvdev);

static void netvsc_tx_enable(struct netvsc_device *nvscdev,
struct net_device *ndev)
nvscdev->tx_disable = false;
virt_wmb(); /* ensure queue wake up mechanism is on */
netif_tx_wake_all_queues(ndev);

static int netvsc_open(struct net_device *net)
struct net_device_context *ndev_ctx = netdev_priv(net);
struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
struct rndis_device *rdev;
netif_carrier_off(net);
/* Open up the device */
ret = rndis_filter_open(nvdev);
netdev_err(net, "unable to open device (ret %d).\n", ret);
rdev = nvdev->extension;
if (!rdev->link_state) {
netif_carrier_on(net);
netvsc_tx_enable(nvdev, net);
/* Setting the synthetic device up transparently sets
 * the slave as up. If open fails, then the slave will
 * still be offline (and not used).
 */
ret = dev_open(vf_netdev);
"unable to open slave: %s: %d\n",
vf_netdev->name, ret);
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
unsigned int retry = 0;
/* Ensure pending bytes in ring are read */
for (i = 0; i < nvdev->num_chn; i++) {
struct vmbus_channel *chn
= nvdev->chan_table[i].channel;
/* make sure receive not running now */
napi_synchronize(&nvdev->chan_table[i].napi);
aread = hv_get_bytes_to_read(&chn->inbound);
aread = hv_get_bytes_to_read(&chn->outbound);
if (++retry > RETRY_MAX)
usleep_range(RETRY_US_LO, RETRY_US_HI);
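/* The loop above polls each channel's inbound and outbound VMBus rings and
 * only treats the device as idle once both report zero pending bytes; it
 * sleeps 5-10 ms between passes and gives up after RETRY_MAX attempts.
 */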
static void netvsc_tx_disable(struct netvsc_device *nvscdev,
struct net_device *ndev)
nvscdev->tx_disable = true;
virt_wmb(); /* ensure txq will not wake up after stop */
netif_tx_disable(ndev);

static int netvsc_close(struct net_device *net)
struct net_device_context *net_device_ctx = netdev_priv(net);
struct net_device *vf_netdev
= rtnl_dereference(net_device_ctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
netvsc_tx_disable(nvdev, net);
/* No need to close rndis filter if it is removed already */
ret = rndis_filter_close(nvdev);
netdev_err(net, "unable to close device (ret %d).\n", ret);
ret = netvsc_wait_until_empty(nvdev);
netdev_err(net, "Ring buffer not empty after closing rndis\n");
dev_close(vf_netdev);

static inline void *init_ppi_data(struct rndis_message *msg,
u32 ppi_size, u32 pkt_type)
struct rndis_packet *rndis_pkt = &msg->msg.pkt;
struct rndis_per_packet_info *ppi;
rndis_pkt->data_offset += ppi_size;
ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
+ rndis_pkt->per_pkt_info_len;
ppi->size = ppi_size;
ppi->type = pkt_type;
ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
rndis_pkt->per_pkt_info_len += ppi_size;
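/* Each per-packet info (PPI) element is appended after any PPI data already
 * present; data_offset grows by the same amount so the packet payload still
 * begins immediately after the PPI area. The caller fills in the PPI body
 * that follows the header initialized here.
 */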
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(struct sk_buff *skb,
const struct net_device_context *ndc)
struct flow_keys flow;
u32 hash, pkt_proto = 0;
static u32 hashrnd __read_mostly;
net_get_random_once(&hashrnd, sizeof(hashrnd));
if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
switch (flow.basic.ip_proto) {
if (flow.basic.n_proto == htons(ETH_P_IP))
pkt_proto = HV_TCP4_L4HASH;
else if (flow.basic.n_proto == htons(ETH_P_IPV6))
pkt_proto = HV_TCP6_L4HASH;
if (flow.basic.n_proto == htons(ETH_P_IP))
pkt_proto = HV_UDP4_L4HASH;
else if (flow.basic.n_proto == htons(ETH_P_IPV6))
pkt_proto = HV_UDP6_L4HASH;
if (pkt_proto & ndc->l4_hash) {
return skb_get_hash(skb);
if (flow.basic.n_proto == htons(ETH_P_IP))
hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
else if (flow.basic.n_proto == htons(ETH_P_IPV6))
hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
__skb_set_sw_hash(skb, hash, false);
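/* Hash policy: when the flow's L4 protocol hash is enabled in ndc->l4_hash,
 * use the kernel's 4-tuple flow hash (skb_get_hash); otherwise fall back to
 * a Jenkins hash over the IP addresses only, so excluded protocols still map
 * to a stable receive queue.
 */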
static inline int netvsc_get_tx_queue(struct net_device *ndev,
struct sk_buff *skb, int old_idx)
const struct net_device_context *ndc = netdev_priv(ndev);
struct sock *sk = skb->sk;
q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
(VRSS_SEND_TAB_SIZE - 1)];
/* If queue index changed record the new value */
if (q_idx != old_idx &&
sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
sk_tx_queue_set(sk, q_idx);

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
int q_idx = sk_tx_queue_get(skb->sk);
if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
/* If forwarding a packet, we use the recorded queue when
 * available for better cache locality.
 */
if (skb_rx_queue_recorded(skb))
q_idx = skb_get_rx_queue(skb);
q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev;
vf_netdev = rcu_dereference(ndc->vf_netdev);
const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
if (vf_ops->ndo_select_queue)
txq = vf_ops->ndo_select_queue(vf_netdev, skb,
txq = fallback(vf_netdev, skb, NULL);
/* Record the queue selected by VF so that it can be
 * used for common case where VF has more queues than
 * the synthetic device.
 */
qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
txq = netvsc_pick_tx(ndev, skb);
while (txq >= ndev->real_num_tx_queues)
txq -= ndev->real_num_tx_queues;
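/* The VF may expose more queues than the synthetic device; fold the chosen
 * index back into range by repeated subtraction (a cheap modulo).
 */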
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
struct hv_page_buffer *pb)
/* Deal with compound pages by ignoring the unused part
 * of the page.
 */
page += (offset >> PAGE_SHIFT);
offset &= ~PAGE_MASK;
bytes = PAGE_SIZE - offset;
pb[j].pfn = page_to_pfn(page);
pb[j].offset = offset;
if (offset == PAGE_SIZE && len) {
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
/* The packet is laid out thus:
 * 1. hdr: RNDIS header and PPI
 * 2. skb linear data
 * 3. skb fragment data
 */
slots_used += fill_pg_buf(virt_to_page(hdr),
len, &pb[slots_used]);
packet->rmsg_size = len;
packet->rmsg_pgcnt = slots_used;
slots_used += fill_pg_buf(virt_to_page(data),
offset_in_page(data),
skb_headlen(skb), &pb[slots_used]);
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
slots_used += fill_pg_buf(skb_frag_page(frag),
skb_frag_size(frag), &pb[slots_used]);

static int count_skb_frag_slots(struct sk_buff *skb)
int i, frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
unsigned long size = skb_frag_size(frag);
unsigned long offset = frag->page_offset;
/* Skip unused space at the start of the page */
offset &= ~PAGE_MASK;
pages += PFN_UP(offset + size);

static int netvsc_get_slots(struct sk_buff *skb)
char *data = skb->data;
unsigned int offset = offset_in_page(data);
unsigned int len = skb_headlen(skb);
slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
frag_slots = count_skb_frag_slots(skb);
return slots + frag_slots;
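/* Example: a 2000-byte linear area that starts at offset 3072 within a
 * 4 KiB page needs DIV_ROUND_UP(3072 + 2000, 4096) = 2 page-buffer slots.
 */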
static u32 net_checksum_info(struct sk_buff *skb)
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip = ip_hdr(skb);
if (ip->protocol == IPPROTO_TCP)
return TRANSPORT_INFO_IPV4_TCP;
else if (ip->protocol == IPPROTO_UDP)
return TRANSPORT_INFO_IPV4_UDP;
struct ipv6hdr *ip6 = ipv6_hdr(skb);
if (ip6->nexthdr == IPPROTO_TCP)
return TRANSPORT_INFO_IPV6_TCP;
else if (ip6->nexthdr == IPPROTO_UDP)
return TRANSPORT_INFO_IPV6_UDP;
return TRANSPORT_INFO_NOT_IP;
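/* The TRANSPORT_INFO_* value returned here is tested against the
 * host-advertised tx_checksum_mask in netvsc_start_xmit(); combinations
 * the host cannot offload fall back to skb_checksum_help().
 */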
/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
struct sk_buff *skb)
struct net_device_context *ndev_ctx = netdev_priv(net);
unsigned int len = skb->len;
skb->dev = vf_netdev;
skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
rc = dev_queue_xmit(skb);
if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
struct netvsc_vf_pcpu_stats *pcpu_stats
= this_cpu_ptr(ndev_ctx->vf_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
pcpu_stats->tx_packets++;
pcpu_stats->tx_bytes += len;
u64_stats_update_end(&pcpu_stats->syncp);
this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet = NULL;
unsigned int num_data_pgs;
struct rndis_message *rndis_msg;
struct net_device *vf_netdev;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
/* If VF is present and up then redirect packets to it.
 * Skip the VF if it is marked down or has no carrier.
 * If netpoll is in use, then the VF cannot be used either.
 */
vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
if (vf_netdev && netif_running(vf_netdev) &&
netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
return netvsc_vf_xmit(net, vf_netdev, skb);
/* We will at most need two pages to describe the rndis
 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
 * of pages in a single packet. If the skb is scattered across
 * more pages we try linearizing it.
 */
num_data_pgs = netvsc_get_slots(skb) + 2;
if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
++net_device_ctx->eth_stats.tx_scattered;
if (skb_linearize(skb))
num_data_pgs = netvsc_get_slots(skb) + 2;
if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
++net_device_ctx->eth_stats.tx_too_big;
/*
 * Place the rndis header in the skb headroom;
 * skb->cb will be used for the hv_netvsc_packet.
 */
ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
/* Use the skb control buffer for building up the packet */
BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
FIELD_SIZEOF(struct sk_buff, cb));
packet = (struct hv_netvsc_packet *)skb->cb;
packet->q_idx = skb_get_queue_mapping(skb);
packet->total_data_buflen = skb->len;
packet->total_bytes = skb->len;
packet->total_packets = 1;
rndis_msg = (struct rndis_message *)skb->head;
/* Add the rndis header */
rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
rndis_msg->msg_len = packet->total_data_buflen;
rndis_msg->msg.pkt = (struct rndis_packet) {
.data_offset = sizeof(struct rndis_packet),
.data_len = packet->total_data_buflen,
.per_pkt_info_offset = sizeof(struct rndis_packet),
};
rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
hash = skb_get_hash_raw(skb);
if (hash != 0 && net->real_num_tx_queues > 1) {
rndis_msg_size += NDIS_HASH_PPI_SIZE;
hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
if (skb_vlan_tag_present(skb)) {
struct ndis_pkt_8021q_info *vlan;
rndis_msg_size += NDIS_VLAN_PPI_SIZE;
vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
if (skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
TCP_LARGESEND_PKTINFO);
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
if (skb->protocol == htons(ETH_P_IP)) {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
ip_hdr(skb)->tot_len = 0;
ip_hdr(skb)->check = 0;
tcp_hdr(skb)->check =
~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
struct ndis_tcp_ip_checksum_info *csum_info;
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
csum_info->value = 0;
csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
csum_info->transmit.is_ipv4 = 1;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
csum_info->transmit.tcp_checksum = 1;
csum_info->transmit.udp_checksum = 1;
csum_info->transmit.is_ipv6 = 1;
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
csum_info->transmit.tcp_checksum = 1;
csum_info->transmit.udp_checksum = 1;
/* Can't do offload of this type of checksum */
if (skb_checksum_help(skb))
/* Start filling in the page buffers with the rndis hdr */
rndis_msg->msg_len += rndis_msg_size;
packet->total_data_buflen = rndis_msg->msg_len;
packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
/* timestamp packet in software */
skb_tx_timestamp(skb);
ret = netvsc_send(net, packet, rndis_msg, pb, skb);
if (likely(ret == 0))
if (ret == -EAGAIN) {
++net_device_ctx->eth_stats.tx_busy;
return NETDEV_TX_BUSY;
++net_device_ctx->eth_stats.tx_no_space;
dev_kfree_skb_any(skb);
net->stats.tx_dropped++;
++net_device_ctx->eth_stats.tx_no_memory;
/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
struct rndis_message *resp)
struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
struct net_device_context *ndev_ctx = netdev_priv(net);
struct netvsc_reconfig *event;
/* Update the physical link speed when changing to another vSwitch */
if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
speed = *(u32 *)((void *)indicate
+ indicate->status_buf_offset) / 10000;
ndev_ctx->speed = speed;
/* Handle these link change statuses below */
if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
if (net->reg_state != NETREG_REGISTERED)
event = kzalloc(sizeof(*event), GFP_ATOMIC);
event->event = indicate->status;
spin_lock_irqsave(&ndev_ctx->lock, flags);
list_add_tail(&event->list, &ndev_ctx->reconfig_events);
spin_unlock_irqrestore(&ndev_ctx->lock, flags);
schedule_delayed_work(&ndev_ctx->dwork, 0);

static void netvsc_comp_ipcsum(struct sk_buff *skb)
struct iphdr *iph = (struct iphdr *)skb->data;
iph->check = ip_fast_csum(iph, iph->ihl);

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct napi_struct *napi,
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan,
void *data, u32 buflen)
skb = napi_alloc_skb(napi, buflen);
/*
 * Copy to skb. This copy is needed here since the memory pointed to
 * by hv_netvsc_packet cannot be deallocated.
 */
skb_put_data(skb, data, buflen);
skb->protocol = eth_type_trans(skb, net);
/* skb is already created with CHECKSUM_NONE */
skb_checksum_none_assert(skb);
/* Incoming packets may have IP header checksum verified by the host.
 * They may not have IP header checksum computed after coalescing.
 * We compute it here if the flags are set, because on Linux, the IP
 * checksum is always checked.
 */
if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
csum_info->receive.ip_checksum_succeeded &&
skb->protocol == htons(ETH_P_IP))
netvsc_comp_ipcsum(skb);
/* Do L4 checksum offload if enabled and present. */
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
struct netvsc_device *net_device,
struct vmbus_channel *channel,
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan)
struct net_device_context *net_device_ctx = netdev_priv(net);
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
struct netvsc_stats *rx_stats;
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
/* Allocate a skb - TODO direct I/O to pages? */
skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
csum_info, vlan, data, len);
if (unlikely(!skb)) {
++net_device_ctx->eth_stats.rx_no_memory;
return NVSP_STAT_FAIL;
skb_record_rx_queue(skb, q_idx);
/*
 * Even if injecting the packet, record the statistics
 * on the synthetic device because modifying the VF device
 * statistics will not work correctly.
 */
rx_stats = &nvchan->rx_stats;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->bytes += len;
if (skb->pkt_type == PACKET_BROADCAST)
++rx_stats->broadcast;
else if (skb->pkt_type == PACKET_MULTICAST)
++rx_stats->multicast;
u64_stats_update_end(&rx_stats->syncp);
napi_gro_receive(&nvchan->napi, skb);
return NVSP_STAT_SUCCESS;
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));

static void netvsc_get_channels(struct net_device *net,
struct ethtool_channels *channel)
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
channel->max_combined = nvdev->max_chn;
channel->combined_count = nvdev->num_chn;

/* Alloc struct netvsc_device_info, and initialize it from either existing
 * struct netvsc_device, or from default values.
 */
static struct netvsc_device_info *netvsc_devinfo_get
(struct netvsc_device *nvdev)
struct netvsc_device_info *dev_info;
dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
dev_info->num_chn = nvdev->num_chn;
dev_info->send_sections = nvdev->send_section_cnt;
dev_info->send_section_size = nvdev->send_section_size;
dev_info->recv_sections = nvdev->recv_section_cnt;
dev_info->recv_section_size = nvdev->recv_section_size;
memcpy(dev_info->rss_key, nvdev->extension->rss_key,
dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
dev_info->send_sections = NETVSC_DEFAULT_TX;
dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
dev_info->recv_sections = NETVSC_DEFAULT_RX;
dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
static int netvsc_detach(struct net_device *ndev,
struct netvsc_device *nvdev)
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct hv_device *hdev = ndev_ctx->device_ctx;
/* Don't continue trying to set up sub-channels */
if (cancel_work_sync(&nvdev->subchan_work))
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
netvsc_tx_disable(nvdev, ndev);
ret = rndis_filter_close(nvdev);
"unable to close device (ret %d).\n", ret);
ret = netvsc_wait_until_empty(nvdev);
"Ring buffer not empty after closing rndis\n");
netif_device_detach(ndev);
rndis_filter_device_remove(hdev, nvdev);
static int netvsc_attach(struct net_device *ndev,
struct netvsc_device_info *dev_info)
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct hv_device *hdev = ndev_ctx->device_ctx;
struct netvsc_device *nvdev;
struct rndis_device *rdev;
nvdev = rndis_filter_device_add(hdev, dev_info);
return PTR_ERR(nvdev);
if (nvdev->num_chn > 1) {
ret = rndis_set_subchannel(ndev, nvdev, dev_info);
/* if unavailable, just proceed with one queue */
/* In any case device is now ready */
nvdev->tx_disable = false;
netif_device_attach(ndev);
/* Note: enable and attach happen when sub-channels are set up */
netif_carrier_off(ndev);
if (netif_running(ndev)) {
ret = rndis_filter_open(nvdev);
rdev = nvdev->extension;
if (!rdev->link_state)
netif_carrier_on(ndev);
netif_device_detach(ndev);
rndis_filter_device_remove(hdev, nvdev);
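/* netvsc_detach()/netvsc_attach() form the reconfiguration pattern used by
 * set_channels, change_mtu and set_ringparam below: tear the device down,
 * re-add it with the new parameters, and roll back to the saved settings if
 * the re-add fails.
 */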
static int netvsc_set_channels(struct net_device *net,
struct ethtool_channels *channels)
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
unsigned int orig, count = channels->combined_count;
struct netvsc_device_info *device_info;
/* We do not support separate count for rx, tx, or other */
channels->rx_count || channels->tx_count || channels->other_count)
if (!nvdev || nvdev->destroy)
if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
if (count > nvdev->max_chn)
orig = nvdev->num_chn;
device_info = netvsc_devinfo_get(nvdev);
device_info->num_chn = count;
ret = netvsc_detach(net, nvdev);
ret = netvsc_attach(net, device_info);
device_info->num_chn = orig;
if (netvsc_attach(net, device_info))
netdev_err(net, "restoring channel setting failed\n");

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
struct ethtool_link_ksettings diff1 = *cmd;
struct ethtool_link_ksettings diff2 = {};
diff1.base.speed = 0;
diff1.base.duplex = 0;
/* advertising and cmd are usually set */
ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
/* We set port to PORT_OTHER */
diff2.base.port = PORT_OTHER;
return !memcmp(&diff1, &diff2, sizeof(diff1));
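/* Validation trick: clear every field userspace is allowed to change in a
 * copy of the request, then require the remainder to equal an all-default
 * struct whose port is preset to PORT_OTHER; any other modified field makes
 * the request invalid.
 */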
static void netvsc_init_settings(struct net_device *dev)
struct net_device_context *ndc = netdev_priv(dev);
ndc->l4_hash = HV_DEFAULT_L4HASH;
ndc->speed = SPEED_UNKNOWN;
ndc->duplex = DUPLEX_FULL;

static int netvsc_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
struct net_device_context *ndc = netdev_priv(dev);
cmd->base.speed = ndc->speed;
cmd->base.duplex = ndc->duplex;
cmd->base.port = PORT_OTHER;

static int netvsc_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
struct net_device_context *ndc = netdev_priv(dev);
speed = cmd->base.speed;
if (!ethtool_validate_speed(speed) ||
!ethtool_validate_duplex(cmd->base.duplex) ||
!netvsc_validate_ethtool_ss_cmd(cmd))
ndc->duplex = cmd->base.duplex;

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct net_device_context *ndevctx = netdev_priv(ndev);
struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
int orig_mtu = ndev->mtu;
struct netvsc_device_info *device_info;
if (!nvdev || nvdev->destroy)
device_info = netvsc_devinfo_get(nvdev);
/* Change MTU of underlying VF netdev first. */
ret = dev_set_mtu(vf_netdev, mtu);
ret = netvsc_detach(ndev, nvdev);
ret = netvsc_attach(ndev, device_info);
/* Attempt rollback to original MTU */
ndev->mtu = orig_mtu;
if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring mtu failed\n");
dev_set_mtu(vf_netdev, orig_mtu);

static void netvsc_get_vf_stats(struct net_device *net,
struct netvsc_vf_pcpu_stats *tot)
struct net_device_context *ndev_ctx = netdev_priv(net);
memset(tot, 0, sizeof(*tot));
for_each_possible_cpu(i) {
const struct netvsc_vf_pcpu_stats *stats
= per_cpu_ptr(ndev_ctx->vf_stats, i);
u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
rx_packets = stats->rx_packets;
tx_packets = stats->tx_packets;
rx_bytes = stats->rx_bytes;
tx_bytes = stats->tx_bytes;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
tot->rx_bytes += rx_bytes;
tot->tx_bytes += tx_bytes;
tot->tx_dropped += stats->tx_dropped;
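/* The u64_stats_fetch_begin_irq()/retry_irq() pair re-reads a CPU's counters
 * until it observes a consistent snapshot, keeping 64-bit statistics coherent
 * on 32-bit kernels without locking the writer's hot path.
 */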
static void netvsc_get_pcpu_stats(struct net_device *net,
struct netvsc_ethtool_pcpu_stats *pcpu_tot)
struct net_device_context *ndev_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
/* fetch percpu stats of vf */
for_each_possible_cpu(i) {
const struct netvsc_vf_pcpu_stats *stats =
per_cpu_ptr(ndev_ctx->vf_stats, i);
struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
this_tot->vf_rx_packets = stats->rx_packets;
this_tot->vf_tx_packets = stats->tx_packets;
this_tot->vf_rx_bytes = stats->rx_bytes;
this_tot->vf_tx_bytes = stats->tx_bytes;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
this_tot->rx_packets = this_tot->vf_rx_packets;
this_tot->tx_packets = this_tot->vf_tx_packets;
this_tot->rx_bytes = this_tot->vf_rx_bytes;
this_tot->tx_bytes = this_tot->vf_tx_bytes;
/* fetch percpu stats of netvsc */
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
const struct netvsc_stats *stats;
struct netvsc_ethtool_pcpu_stats *this_tot =
&pcpu_tot[nvchan->channel->target_cpu];
stats = &nvchan->tx_stats;
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
packets = stats->packets;
bytes = stats->bytes;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
this_tot->tx_bytes += bytes;
this_tot->tx_packets += packets;
stats = &nvchan->rx_stats;
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
packets = stats->packets;
bytes = stats->bytes;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
this_tot->rx_bytes += bytes;
this_tot->rx_packets += packets;

static void netvsc_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *t)
struct net_device_context *ndev_ctx = netdev_priv(net);
struct netvsc_device *nvdev;
struct netvsc_vf_pcpu_stats vf_tot;
nvdev = rcu_dereference(ndev_ctx->nvdev);
netdev_stats_to_stats64(t, &net->stats);
netvsc_get_vf_stats(net, &vf_tot);
t->rx_packets += vf_tot.rx_packets;
t->tx_packets += vf_tot.tx_packets;
t->rx_bytes += vf_tot.rx_bytes;
t->tx_bytes += vf_tot.tx_bytes;
t->tx_dropped += vf_tot.tx_dropped;
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
const struct netvsc_stats *stats;
u64 packets, bytes, multicast;
stats = &nvchan->tx_stats;
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
packets = stats->packets;
bytes = stats->bytes;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
t->tx_bytes += bytes;
t->tx_packets += packets;
stats = &nvchan->rx_stats;
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
packets = stats->packets;
bytes = stats->bytes;
multicast = stats->multicast + stats->broadcast;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
t->rx_bytes += bytes;
t->rx_packets += packets;
t->multicast += multicast;

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
struct sockaddr *addr = p;
err = eth_prepare_mac_addr_change(ndev, p);
err = dev_set_mac_address(vf_netdev, addr);
err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
eth_commit_mac_addr_change(ndev, p);
} else if (vf_netdev) {
/* rollback change on VF */
memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
dev_set_mac_address(vf_netdev, addr);
static const struct {
char name[ETH_GSTRING_LEN];
u16 offset;
} netvsc_stats[] = {
{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, pcpu_stats[] = {
{ "cpu%u_rx_packets",
offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
{ "cpu%u_rx_bytes",
offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
{ "cpu%u_tx_packets",
offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
{ "cpu%u_tx_bytes",
offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
{ "cpu%u_vf_rx_packets",
offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
{ "cpu%u_vf_rx_bytes",
offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
{ "cpu%u_vf_tx_packets",
offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
{ "cpu%u_vf_tx_bytes",
offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
{ "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};
#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats)

/* statistics per CPU (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
switch (string_set) {
return NETVSC_GLOBAL_STATS_LEN
+ NETVSC_VF_STATS_LEN
+ NETVSC_QUEUE_STATS_LEN(nvdev)
+ NETVSC_PCPU_STATS_LEN;

static void netvsc_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
const void *nds = &ndc->eth_stats;
const struct netvsc_stats *qstats;
struct netvsc_vf_pcpu_stats sum;
struct netvsc_ethtool_pcpu_stats *pcpu_sum;
for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
netvsc_get_vf_stats(dev, &sum);
for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
for (j = 0; j < nvdev->num_chn; j++) {
qstats = &nvdev->chan_table[j].tx_stats;
do {
start = u64_stats_fetch_begin_irq(&qstats->syncp);
packets = qstats->packets;
bytes = qstats->bytes;
} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
data[i++] = packets;
qstats = &nvdev->chan_table[j].rx_stats;
do {
start = u64_stats_fetch_begin_irq(&qstats->syncp);
packets = qstats->packets;
bytes = qstats->bytes;
} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
data[i++] = packets;
pcpu_sum = kvmalloc_array(num_possible_cpus(),
sizeof(struct netvsc_ethtool_pcpu_stats),
netvsc_get_pcpu_stats(dev, pcpu_sum);
for_each_present_cpu(cpu) {
struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
data[i++] = *(u64 *)((void *)this_sum
+ pcpu_stats[j].offset);

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
switch (stringset) {
for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
for (i = 0; i < nvdev->num_chn; i++) {
sprintf(p, "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
for_each_present_cpu(cpu) {
for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
sprintf(p, pcpu_stats[i].name, cpu);
p += ETH_GSTRING_LEN;

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
struct ethtool_rxnfc *info)
const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
info->data = RXH_IP_SRC | RXH_IP_DST;
switch (info->flow_type) {
if (ndc->l4_hash & HV_TCP4_L4HASH)
info->data |= l4_flag;
if (ndc->l4_hash & HV_TCP6_L4HASH)
info->data |= l4_flag;
if (ndc->l4_hash & HV_UDP4_L4HASH)
info->data |= l4_flag;
if (ndc->l4_hash & HV_UDP6_L4HASH)
info->data |= l4_flag;

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
info->data = nvdev->num_chn;
return netvsc_get_rss_hash_opts(ndc, info);

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
struct ethtool_rxnfc *info)
if (info->data == (RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
switch (info->flow_type) {
ndc->l4_hash |= HV_TCP4_L4HASH;
ndc->l4_hash |= HV_TCP6_L4HASH;
ndc->l4_hash |= HV_UDP4_L4HASH;
ndc->l4_hash |= HV_UDP6_L4HASH;
if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
switch (info->flow_type) {
ndc->l4_hash &= ~HV_TCP4_L4HASH;
ndc->l4_hash &= ~HV_TCP6_L4HASH;
ndc->l4_hash &= ~HV_UDP4_L4HASH;
ndc->l4_hash &= ~HV_UDP6_L4HASH;

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
struct net_device_context *ndc = netdev_priv(ndev);
if (info->cmd == ETHTOOL_SRXFH)
return netvsc_set_rss_hash_opts(ndc, info);

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *dev)
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev;
ndev = rcu_dereference(ndc->nvdev);
for (i = 0; i < ndev->num_chn; i++) {
struct netvsc_channel *nvchan = &ndev->chan_table[i];
napi_schedule(&nvchan->napi);
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
return NETVSC_HASH_KEYLEN;

static u32 netvsc_rss_indir_size(struct net_device *dev)
return ITAB_NUM;

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
struct rndis_device *rndis_dev;
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
rndis_dev = ndev->extension;
for (i = 0; i < ITAB_NUM; i++)
indir[i] = ndc->rx_table[i];
memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
struct rndis_device *rndis_dev;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
rndis_dev = ndev->extension;
for (i = 0; i < ITAB_NUM; i++)
if (indir[i] >= ndev->num_chn)
for (i = 0; i < ITAB_NUM; i++)
ndc->rx_table[i] = indir[i];
key = rndis_dev->rss_key;
return rndis_filter_set_rss_param(rndis_dev, key);
/* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
 * It does have a pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
struct ethtool_ringparam *ring)
ring->rx_pending = nvdev->recv_section_cnt;
ring->tx_pending = nvdev->send_section_cnt;
if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
/ nvdev->send_section_size;
static void netvsc_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
__netvsc_get_ringparam(nvdev, ring);

static int netvsc_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ring)
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
struct netvsc_device_info *device_info;
struct ethtool_ringparam orig;
if (!nvdev || nvdev->destroy)
memset(&orig, 0, sizeof(orig));
__netvsc_get_ringparam(nvdev, &orig);
new_tx = clamp_t(u32, ring->tx_pending,
NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
new_rx = clamp_t(u32, ring->rx_pending,
NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
if (new_tx == orig.tx_pending &&
new_rx == orig.rx_pending)
return 0; /* no change */
device_info = netvsc_devinfo_get(nvdev);
device_info->send_sections = new_tx;
device_info->recv_sections = new_rx;
ret = netvsc_detach(ndev, nvdev);
ret = netvsc_attach(ndev, device_info);
device_info->send_sections = orig.tx_pending;
device_info->recv_sections = orig.rx_pending;
if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring ringparam failed");

static u32 netvsc_get_msglevel(struct net_device *ndev)
struct net_device_context *ndev_ctx = netdev_priv(ndev);
return ndev_ctx->msg_enable;

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
struct net_device_context *ndev_ctx = netdev_priv(ndev);
ndev_ctx->msg_enable = val;

static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_msglevel = netvsc_get_msglevel,
.set_msglevel = netvsc_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ethtool_stats = netvsc_get_ethtool_stats,
.get_sset_count = netvsc_get_sset_count,
.get_strings = netvsc_get_strings,
.get_channels = netvsc_get_channels,
.set_channels = netvsc_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = netvsc_get_rxnfc,
.set_rxnfc = netvsc_set_rxnfc,
.get_rxfh_key_size = netvsc_get_rxfh_key_size,
.get_rxfh_indir_size = netvsc_rss_indir_size,
.get_rxfh = netvsc_get_rxfh,
.set_rxfh = netvsc_set_rxfh,
.get_link_ksettings = netvsc_get_link_ksettings,
.set_link_ksettings = netvsc_set_link_ksettings,
.get_ringparam = netvsc_get_ringparam,
.set_ringparam = netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
.ndo_open = netvsc_open,
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_change_rx_flags = netvsc_change_rx_flags,
.ndo_set_rx_mode = netvsc_set_rx_mode,
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
struct net_device_context *ndev_ctx =
container_of(w, struct net_device_context, dwork.work);
struct hv_device *device_obj = ndev_ctx->device_ctx;
struct net_device *net = hv_get_drvdata(device_obj);
struct netvsc_device *net_device;
struct rndis_device *rdev;
struct netvsc_reconfig *event = NULL;
bool notify = false, reschedule = false;
unsigned long flags, next_reconfig, delay;
/* if changes are happening, come back later */
if (!rtnl_trylock()) {
schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
net_device = rtnl_dereference(ndev_ctx->nvdev);
rdev = net_device->extension;
next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
if (time_is_after_jiffies(next_reconfig)) {
/* link_watch only sends one notification with current state
 * per second, avoid doing reconfig more frequently. Handle
 * wrap-around.
 */
delay = next_reconfig - jiffies;
delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
schedule_delayed_work(&ndev_ctx->dwork, delay);
ndev_ctx->last_reconfig = jiffies;
spin_lock_irqsave(&ndev_ctx->lock, flags);
if (!list_empty(&ndev_ctx->reconfig_events)) {
event = list_first_entry(&ndev_ctx->reconfig_events,
struct netvsc_reconfig, list);
list_del(&event->list);
reschedule = !list_empty(&ndev_ctx->reconfig_events);
spin_unlock_irqrestore(&ndev_ctx->lock, flags);
switch (event->event) {
/* Only the following events are possible due to the check in
 * netvsc_linkstatus_callback()
 */
case RNDIS_STATUS_MEDIA_CONNECT:
if (rdev->link_state) {
rdev->link_state = false;
netif_carrier_on(net);
netvsc_tx_enable(net_device, net);
case RNDIS_STATUS_MEDIA_DISCONNECT:
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
netvsc_tx_disable(net_device, net);
case RNDIS_STATUS_NETWORK_CHANGE:
/* Only makes sense if carrier is present */
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
netvsc_tx_disable(net_device, net);
event->event = RNDIS_STATUS_MEDIA_CONNECT;
spin_lock_irqsave(&ndev_ctx->lock, flags);
list_add(&event->list, &ndev_ctx->reconfig_events);
spin_unlock_irqrestore(&ndev_ctx->lock, flags);
netdev_notify_peers(net);
/* link_watch only sends one notification with current state per
 * second, handle next reconfig event in 2 seconds.
 */
schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
struct net_device_context *net_device_ctx;
struct net_device *dev;
dev = netdev_master_upper_dev_get(vf_netdev);
if (!dev || dev->netdev_ops != &device_ops)
return NULL; /* not a netvsc device */
net_device_ctx = netdev_priv(dev);
if (!rtnl_dereference(net_device_ctx->nvdev))
return NULL; /* device is removed */

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
struct sk_buff *skb = *pskb;
struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netvsc_vf_pcpu_stats *pcpu_stats
= this_cpu_ptr(ndev_ctx->vf_stats);
skb = skb_share_check(skb, GFP_ATOMIC);
return RX_HANDLER_CONSUMED;
u64_stats_update_begin(&pcpu_stats->syncp);
pcpu_stats->rx_packets++;
pcpu_stats->rx_bytes += skb->len;
u64_stats_update_end(&pcpu_stats->syncp);
return RX_HANDLER_ANOTHER;
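/* Returning RX_HANDLER_ANOTHER makes the core deliver the skb again, now
 * attributed to the synthetic device, so traffic arriving on the VF is
 * accounted for and seen on the one netvsc interface.
 */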
static int netvsc_vf_join(struct net_device *vf_netdev,
struct net_device *ndev, int context)
struct net_device_context *ndev_ctx = netdev_priv(ndev);
ret = netdev_rx_handler_register(vf_netdev,
netvsc_vf_handle_frame, ndev);
netdev_err(vf_netdev,
"can not register netvsc VF receive handler (err = %d)\n",
goto rx_handler_failed;
ret = netdev_master_upper_dev_link(vf_netdev, ndev,
netdev_err(vf_netdev,
"can not set master device %s (err = %d)\n",
goto upper_link_failed;
/* If this registration is called from the probe context, vf_takeover
 * is taken care of later in probe itself.
 */
if (context == VF_REG_IN_NOTIFIER)
schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
netdev_info(vf_netdev, "joined to %s\n", ndev->name);
netdev_rx_handler_unregister(vf_netdev);
static void __netvsc_vf_setup(struct net_device *ndev,
struct net_device *vf_netdev)
/* Align MTU of VF with master */
ret = dev_set_mtu(vf_netdev, ndev->mtu);
netdev_warn(vf_netdev,
"unable to change mtu to %u\n", ndev->mtu);
/* set multicast etc flags on VF */
dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
/* sync address list from ndev to VF */
netif_addr_lock_bh(ndev);
dev_uc_sync(vf_netdev, ndev);
dev_mc_sync(vf_netdev, ndev);
netif_addr_unlock_bh(ndev);
if (netif_running(ndev)) {
ret = dev_open(vf_netdev);
netdev_warn(vf_netdev,
"unable to open: %d\n", ret);

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
struct net_device_context *ndev_ctx
= container_of(w, struct net_device_context, vf_takeover.work);
struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
struct net_device *vf_netdev;
if (!rtnl_trylock()) {
schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
__netvsc_vf_setup(ndev, vf_netdev);
/* Find netvsc by VF serial number.
 * The PCI hyperv controller records the serial number as the slot kobj name.
 */
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
struct device *parent = vf_netdev->dev.parent;
struct net_device_context *ndev_ctx;
struct net_device *ndev;
struct pci_dev *pdev;
if (!parent || !dev_is_pci(parent))
return NULL; /* not a PCI device */
pdev = to_pci_dev(parent);
netdev_notice(vf_netdev, "no PCI slot information\n");
if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
pci_slot_name(pdev->slot));
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
if (!ndev_ctx->vf_alloc)
if (ndev_ctx->vf_serial != serial)
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
if (ndev->addr_len != vf_netdev->addr_len ||
memcmp(ndev->perm_addr, vf_netdev->perm_addr,
ndev->addr_len) != 0)
/* Fallback path to check synthetic vf with the help of the MAC address.
 * Because this function can be called before vf_netdev is
 * initialized (NETDEV_POST_INIT) when its perm_addr has not been copied
 * from dev_addr, also try to match to its dev_addr.
 * Note: On Hyper-V and Azure, it's not possible to set a MAC address
 * on a VF that matches the MAC of an unrelated NETVSC device.
 */
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
netdev_notice(vf_netdev,
"no netdev found for vf serial:%u\n", serial);
static int netvsc_prepare_bonding(struct net_device *vf_netdev)
struct net_device *ndev;
ndev = get_netvsc_byslot(vf_netdev);
/* set slave flag before open to prevent IPv6 addrconf */
vf_netdev->flags |= IFF_SLAVE;

static int netvsc_register_vf(struct net_device *vf_netdev, int context)
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
struct net_device *ndev;
if (vf_netdev->addr_len != ETH_ALEN)
ndev = get_netvsc_byslot(vf_netdev);
net_device_ctx = netdev_priv(ndev);
netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
/* if the synthetic interface is in a different namespace,
 * then move the VF to that namespace; join will be
 * done again in that context.
 */
if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
ret = dev_change_net_namespace(vf_netdev,
dev_net(ndev), "eth%d");
netdev_err(vf_netdev,
"could not move to same namespace as %s: %d\n",
netdev_info(vf_netdev,
"VF moved to namespace with: %s\n",
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
dev_hold(vf_netdev);
rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
2245 static int netvsc_vf_changed(struct net_device *vf_netdev)
2247 struct net_device_context *net_device_ctx;
2248 struct netvsc_device *netvsc_dev;
2249 struct net_device *ndev;
2250 bool vf_is_up = netif_running(vf_netdev);
2252 ndev = get_netvsc_byref(vf_netdev);
2256 net_device_ctx = netdev_priv(ndev);
2257 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2261 if (vf_is_up && !net_device_ctx->vf_alloc) {
2262 netdev_info(ndev, "Waiting for the VF association from host\n");
2263 wait_for_completion(&net_device_ctx->vf_add);
2266 netvsc_switch_datapath(ndev, vf_is_up);
2267 netdev_info(ndev, "Data path switched %s VF: %s\n",
2268 vf_is_up ? "to" : "from", vf_netdev->name);
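/* netvsc_switch_datapath() (implemented in netvsc.c) tells the host which
 * path should carry receive traffic: the VF when it is up, or the synthetic
 * NetVSC path when it is not.
 */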
static int netvsc_unregister_vf(struct net_device *vf_netdev)
struct net_device *ndev;
struct net_device_context *net_device_ctx;
ndev = get_netvsc_byref(vf_netdev);
net_device_ctx = netdev_priv(ndev);
cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);

static int check_dev_is_matching_vf(struct net_device *event_ndev)
/* Skip NetVSC interfaces */
if (event_ndev->netdev_ops == &device_ops)
/* Avoid non-Ethernet type devices */
if (event_ndev->type != ARPHRD_ETHER)
/* Avoid Vlan dev with same MAC registering as VF */
if (is_vlan_dev(event_ndev))
/* Avoid Bonding master dev with same MAC registering as VF */
if (netif_is_bond_master(event_ndev))

static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
struct net_device *net = NULL, *vf_netdev;
struct net_device_context *net_device_ctx;
struct netvsc_device_info *device_info = NULL;
struct netvsc_device *nvdev;
net = alloc_etherdev_mq(sizeof(struct net_device_context),
netif_carrier_off(net);
netvsc_init_settings(net);
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
if (netif_msg_probe(net_device_ctx))
netdev_dbg(net, "netvsc msg_enable: %d\n",
net_device_ctx->msg_enable);
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
init_completion(&net_device_ctx->vf_add);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
net_device_ctx->vf_stats
= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
if (!net_device_ctx->vf_stats)
net->netdev_ops = &device_ops;
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
/* We always need headroom for rndis header */
net->needed_headroom = RNDIS_AND_PPI_SIZE;
/* Initialize the number of queues to be 1, we may change it if more
 * channels are offered later.
 */
netif_set_real_num_tx_queues(net, 1);
netif_set_real_num_rx_queues(net, 1);
/* Notify the netvsc driver of the new device */
device_info = netvsc_devinfo_get(NULL);
goto devinfo_failed;
nvdev = rndis_filter_device_add(dev, device_info);
if (IS_ERR(nvdev)) {
ret = PTR_ERR(nvdev);
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
 * for all subchannels to show up, but that may not happen because
 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
 * -> ... -> device_add() -> ... -> __device_attach() can't get
 * the device lock, so all the subchannels can't be processed --
 * finally netvsc_subchan_work() hangs forever.
 */
rtnl_lock();
if (nvdev->num_chn > 1)
schedule_work(&nvdev->subchan_work);
/* hw_features computed in rndis_netdev_set_hwcaps() */
net->features = net->hw_features |
NETIF_F_HIGHDMA | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
net->vlan_features = net->features;
netdev_lockdep_set_classes(net);
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
net->max_mtu = NETVSC_MTU - ETH_HLEN;
else
net->max_mtu = ETH_DATA_LEN;
nvdev->tx_disable = false;
ret = register_netdevice(net);
pr_err("Unable to register netdev.\n");
goto register_failed;
list_add(&net_device_ctx->list, &netvsc_dev_list);
/* When the hv_netvsc driver is unloaded and reloaded, the
 * NETDEV_REGISTER event for the VF device is replayed before probe
 * is complete. This is because register_netdevice_notifier() gets
 * registered before vmbus_driver_register(), so the callback func
 * is set before probe and we don't miss events like NETDEV_POST_INIT.
 * So, in this section we try to register the matching VF device that
 * is present as a netdevice, knowing that its register call is not
 * processed in the netvsc_netdev_notifier (as probing is in progress
 * and get_netvsc_byslot fails).
 */
for_each_netdev(dev_net(net), vf_netdev) {
ret = check_dev_is_matching_vf(vf_netdev);
if (net != get_netvsc_byslot(vf_netdev))
netvsc_prepare_bonding(vf_netdev);
netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
__netvsc_vf_setup(net, vf_netdev);
rndis_filter_device_remove(dev, nvdev);
free_percpu(net_device_ctx->vf_stats);
hv_set_drvdata(dev, NULL);

static int netvsc_remove(struct hv_device *dev)
struct net_device_context *ndev_ctx;
struct net_device *vf_netdev, *net;
struct netvsc_device *nvdev;
net = hv_get_drvdata(dev);
dev_err(&dev->device, "No net device to remove\n");
ndev_ctx = netdev_priv(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
nvdev = rtnl_dereference(ndev_ctx->nvdev);
cancel_work_sync(&nvdev->subchan_work);
/*
 * Call to the vsc driver to let it know that the device is being
 * removed. Also blocks mtu and channel changes.
 */
vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
netvsc_unregister_vf(vf_netdev);
rndis_filter_device_remove(dev, nvdev);
unregister_netdevice(net);
list_del(&ndev_ctx->list);
hv_set_drvdata(dev, NULL);
free_percpu(ndev_ctx->vf_stats);

static const struct hv_vmbus_device_id id_table[] = {

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
.name = KBUILD_MODNAME,
.id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
.probe_type = PROBE_FORCE_SYNCHRONOUS,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
ret = check_dev_is_matching_vf(event_dev);
case NETDEV_POST_INIT:
return netvsc_prepare_bonding(event_dev);
case NETDEV_REGISTER:
return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
case NETDEV_UNREGISTER:
return netvsc_unregister_vf(event_dev);
return netvsc_vf_changed(event_dev);

static struct notifier_block netvsc_netdev_notifier = {
.notifier_call = netvsc_netdev_event,
};
static void __exit netvsc_drv_exit(void)
unregister_netdevice_notifier(&netvsc_netdev_notifier);
vmbus_driver_unregister(&netvsc_drv);

static int __init netvsc_drv_init(void)
if (ring_size < RING_SIZE_MIN) {
ring_size = RING_SIZE_MIN;
pr_info("Increased ring_size to %u (min allowed)\n",
netvsc_ring_bytes = ring_size * PAGE_SIZE;
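/* Example: the default ring_size of 128 pages yields 512 KiB per VMBus ring
 * buffer on systems with 4 KiB pages.
 */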
register_netdevice_notifier(&netvsc_netdev_notifier);
ret = vmbus_driver_register(&netvsc_drv);
unregister_netdevice_notifier(&netvsc_netdev_notifier);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);