/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN		64
#define RETRY_US_LO		5000
#define RETRY_US_HI		10000
#define RETRY_MAX		2000	/* >10 sec */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
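
/* Sizing sketch (assuming the common 4 KiB PAGE_SIZE): the default of 128
 * pages gives a 512 KiB VMBus ring buffer in each direction per channel;
 * values below RING_SIZE_MIN are raised at module init, see
 * netvsc_drv_init().
 */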

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting synthetic device up transparently sets
		 * slave as up. If open fails, then slave will be
		 * still be offline (and not used).
		 */
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}
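
/* Worst-case wait budget above: RETRY_MAX iterations sleeping 5-10 ms each
 * (RETRY_US_LO..RETRY_US_HI) is roughly 10-20 seconds, which is what the
 * ">10 sec" note next to RETRY_MAX refers to.
 */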

static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   u32 pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	if (flow.basic.ip_proto == IPPROTO_TCP ||
	    (flow.basic.ip_proto == IPPROTO_UDP &&
	     ((flow.basic.n_proto == htons(ETH_P_IP) && ndc->udp4_l4_hash) ||
	      (flow.basic.n_proto == htons(ETH_P_IPV6) &&
	       ndc->udp6_l4_hash)))) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		__skb_set_sw_hash(skb, hash, false);
	}

	return hash;
}
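
/* With the scheme above, a UDP datagram that may be fragmented is hashed on
 * addresses only (v4addrs/v6addrs), so every fragment of a flow maps to the
 * same send-table slot; the full 5-tuple skb_get_hash() is used only for
 * TCP, or for UDP when L4 hashing is enabled via ethtool.
 */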

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
						       accel_priv, fallback);
		else
			txq = fallback(vf_netdev, skb);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (txq >= ndev->real_num_tx_queues)
		txq -= ndev->real_num_tx_queues;

	return txq;
}
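
/* Example of the fold above: if the VF selected txq 5 while the synthetic
 * device exposes only 4 queues, 5 is reduced to 1, so the returned index is
 * always valid for the synthetic device as well.
 */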

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused fragments from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
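
/* Worked example (assuming 4 KiB pages): a 100-byte linear area starting at
 * in-page offset 4090 gives DIV_ROUND_UP(4090 + 100, 4096) = 2 slots, since
 * it straddles a page boundary; fragments are counted the same way in
 * count_skb_frag_slots().
 */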

static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}
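
/* Transmit entry point: when a VF (accelerated) path is up and has carrier,
 * the skb is handed to it via netvsc_vf_xmit() above; NET_XMIT_CN there
 * signals congestion, not loss, and is still counted as transmitted.
 * Otherwise the skb is wrapped in an RNDIS message and sent over VMBus.
 */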
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If VF is present and up then redirect packets to it.
	 * Skip the VF if it is marked down or has no carrier.
	 * If netpoll is in use, then VF can not be used either.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}
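
	/* Note: the "+ 2" above reserves page-buffer slots for the RNDIS
	 * header placed in the skb headroom, which may straddle a page
	 * boundary. Everything must fit within MAX_PAGE_BUFFER_COUNT slots
	 * (currently 32) for a single VMBus packet.
	 */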

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);

		vlan = (void *)ppi + ppi->ppi_offset;
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (void *)ppi + ppi->ppi_offset;

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}
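
/* netvsc_comp_ipcsum() recomputes the IPv4 header checksum with
 * ip_fast_csum() over iph->ihl 32-bit words; iph->check must be cleared
 * first so the stale value is not folded into the result.
 */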

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	skb_put_data(skb, data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/* Incoming packets may have IP header checksum verified by the host.
	 * They may not have IP header checksum computed after coalescing.
	 * We compute it here if the flags are set, because on Linux, the IP
	 * checksum is always checked.
	 */
	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
	    csum_info->receive.ip_checksum_succeeded &&
	    skb->protocol == htons(ETH_P_IP))
		netvsc_comp_ipcsum(skb);

	/* Do L4 checksum offload if enabled and present. */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct vmbus_channel *channel,
			 void *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *net_device;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	rcu_read_lock();
	net_device = rcu_dereference(net_device_ctx->nvdev);
	if (unlikely(!net_device))
		goto drop;

	nvchan = &net_device->chan_table[q_idx];

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
				    csum_info, vlan, data, len);
	if (unlikely(!skb)) {
drop:
		++net->stats.rx_dropped;
		rcu_read_unlock();
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);

	rcu_read_unlock();

	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't try continuing to try and setup sub channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netvsc_tx_disable(nvdev, ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	/* In any case device is now ready */
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels setup */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			goto err;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;

err:
	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return ret;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = count;
	device_info.ring_size = ring_size;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		return ret;

	ret = netvsc_attach(net, &device_info);
	if (ret) {
		device_info.num_chn = orig;
		if (netvsc_attach(net, &device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

	return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}
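
/* The memcmp() check above works by clearing, in a copy of the request,
 * every field the driver allows userspace to change (speed, duplex,
 * advertising, cmd) and comparing against a template that is zero except
 * for port = PORT_OTHER; any other non-default field fails validation.
 */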

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->udp4_l4_hash = true;
	ndc->udp6_l4_hash = true;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			return ret;
	}

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, &device_info);
	if (!ret)
		return 0;

	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, &device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

	return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev;
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	rcu_read_lock();

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (!nvdev)
		goto out;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes   += vf_tot.rx_bytes;
	t->tx_bytes   += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes	+= bytes;
		t->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes	+= bytes;
		t->rx_packets	+= packets;
		t->multicast	+= multicast;
	}
out:
	rcu_read_unlock();
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	unsigned int start;
	u64 packets, bytes;
	int i, j;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;

	case UDP_V4_FLOW:
		if (ndc->udp4_l4_hash)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

		break;

	case UDP_V6_FLOW:
		if (ndc->udp6_l4_hash)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		if (info->flow_type == UDP_V4_FLOW)
			ndc->udp4_l4_hash = true;
		else if (info->flow_type == UDP_V6_FLOW)
			ndc->udp6_l4_hash = true;
		else
			return -EOPNOTSUPP;

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		if (info->flow_type == UDP_V4_FLOW)
			ndc->udp4_l4_hash = false;
		else if (info->flow_type == UDP_V6_FLOW)
			ndc->udp6_l4_hash = false;
		else
			return -EOPNOTSUPP;

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev;
	int i;

	rcu_read_lock();
	ndev = rcu_dereference(ndc->nvdev);
	if (ndev) {
		for (i = 0; i < ndev->num_chn; i++) {
			struct netvsc_channel *nvchan = &ndev->chan_table[i];

			napi_schedule(&nvchan->napi);
		}
	}
	rcu_read_unlock();
}
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = ndc->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			ndc->rx_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}

/* Hyper-V RNDIS protocol does not have ring in the HW sense.
 * It does have pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}
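
/* Rough magnitudes (assuming the 16 MiB receive / 1 MiB send buffers and
 * the 1728/6144-byte default section sizes in hyperv_net.h): rx_max_pending
 * is about 16 MiB / 1728 = 9709 sections, tx_max_pending about
 * 1 MiB / 6144 = 170 sections.
 */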

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn;
	device_info.ring_size = ring_size;
	device_info.send_sections = new_tx;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = new_rx;
	device_info.recv_section_size = nvdev->recv_section_size;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		return ret;

	ret = netvsc_attach(ndev, &device_info);
	if (ret) {
		device_info.send_sections = orig.tx_pending;
		device_info.recv_sections = orig.rx_pending;

		if (netvsc_attach(ndev, &device_info))
			netdev_err(ndev, "restoring ringparam failed");
	}

	return ret;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_change_rx_flags =		netvsc_change_rx_flags,
	.ndo_set_rx_mode =		netvsc_set_rx_mode,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netvsc_tx_enable(net_device, net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device_context *ndev_ctx;

	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct net_device *dev;

	dev = netdev_master_upper_dev_get(vf_netdev);
	if (!dev || dev->netdev_ops != &device_ops)
		return NULL;	/* not a netvsc device */

	net_device_ctx = netdev_priv(dev);
	if (!rtnl_dereference(net_device_ctx->nvdev))
		return NULL;	/* device is removed */

	return dev;
}

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		 = this_cpu_ptr(ndev_ctx->vf_stats);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}

static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
					   NULL, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}

static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	/* set multicast etc flags on VF */
	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);

	/* sync address list from ndev to VF */
	netif_addr_lock_bh(ndev);
	dev_uc_sync(vf_netdev, ndev);
	dev_mc_sync(vf_netdev, ndev);
	netif_addr_unlock_bh(ndev);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct device *pdev = vf_netdev->dev.parent;
	struct netvsc_device *netvsc_dev;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);

	return NOTIFY_OK;
}

/* VF up/down change detected, schedule to change data path */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to be 1, we may change it if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
	device_info.send_sections = NETVSC_DEFAULT_TX;
	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
	device_info.recv_sections = NETVSC_DEFAULT_RX;
	device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;

	nvdev = rndis_filter_device_add(dev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	/* We must get rtnl lock before scheduling nvdev->subchan_work,
	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
	 * all subchannels to show up, but that may not happen because
	 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
	 * -> ... -> device_add() -> ... -> __device_attach() can't get
	 * the device lock, so all the subchannels can't be processed --
	 * finally netvsc_subchan_work() hangs for ever.
	 */
	rtnl_lock();

	if (nvdev->num_chn > 1)
		schedule_work(&nvdev->subchan_work);

	/* hw_features computed in rndis_netdev_set_hwcaps() */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdevice(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	list_add(&net_device_ctx->list, &netvsc_dev_list);
	rtnl_unlock();
	return 0;

register_failed:
	rtnl_unlock();
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev, *net;
	struct netvsc_device *nvdev;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev)
		cancel_work_sync(&nvdev->subchan_work);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);

	unregister_netdevice(net);
	list_del(&ndev_ctx->list);

	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
		return netvsc_vf_changed(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);

	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}
2280 MODULE_LICENSE("GPL");
2281 MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
2283 module_init(netvsc_drv_init);
2284 module_exit(netvsc_drv_exit);