GNU Linux-libre 4.19.314-gnu1
drivers/net/hyperv/netvsc_drv.c
1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, see <http://www.gnu.org/licenses/>.
15  *
16  * Authors:
17  *   Haiyang Zhang <haiyangz@microsoft.com>
18  *   Hank Janssen  <hjanssen@microsoft.com>
19  */
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/init.h>
23 #include <linux/atomic.h>
24 #include <linux/module.h>
25 #include <linux/highmem.h>
26 #include <linux/device.h>
27 #include <linux/io.h>
28 #include <linux/delay.h>
29 #include <linux/netdevice.h>
30 #include <linux/inetdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/pci.h>
33 #include <linux/skbuff.h>
34 #include <linux/if_vlan.h>
35 #include <linux/in.h>
36 #include <linux/slab.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/netpoll.h>
39
40 #include <net/arp.h>
41 #include <net/route.h>
42 #include <net/sock.h>
43 #include <net/pkt_sched.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46
47 #include "hyperv_net.h"
48
49 #define RING_SIZE_MIN   64
50 #define RETRY_US_LO     5000
51 #define RETRY_US_HI     10000
52 #define RETRY_MAX       2000    /* >10 sec */
53
54 #define LINKCHANGE_INT (2 * HZ)
55 #define VF_TAKEOVER_INT (HZ / 10)
56
57 /* Macros to define the context of vf registration */
58 #define VF_REG_IN_PROBE         1
59 #define VF_REG_IN_NOTIFIER      2
60
61 static unsigned int ring_size __ro_after_init = 128;
62 module_param(ring_size, uint, 0444);
63 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
64 unsigned int netvsc_ring_bytes __ro_after_init;
65
66 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
67                                 NETIF_MSG_LINK | NETIF_MSG_IFUP |
68                                 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
69                                 NETIF_MSG_TX_ERR;
70
71 static int debug = -1;
72 module_param(debug, int, 0444);
73 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
74
75 static LIST_HEAD(netvsc_dev_list);
76
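/* Propagate promiscuous/allmulti flag changes on the synthetic device to the
 * bonded VF device, if one is present.
 */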
77 static void netvsc_change_rx_flags(struct net_device *net, int change)
78 {
79         struct net_device_context *ndev_ctx = netdev_priv(net);
80         struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
81         int inc;
82
83         if (!vf_netdev)
84                 return;
85
86         if (change & IFF_PROMISC) {
87                 inc = (net->flags & IFF_PROMISC) ? 1 : -1;
88                 dev_set_promiscuity(vf_netdev, inc);
89         }
90
91         if (change & IFF_ALLMULTI) {
92                 inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
93                 dev_set_allmulti(vf_netdev, inc);
94         }
95 }
96
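/* Sync the unicast/multicast address lists to the VF device (if present) and
 * push the updated RNDIS receive filter to the host.
 */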
97 static void netvsc_set_rx_mode(struct net_device *net)
98 {
99         struct net_device_context *ndev_ctx = netdev_priv(net);
100         struct net_device *vf_netdev;
101         struct netvsc_device *nvdev;
102
103         rcu_read_lock();
104         vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
105         if (vf_netdev) {
106                 dev_uc_sync(vf_netdev, net);
107                 dev_mc_sync(vf_netdev, net);
108         }
109
110         nvdev = rcu_dereference(ndev_ctx->nvdev);
111         if (nvdev)
112                 rndis_filter_update(nvdev);
113         rcu_read_unlock();
114 }
115
116 static void netvsc_tx_enable(struct netvsc_device *nvscdev,
117                              struct net_device *ndev)
118 {
119         nvscdev->tx_disable = false;
120         virt_wmb(); /* ensure queue wake up mechanism is on */
121
122         netif_tx_wake_all_queues(ndev);
123 }
124
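/* ndo_open: open the RNDIS filter, report carrier if the link is up, and
 * transparently bring up the VF slave device as well.
 */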
125 static int netvsc_open(struct net_device *net)
126 {
127         struct net_device_context *ndev_ctx = netdev_priv(net);
128         struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
129         struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
130         struct rndis_device *rdev;
131         int ret = 0;
132
133         netif_carrier_off(net);
134
135         /* Open up the device */
136         ret = rndis_filter_open(nvdev);
137         if (ret != 0) {
138                 netdev_err(net, "unable to open device (ret %d).\n", ret);
139                 return ret;
140         }
141
142         rdev = nvdev->extension;
143         if (!rdev->link_state) {
144                 netif_carrier_on(net);
145                 netvsc_tx_enable(nvdev, net);
146         }
147
148         if (vf_netdev) {
149                 /* Setting synthetic device up transparently sets
150                  * slave as up. If open fails, then slave will
151                  * still be offline (and not used).
152                  */
153                 ret = dev_open(vf_netdev);
154                 if (ret)
155                         netdev_warn(net,
156                                     "unable to open slave: %s: %d\n",
157                                     vf_netdev->name, ret);
158         }
159         return 0;
160 }
161
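/* Wait for all channel ring buffers to drain in both directions; give up
 * with -ETIMEDOUT after RETRY_MAX attempts (more than 10 seconds).
 */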
162 static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
163 {
164         unsigned int retry = 0;
165         int i;
166
167         /* Ensure pending bytes in ring are read */
168         for (;;) {
169                 u32 aread = 0;
170
171                 for (i = 0; i < nvdev->num_chn; i++) {
172                         struct vmbus_channel *chn
173                                 = nvdev->chan_table[i].channel;
174
175                         if (!chn)
176                                 continue;
177
178                         /* make sure receive is not running now */
179                         napi_synchronize(&nvdev->chan_table[i].napi);
180
181                         aread = hv_get_bytes_to_read(&chn->inbound);
182                         if (aread)
183                                 break;
184
185                         aread = hv_get_bytes_to_read(&chn->outbound);
186                         if (aread)
187                                 break;
188                 }
189
190                 if (aread == 0)
191                         return 0;
192
193                 if (++retry > RETRY_MAX)
194                         return -ETIMEDOUT;
195
196                 usleep_range(RETRY_US_LO, RETRY_US_HI);
197         }
198 }
199
200 static void netvsc_tx_disable(struct netvsc_device *nvscdev,
201                               struct net_device *ndev)
202 {
203         if (nvscdev) {
204                 nvscdev->tx_disable = true;
205                 virt_wmb(); /* ensure txq will not wake up after stop */
206         }
207
208         netif_tx_disable(ndev);
209 }
210
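/* ndo_stop: disable tx, close the RNDIS filter, wait for the ring buffers to
 * drain, and close the VF slave device as well.
 */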
211 static int netvsc_close(struct net_device *net)
212 {
213         struct net_device_context *net_device_ctx = netdev_priv(net);
214         struct net_device *vf_netdev
215                 = rtnl_dereference(net_device_ctx->vf_netdev);
216         struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
217         int ret;
218
219         netvsc_tx_disable(nvdev, net);
220
221         /* No need to close rndis filter if it is removed already */
222         if (!nvdev)
223                 return 0;
224
225         ret = rndis_filter_close(nvdev);
226         if (ret != 0) {
227                 netdev_err(net, "unable to close device (ret %d).\n", ret);
228                 return ret;
229         }
230
231         ret = netvsc_wait_until_empty(nvdev);
232         if (ret)
233                 netdev_err(net, "Ring buffer not empty after closing rndis\n");
234
235         if (vf_netdev)
236                 dev_close(vf_netdev);
237
238         return ret;
239 }
240
241 static inline void *init_ppi_data(struct rndis_message *msg,
242                                   u32 ppi_size, u32 pkt_type)
243 {
244         struct rndis_packet *rndis_pkt = &msg->msg.pkt;
245         struct rndis_per_packet_info *ppi;
246
247         rndis_pkt->data_offset += ppi_size;
248         ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
249                 + rndis_pkt->per_pkt_info_len;
250
251         ppi->size = ppi_size;
252         ppi->type = pkt_type;
253         ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
254
255         rndis_pkt->per_pkt_info_len += ppi_size;
256
257         return ppi + 1;
258 }
259
260 /* Azure hosts don't support non-TCP port numbers in hashing for fragmented
261  * packets. We can use ethtool to change UDP hash level when necessary.
262  */
263 static inline u32 netvsc_get_hash(
264         struct sk_buff *skb,
265         const struct net_device_context *ndc)
266 {
267         struct flow_keys flow;
268         u32 hash, pkt_proto = 0;
269         static u32 hashrnd __read_mostly;
270
271         net_get_random_once(&hashrnd, sizeof(hashrnd));
272
273         if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
274                 return 0;
275
276         switch (flow.basic.ip_proto) {
277         case IPPROTO_TCP:
278                 if (flow.basic.n_proto == htons(ETH_P_IP))
279                         pkt_proto = HV_TCP4_L4HASH;
280                 else if (flow.basic.n_proto == htons(ETH_P_IPV6))
281                         pkt_proto = HV_TCP6_L4HASH;
282
283                 break;
284
285         case IPPROTO_UDP:
286                 if (flow.basic.n_proto == htons(ETH_P_IP))
287                         pkt_proto = HV_UDP4_L4HASH;
288                 else if (flow.basic.n_proto == htons(ETH_P_IPV6))
289                         pkt_proto = HV_UDP6_L4HASH;
290
291                 break;
292         }
293
294         if (pkt_proto & ndc->l4_hash) {
295                 return skb_get_hash(skb);
296         } else {
297                 if (flow.basic.n_proto == htons(ETH_P_IP))
298                         hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
299                 else if (flow.basic.n_proto == htons(ETH_P_IPV6))
300                         hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
301                 else
302                         return 0;
303
304                 __skb_set_sw_hash(skb, hash, false);
305         }
306
307         return hash;
308 }
309
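/* Map the flow hash through the host-provided send indirection table to a
 * tx queue, and cache the result in the socket when possible.
 */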
310 static inline int netvsc_get_tx_queue(struct net_device *ndev,
311                                       struct sk_buff *skb, int old_idx)
312 {
313         const struct net_device_context *ndc = netdev_priv(ndev);
314         struct sock *sk = skb->sk;
315         int q_idx;
316
317         q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
318                               (VRSS_SEND_TAB_SIZE - 1)];
319
320         /* If queue index changed record the new value */
321         if (q_idx != old_idx &&
322             sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
323                 sk_tx_queue_set(sk, q_idx);
324
325         return q_idx;
326 }
327
328 /*
329  * Select queue for transmit.
330  *
331  * If a valid queue has already been assigned, then use that.
332  * Otherwise compute tx queue based on hash and the send table.
333  *
334  * This is similar to the default (__netdev_pick_tx), with the added step
335  * of using the host send_table when no other queue has been assigned.
336  *
337  * TODO support XPS - but get_xps_queue not exported
338  */
339 static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
340 {
341         int q_idx = sk_tx_queue_get(skb->sk);
342
343         if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
344                 /* If forwarding a packet, we use the recorded queue when
345                  * available for better cache locality.
346                  */
347                 if (skb_rx_queue_recorded(skb))
348                         q_idx = skb_get_rx_queue(skb);
349                 else
350                         q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
351         }
352
353         return q_idx;
354 }
355
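/* ndo_select_queue: if a VF is bonded in, let it pick the queue and record
 * the choice for the xmit path; otherwise use the netvsc send table.
 */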
356 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
357                                struct net_device *sb_dev,
358                                select_queue_fallback_t fallback)
359 {
360         struct net_device_context *ndc = netdev_priv(ndev);
361         struct net_device *vf_netdev;
362         u16 txq;
363
364         rcu_read_lock();
365         vf_netdev = rcu_dereference(ndc->vf_netdev);
366         if (vf_netdev) {
367                 const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
368
369                 if (vf_ops->ndo_select_queue)
370                         txq = vf_ops->ndo_select_queue(vf_netdev, skb,
371                                                        sb_dev, fallback);
372                 else
373                         txq = fallback(vf_netdev, skb, NULL);
374
375                 /* Record the queue selected by the VF so that it can be
376                  * used for the common case where the VF has more queues than
377                  * the synthetic device.
378                  */
379                 qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
380         } else {
381                 txq = netvsc_pick_tx(ndev, skb);
382         }
383         rcu_read_unlock();
384
385         while (txq >= ndev->real_num_tx_queues)
386                 txq -= ndev->real_num_tx_queues;
387
388         return txq;
389 }
390
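/* Describe a buffer that may span (compound) pages as hv_page_buffer
 * entries; returns the number of entries used.
 */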
391 static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
392                        struct hv_page_buffer *pb)
393 {
394         int j = 0;
395
396         /* Deal with compound pages by ignoring the unused part
397          * of the page.
398          */
399         page += (offset >> PAGE_SHIFT);
400         offset &= ~PAGE_MASK;
401
402         while (len > 0) {
403                 unsigned long bytes;
404
405                 bytes = PAGE_SIZE - offset;
406                 if (bytes > len)
407                         bytes = len;
408                 pb[j].pfn = page_to_pfn(page);
409                 pb[j].offset = offset;
410                 pb[j].len = bytes;
411
412                 offset += bytes;
413                 len -= bytes;
414
415                 if (offset == PAGE_SIZE && len) {
416                         page++;
417                         offset = 0;
418                         j++;
419                 }
420         }
421
422         return j + 1;
423 }
424
425 static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
426                            struct hv_netvsc_packet *packet,
427                            struct hv_page_buffer *pb)
428 {
429         u32 slots_used = 0;
430         char *data = skb->data;
431         int frags = skb_shinfo(skb)->nr_frags;
432         int i;
433
434         /* The packet is laid out thus:
435          * 1. hdr: RNDIS header and PPI
436          * 2. skb linear data
437          * 3. skb fragment data
438          */
439         slots_used += fill_pg_buf(virt_to_page(hdr),
440                                   offset_in_page(hdr),
441                                   len, &pb[slots_used]);
442
443         packet->rmsg_size = len;
444         packet->rmsg_pgcnt = slots_used;
445
446         slots_used += fill_pg_buf(virt_to_page(data),
447                                 offset_in_page(data),
448                                 skb_headlen(skb), &pb[slots_used]);
449
450         for (i = 0; i < frags; i++) {
451                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
452
453                 slots_used += fill_pg_buf(skb_frag_page(frag),
454                                         frag->page_offset,
455                                         skb_frag_size(frag), &pb[slots_used]);
456         }
457         return slots_used;
458 }
459
460 static int count_skb_frag_slots(struct sk_buff *skb)
461 {
462         int i, frags = skb_shinfo(skb)->nr_frags;
463         int pages = 0;
464
465         for (i = 0; i < frags; i++) {
466                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
467                 unsigned long size = skb_frag_size(frag);
468                 unsigned long offset = frag->page_offset;
469
470                 /* Skip unused full pages at the start of a compound page */
471                 offset &= ~PAGE_MASK;
472                 pages += PFN_UP(offset + size);
473         }
474         return pages;
475 }
476
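/* Count the page buffer slots needed for the skb linear data plus all
 * fragments.
 */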
477 static int netvsc_get_slots(struct sk_buff *skb)
478 {
479         char *data = skb->data;
480         unsigned int offset = offset_in_page(data);
481         unsigned int len = skb_headlen(skb);
482         int slots;
483         int frag_slots;
484
485         slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
486         frag_slots = count_skb_frag_slots(skb);
487         return slots + frag_slots;
488 }
489
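/* Classify the skb as IPv4/IPv6 TCP/UDP (or not IP) for checksum offload
 * decisions.
 */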
490 static u32 net_checksum_info(struct sk_buff *skb)
491 {
492         if (skb->protocol == htons(ETH_P_IP)) {
493                 struct iphdr *ip = ip_hdr(skb);
494
495                 if (ip->protocol == IPPROTO_TCP)
496                         return TRANSPORT_INFO_IPV4_TCP;
497                 else if (ip->protocol == IPPROTO_UDP)
498                         return TRANSPORT_INFO_IPV4_UDP;
499         } else {
500                 struct ipv6hdr *ip6 = ipv6_hdr(skb);
501
502                 if (ip6->nexthdr == IPPROTO_TCP)
503                         return TRANSPORT_INFO_IPV6_TCP;
504                 else if (ip6->nexthdr == IPPROTO_UDP)
505                         return TRANSPORT_INFO_IPV6_UDP;
506         }
507
508         return TRANSPORT_INFO_NOT_IP;
509 }
510
511 /* Send skb on the slave VF device. */
512 static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
513                           struct sk_buff *skb)
514 {
515         struct net_device_context *ndev_ctx = netdev_priv(net);
516         unsigned int len = skb->len;
517         int rc;
518
519         skb->dev = vf_netdev;
520         skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
521
522         rc = dev_queue_xmit(skb);
523         if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
524                 struct netvsc_vf_pcpu_stats *pcpu_stats
525                         = this_cpu_ptr(ndev_ctx->vf_stats);
526
527                 u64_stats_update_begin(&pcpu_stats->syncp);
528                 pcpu_stats->tx_packets++;
529                 pcpu_stats->tx_bytes += len;
530                 u64_stats_update_end(&pcpu_stats->syncp);
531         } else {
532                 this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
533         }
534
535         return rc;
536 }
537
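/* ndo_start_xmit: transmit via the VF when it is usable; otherwise build an
 * RNDIS packet (hash, VLAN, LSO and checksum PPIs as needed) and send it
 * over the VMBus channel.
 */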
538 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
539 {
540         struct net_device_context *net_device_ctx = netdev_priv(net);
541         struct hv_netvsc_packet *packet = NULL;
542         int ret;
543         unsigned int num_data_pgs;
544         struct rndis_message *rndis_msg;
545         struct net_device *vf_netdev;
546         u32 rndis_msg_size;
547         u32 hash;
548         struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
549
550         /* If VF is present and up then redirect packets to it.
551          * Skip the VF if it is marked down or has no carrier.
552          * If netpoll is in use, then the VF cannot be used either.
553          */
554         vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
555         if (vf_netdev && netif_running(vf_netdev) &&
556             netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
557                 return netvsc_vf_xmit(net, vf_netdev, skb);
558
559         /* We will need at most two pages to describe the rndis
560          * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
561          * of pages in a single packet. If the skb is scattered over
562          * more pages, we try linearizing it.
563          */
564
565         num_data_pgs = netvsc_get_slots(skb) + 2;
566
567         if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
568                 ++net_device_ctx->eth_stats.tx_scattered;
569
570                 if (skb_linearize(skb))
571                         goto no_memory;
572
573                 num_data_pgs = netvsc_get_slots(skb) + 2;
574                 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
575                         ++net_device_ctx->eth_stats.tx_too_big;
576                         goto drop;
577                 }
578         }
579
580         /*
581          * Place the rndis header in the skb headroom; the
582          * skb->cb area will be used for the hv_netvsc_packet
583          * structure.
584          */
585         ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
586         if (ret)
587                 goto no_memory;
588
589         /* Use the skb control buffer for building up the packet */
590         BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
591                         FIELD_SIZEOF(struct sk_buff, cb));
592         packet = (struct hv_netvsc_packet *)skb->cb;
593
594         packet->q_idx = skb_get_queue_mapping(skb);
595
596         packet->total_data_buflen = skb->len;
597         packet->total_bytes = skb->len;
598         packet->total_packets = 1;
599
600         rndis_msg = (struct rndis_message *)skb->head;
601
602         /* Add the rndis header */
603         rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
604         rndis_msg->msg_len = packet->total_data_buflen;
605
606         rndis_msg->msg.pkt = (struct rndis_packet) {
607                 .data_offset = sizeof(struct rndis_packet),
608                 .data_len = packet->total_data_buflen,
609                 .per_pkt_info_offset = sizeof(struct rndis_packet),
610         };
611
612         rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
613
614         hash = skb_get_hash_raw(skb);
615         if (hash != 0 && net->real_num_tx_queues > 1) {
616                 u32 *hash_info;
617
618                 rndis_msg_size += NDIS_HASH_PPI_SIZE;
619                 hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
620                                           NBL_HASH_VALUE);
621                 *hash_info = hash;
622         }
623
624         if (skb_vlan_tag_present(skb)) {
625                 struct ndis_pkt_8021q_info *vlan;
626
627                 rndis_msg_size += NDIS_VLAN_PPI_SIZE;
628                 vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
629                                      IEEE_8021Q_INFO);
630
631                 vlan->value = 0;
632                 vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
633                 vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
634                                 VLAN_PRIO_SHIFT;
635         }
636
637         if (skb_is_gso(skb)) {
638                 struct ndis_tcp_lso_info *lso_info;
639
640                 rndis_msg_size += NDIS_LSO_PPI_SIZE;
641                 lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
642                                          TCP_LARGESEND_PKTINFO);
643
644                 lso_info->value = 0;
645                 lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
646                 if (skb->protocol == htons(ETH_P_IP)) {
647                         lso_info->lso_v2_transmit.ip_version =
648                                 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
649                         ip_hdr(skb)->tot_len = 0;
650                         ip_hdr(skb)->check = 0;
651                         tcp_hdr(skb)->check =
652                                 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
653                                                    ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
654                 } else {
655                         lso_info->lso_v2_transmit.ip_version =
656                                 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
657                         ipv6_hdr(skb)->payload_len = 0;
658                         tcp_hdr(skb)->check =
659                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
660                                                  &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
661                 }
662                 lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
663                 lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
664         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
665                 if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
666                         struct ndis_tcp_ip_checksum_info *csum_info;
667
668                         rndis_msg_size += NDIS_CSUM_PPI_SIZE;
669                         csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
670                                                   TCPIP_CHKSUM_PKTINFO);
671
672                         csum_info->value = 0;
673                         csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
674
675                         if (skb->protocol == htons(ETH_P_IP)) {
676                                 csum_info->transmit.is_ipv4 = 1;
677
678                                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
679                                         csum_info->transmit.tcp_checksum = 1;
680                                 else
681                                         csum_info->transmit.udp_checksum = 1;
682                         } else {
683                                 csum_info->transmit.is_ipv6 = 1;
684
685                                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
686                                         csum_info->transmit.tcp_checksum = 1;
687                                 else
688                                         csum_info->transmit.udp_checksum = 1;
689                         }
690                 } else {
691                         /* Can't do offload of this type of checksum */
692                         if (skb_checksum_help(skb))
693                                 goto drop;
694                 }
695         }
696
697         /* Start filling in the page buffers with the rndis hdr */
698         rndis_msg->msg_len += rndis_msg_size;
699         packet->total_data_buflen = rndis_msg->msg_len;
700         packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
701                                                skb, packet, pb);
702
703         /* timestamp packet in software */
704         skb_tx_timestamp(skb);
705
706         ret = netvsc_send(net, packet, rndis_msg, pb, skb);
707         if (likely(ret == 0))
708                 return NETDEV_TX_OK;
709
710         if (ret == -EAGAIN) {
711                 ++net_device_ctx->eth_stats.tx_busy;
712                 return NETDEV_TX_BUSY;
713         }
714
715         if (ret == -ENOSPC)
716                 ++net_device_ctx->eth_stats.tx_no_space;
717
718 drop:
719         dev_kfree_skb_any(skb);
720         net->stats.tx_dropped++;
721
722         return NETDEV_TX_OK;
723
724 no_memory:
725         ++net_device_ctx->eth_stats.tx_no_memory;
726         goto drop;
727 }
728
729 /*
730  * netvsc_linkstatus_callback - Link up/down notification
731  */
732 void netvsc_linkstatus_callback(struct net_device *net,
733                                 struct rndis_message *resp)
734 {
735         struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
736         struct net_device_context *ndev_ctx = netdev_priv(net);
737         struct netvsc_reconfig *event;
738         unsigned long flags;
739
740         /* Update the physical link speed when changing to another vSwitch */
741         if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
742                 u32 speed;
743
744                 speed = *(u32 *)((void *)indicate
745                                  + indicate->status_buf_offset) / 10000;
746                 ndev_ctx->speed = speed;
747                 return;
748         }
749
750         /* Handle these link change statuses below */
751         if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
752             indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
753             indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
754                 return;
755
756         if (net->reg_state != NETREG_REGISTERED)
757                 return;
758
759         event = kzalloc(sizeof(*event), GFP_ATOMIC);
760         if (!event)
761                 return;
762         event->event = indicate->status;
763
764         spin_lock_irqsave(&ndev_ctx->lock, flags);
765         list_add_tail(&event->list, &ndev_ctx->reconfig_events);
766         spin_unlock_irqrestore(&ndev_ctx->lock, flags);
767
768         schedule_delayed_work(&ndev_ctx->dwork, 0);
769 }
770
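/* Recompute the IPv4 header checksum in place. */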
771 static void netvsc_comp_ipcsum(struct sk_buff *skb)
772 {
773         struct iphdr *iph = (struct iphdr *)skb->data;
774
775         iph->check = 0;
776         iph->check = ip_fast_csum(iph, iph->ihl);
777 }
778
779 static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
780                                              struct napi_struct *napi,
781                                              const struct ndis_tcp_ip_checksum_info *csum_info,
782                                              const struct ndis_pkt_8021q_info *vlan,
783                                              void *data, u32 buflen)
784 {
785         struct sk_buff *skb;
786
787         skb = napi_alloc_skb(napi, buflen);
788         if (!skb)
789                 return skb;
790
791         /*
792          * Copy to skb. This copy is needed here since the memory pointed to
793          * by hv_netvsc_packet cannot be deallocated.
794          */
795         skb_put_data(skb, data, buflen);
796
797         skb->protocol = eth_type_trans(skb, net);
798
799         /* skb is already created with CHECKSUM_NONE */
800         skb_checksum_none_assert(skb);
801
802         /* Incoming packets may have IP header checksum verified by the host.
803          * They may not have IP header checksum computed after coalescing.
804          * We compute it here if the flags are set, because on Linux, the IP
805          * checksum is always checked.
806          */
807         if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
808             csum_info->receive.ip_checksum_succeeded &&
809             skb->protocol == htons(ETH_P_IP))
810                 netvsc_comp_ipcsum(skb);
811
812         /* Do L4 checksum offload if enabled and present. */
813         if (csum_info && (net->features & NETIF_F_RXCSUM)) {
814                 if (csum_info->receive.tcp_checksum_succeeded ||
815                     csum_info->receive.udp_checksum_succeeded)
816                         skb->ip_summed = CHECKSUM_UNNECESSARY;
817         }
818
819         if (vlan) {
820                 u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
821
822                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
823                                        vlan_tci);
824         }
825
826         return skb;
827 }
828
829 /*
830  * netvsc_recv_callback -  Callback when we receive a packet from the
831  * "wire" on the specified device.
832  */
833 int netvsc_recv_callback(struct net_device *net,
834                          struct netvsc_device *net_device,
835                          struct vmbus_channel *channel,
836                          void  *data, u32 len,
837                          const struct ndis_tcp_ip_checksum_info *csum_info,
838                          const struct ndis_pkt_8021q_info *vlan)
839 {
840         struct net_device_context *net_device_ctx = netdev_priv(net);
841         u16 q_idx = channel->offermsg.offer.sub_channel_index;
842         struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
843         struct sk_buff *skb;
844         struct netvsc_stats *rx_stats;
845
846         if (net->reg_state != NETREG_REGISTERED)
847                 return NVSP_STAT_FAIL;
848
849         /* Allocate a skb - TODO direct I/O to pages? */
850         skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
851                                     csum_info, vlan, data, len);
852         if (unlikely(!skb)) {
853                 ++net_device_ctx->eth_stats.rx_no_memory;
854                 return NVSP_STAT_FAIL;
855         }
856
857         skb_record_rx_queue(skb, q_idx);
858
859         /*
860          * Even if injecting the packet, record the statistics
861          * on the synthetic device because modifying the VF device
862          * statistics will not work correctly.
863          */
864         rx_stats = &nvchan->rx_stats;
865         u64_stats_update_begin(&rx_stats->syncp);
866         rx_stats->packets++;
867         rx_stats->bytes += len;
868
869         if (skb->pkt_type == PACKET_BROADCAST)
870                 ++rx_stats->broadcast;
871         else if (skb->pkt_type == PACKET_MULTICAST)
872                 ++rx_stats->multicast;
873         u64_stats_update_end(&rx_stats->syncp);
874
875         napi_gro_receive(&nvchan->napi, skb);
876         return NVSP_STAT_SUCCESS;
877 }
878
879 static void netvsc_get_drvinfo(struct net_device *net,
880                                struct ethtool_drvinfo *info)
881 {
882         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
883         strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
884 }
885
886 static void netvsc_get_channels(struct net_device *net,
887                                 struct ethtool_channels *channel)
888 {
889         struct net_device_context *net_device_ctx = netdev_priv(net);
890         struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
891
892         if (nvdev) {
893                 channel->max_combined   = nvdev->max_chn;
894                 channel->combined_count = nvdev->num_chn;
895         }
896 }
897
898 /* Allocate a struct netvsc_device_info and initialize it from either an
899  * existing struct netvsc_device or from default values.
900  */
901 static struct netvsc_device_info *netvsc_devinfo_get
902                         (struct netvsc_device *nvdev)
903 {
904         struct netvsc_device_info *dev_info;
905
906         dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
907
908         if (!dev_info)
909                 return NULL;
910
911         if (nvdev) {
912                 dev_info->num_chn = nvdev->num_chn;
913                 dev_info->send_sections = nvdev->send_section_cnt;
914                 dev_info->send_section_size = nvdev->send_section_size;
915                 dev_info->recv_sections = nvdev->recv_section_cnt;
916                 dev_info->recv_section_size = nvdev->recv_section_size;
917
918                 memcpy(dev_info->rss_key, nvdev->extension->rss_key,
919                        NETVSC_HASH_KEYLEN);
920         } else {
921                 dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
922                 dev_info->send_sections = NETVSC_DEFAULT_TX;
923                 dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
924                 dev_info->recv_sections = NETVSC_DEFAULT_RX;
925                 dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
926         }
927
928         return dev_info;
929 }
930
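/* Tear down the synthetic device (used when changing channels or MTU):
 * stop traffic, drain the ring buffers, detach from the stack and remove
 * the RNDIS device.
 */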
931 static int netvsc_detach(struct net_device *ndev,
932                          struct netvsc_device *nvdev)
933 {
934         struct net_device_context *ndev_ctx = netdev_priv(ndev);
935         struct hv_device *hdev = ndev_ctx->device_ctx;
936         int ret;
937
938         /* Don't continue trying to set up sub-channels */
939         if (cancel_work_sync(&nvdev->subchan_work))
940                 nvdev->num_chn = 1;
941
942         /* If the device was up (receiving), then shut it down */
943         if (netif_running(ndev)) {
944                 netvsc_tx_disable(nvdev, ndev);
945
946                 ret = rndis_filter_close(nvdev);
947                 if (ret) {
948                         netdev_err(ndev,
949                                    "unable to close device (ret %d).\n", ret);
950                         return ret;
951                 }
952
953                 ret = netvsc_wait_until_empty(nvdev);
954                 if (ret) {
955                         netdev_err(ndev,
956                                    "Ring buffer not empty after closing rndis\n");
957                         return ret;
958                 }
959         }
960
961         netif_device_detach(ndev);
962
963         rndis_filter_device_remove(hdev, nvdev);
964
965         return 0;
966 }
967
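/* Counterpart of netvsc_detach(): re-create the RNDIS device from the saved
 * settings and bring the interface back up.
 */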
968 static int netvsc_attach(struct net_device *ndev,
969                          struct netvsc_device_info *dev_info)
970 {
971         struct net_device_context *ndev_ctx = netdev_priv(ndev);
972         struct hv_device *hdev = ndev_ctx->device_ctx;
973         struct netvsc_device *nvdev;
974         struct rndis_device *rdev;
975         int ret;
976
977         nvdev = rndis_filter_device_add(hdev, dev_info);
978         if (IS_ERR(nvdev))
979                 return PTR_ERR(nvdev);
980
981         if (nvdev->num_chn > 1) {
982                 ret = rndis_set_subchannel(ndev, nvdev, dev_info);
983
984                 /* if unavailable, just proceed with one queue */
985                 if (ret) {
986                         nvdev->max_chn = 1;
987                         nvdev->num_chn = 1;
988                 }
989         }
990
991         /* In any case device is now ready */
992         nvdev->tx_disable = false;
993         netif_device_attach(ndev);
994
995         /* Note: enable and attach happen when sub-channels are set up */
996         netif_carrier_off(ndev);
997
998         if (netif_running(ndev)) {
999                 ret = rndis_filter_open(nvdev);
1000                 if (ret)
1001                         goto err;
1002
1003                 rdev = nvdev->extension;
1004                 if (!rdev->link_state)
1005                         netif_carrier_on(ndev);
1006         }
1007
1008         return 0;
1009
1010 err:
1011         netif_device_detach(ndev);
1012
1013         rndis_filter_device_remove(hdev, nvdev);
1014
1015         return ret;
1016 }
1017
1018 static int netvsc_set_channels(struct net_device *net,
1019                                struct ethtool_channels *channels)
1020 {
1021         struct net_device_context *net_device_ctx = netdev_priv(net);
1022         struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
1023         unsigned int orig, count = channels->combined_count;
1024         struct netvsc_device_info *device_info;
1025         int ret;
1026
1027         /* We do not support separate count for rx, tx, or other */
1028         if (count == 0 ||
1029             channels->rx_count || channels->tx_count || channels->other_count)
1030                 return -EINVAL;
1031
1032         if (!nvdev || nvdev->destroy)
1033                 return -ENODEV;
1034
1035         if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1036                 return -EINVAL;
1037
1038         if (count > nvdev->max_chn)
1039                 return -EINVAL;
1040
1041         orig = nvdev->num_chn;
1042
1043         device_info = netvsc_devinfo_get(nvdev);
1044
1045         if (!device_info)
1046                 return -ENOMEM;
1047
1048         device_info->num_chn = count;
1049
1050         ret = netvsc_detach(net, nvdev);
1051         if (ret)
1052                 goto out;
1053
1054         ret = netvsc_attach(net, device_info);
1055         if (ret) {
1056                 device_info->num_chn = orig;
1057                 if (netvsc_attach(net, device_info))
1058                         netdev_err(net, "restoring channel setting failed\n");
1059         }
1060
1061 out:
1062         kfree(device_info);
1063         return ret;
1064 }
1065
1066 static bool
1067 netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
1068 {
1069         struct ethtool_link_ksettings diff1 = *cmd;
1070         struct ethtool_link_ksettings diff2 = {};
1071
1072         diff1.base.speed = 0;
1073         diff1.base.duplex = 0;
1074         /* advertising and cmd are usually set */
1075         ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
1076         diff1.base.cmd = 0;
1077         /* We set port to PORT_OTHER */
1078         diff2.base.port = PORT_OTHER;
1079
1080         return !memcmp(&diff1, &diff2, sizeof(diff1));
1081 }
1082
1083 static void netvsc_init_settings(struct net_device *dev)
1084 {
1085         struct net_device_context *ndc = netdev_priv(dev);
1086
1087         ndc->l4_hash = HV_DEFAULT_L4HASH;
1088
1089         ndc->speed = SPEED_UNKNOWN;
1090         ndc->duplex = DUPLEX_FULL;
1091 }
1092
1093 static int netvsc_get_link_ksettings(struct net_device *dev,
1094                                      struct ethtool_link_ksettings *cmd)
1095 {
1096         struct net_device_context *ndc = netdev_priv(dev);
1097
1098         cmd->base.speed = ndc->speed;
1099         cmd->base.duplex = ndc->duplex;
1100         cmd->base.port = PORT_OTHER;
1101
1102         return 0;
1103 }
1104
1105 static int netvsc_set_link_ksettings(struct net_device *dev,
1106                                      const struct ethtool_link_ksettings *cmd)
1107 {
1108         struct net_device_context *ndc = netdev_priv(dev);
1109         u32 speed;
1110
1111         speed = cmd->base.speed;
1112         if (!ethtool_validate_speed(speed) ||
1113             !ethtool_validate_duplex(cmd->base.duplex) ||
1114             !netvsc_validate_ethtool_ss_cmd(cmd))
1115                 return -EINVAL;
1116
1117         ndc->speed = speed;
1118         ndc->duplex = cmd->base.duplex;
1119
1120         return 0;
1121 }
1122
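/* ndo_change_mtu: change the MTU of the VF netdev first, then detach and
 * re-attach the synthetic device with the new MTU, rolling back on failure.
 */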
1123 static int netvsc_change_mtu(struct net_device *ndev, int mtu)
1124 {
1125         struct net_device_context *ndevctx = netdev_priv(ndev);
1126         struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
1127         struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1128         int orig_mtu = ndev->mtu;
1129         struct netvsc_device_info *device_info;
1130         int ret = 0;
1131
1132         if (!nvdev || nvdev->destroy)
1133                 return -ENODEV;
1134
1135         device_info = netvsc_devinfo_get(nvdev);
1136
1137         if (!device_info)
1138                 return -ENOMEM;
1139
1140         /* Change MTU of underlying VF netdev first. */
1141         if (vf_netdev) {
1142                 ret = dev_set_mtu(vf_netdev, mtu);
1143                 if (ret)
1144                         goto out;
1145         }
1146
1147         ret = netvsc_detach(ndev, nvdev);
1148         if (ret)
1149                 goto rollback_vf;
1150
1151         ndev->mtu = mtu;
1152
1153         ret = netvsc_attach(ndev, device_info);
1154         if (!ret)
1155                 goto out;
1156
1157         /* Attempt rollback to original MTU */
1158         ndev->mtu = orig_mtu;
1159
1160         if (netvsc_attach(ndev, device_info))
1161                 netdev_err(ndev, "restoring mtu failed\n");
1162 rollback_vf:
1163         if (vf_netdev)
1164                 dev_set_mtu(vf_netdev, orig_mtu);
1165
1166 out:
1167         kfree(device_info);
1168         return ret;
1169 }
1170
1171 static void netvsc_get_vf_stats(struct net_device *net,
1172                                 struct netvsc_vf_pcpu_stats *tot)
1173 {
1174         struct net_device_context *ndev_ctx = netdev_priv(net);
1175         int i;
1176
1177         memset(tot, 0, sizeof(*tot));
1178
1179         for_each_possible_cpu(i) {
1180                 const struct netvsc_vf_pcpu_stats *stats
1181                         = per_cpu_ptr(ndev_ctx->vf_stats, i);
1182                 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1183                 unsigned int start;
1184
1185                 do {
1186                         start = u64_stats_fetch_begin_irq(&stats->syncp);
1187                         rx_packets = stats->rx_packets;
1188                         tx_packets = stats->tx_packets;
1189                         rx_bytes = stats->rx_bytes;
1190                         tx_bytes = stats->tx_bytes;
1191                 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1192
1193                 tot->rx_packets += rx_packets;
1194                 tot->tx_packets += tx_packets;
1195                 tot->rx_bytes   += rx_bytes;
1196                 tot->tx_bytes   += tx_bytes;
1197                 tot->tx_dropped += stats->tx_dropped;
1198         }
1199 }
1200
1201 static void netvsc_get_pcpu_stats(struct net_device *net,
1202                                   struct netvsc_ethtool_pcpu_stats *pcpu_tot)
1203 {
1204         struct net_device_context *ndev_ctx = netdev_priv(net);
1205         struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
1206         int i;
1207
1208         /* fetch percpu stats of vf */
1209         for_each_possible_cpu(i) {
1210                 const struct netvsc_vf_pcpu_stats *stats =
1211                         per_cpu_ptr(ndev_ctx->vf_stats, i);
1212                 struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
1213                 unsigned int start;
1214
1215                 do {
1216                         start = u64_stats_fetch_begin_irq(&stats->syncp);
1217                         this_tot->vf_rx_packets = stats->rx_packets;
1218                         this_tot->vf_tx_packets = stats->tx_packets;
1219                         this_tot->vf_rx_bytes = stats->rx_bytes;
1220                         this_tot->vf_tx_bytes = stats->tx_bytes;
1221                 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1222                 this_tot->rx_packets = this_tot->vf_rx_packets;
1223                 this_tot->tx_packets = this_tot->vf_tx_packets;
1224                 this_tot->rx_bytes   = this_tot->vf_rx_bytes;
1225                 this_tot->tx_bytes   = this_tot->vf_tx_bytes;
1226         }
1227
1228         /* fetch percpu stats of netvsc */
1229         for (i = 0; i < nvdev->num_chn; i++) {
1230                 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1231                 const struct netvsc_stats *stats;
1232                 struct netvsc_ethtool_pcpu_stats *this_tot =
1233                         &pcpu_tot[nvchan->channel->target_cpu];
1234                 u64 packets, bytes;
1235                 unsigned int start;
1236
1237                 stats = &nvchan->tx_stats;
1238                 do {
1239                         start = u64_stats_fetch_begin_irq(&stats->syncp);
1240                         packets = stats->packets;
1241                         bytes = stats->bytes;
1242                 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1243
1244                 this_tot->tx_bytes      += bytes;
1245                 this_tot->tx_packets    += packets;
1246
1247                 stats = &nvchan->rx_stats;
1248                 do {
1249                         start = u64_stats_fetch_begin_irq(&stats->syncp);
1250                         packets = stats->packets;
1251                         bytes = stats->bytes;
1252                 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1253
1254                 this_tot->rx_bytes      += bytes;
1255                 this_tot->rx_packets    += packets;
1256         }
1257 }
1258
1259 static void netvsc_get_stats64(struct net_device *net,
1260                                struct rtnl_link_stats64 *t)
1261 {
1262         struct net_device_context *ndev_ctx = netdev_priv(net);
1263         struct netvsc_device *nvdev;
1264         struct netvsc_vf_pcpu_stats vf_tot;
1265         int i;
1266
1267         rcu_read_lock();
1268
1269         nvdev = rcu_dereference(ndev_ctx->nvdev);
1270         if (!nvdev)
1271                 goto out;
1272
1273         netdev_stats_to_stats64(t, &net->stats);
1274
1275         netvsc_get_vf_stats(net, &vf_tot);
1276         t->rx_packets += vf_tot.rx_packets;
1277         t->tx_packets += vf_tot.tx_packets;
1278         t->rx_bytes   += vf_tot.rx_bytes;
1279         t->tx_bytes   += vf_tot.tx_bytes;
1280         t->tx_dropped += vf_tot.tx_dropped;
1281
1282         for (i = 0; i < nvdev->num_chn; i++) {
1283                 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1284                 const struct netvsc_stats *stats;
1285                 u64 packets, bytes, multicast;
1286                 unsigned int start;
1287
1288                 stats = &nvchan->tx_stats;
1289                 do {
1290                         start = u64_stats_fetch_begin_irq(&stats->syncp);
1291                         packets = stats->packets;
1292                         bytes = stats->bytes;
1293                 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1294
1295                 t->tx_bytes     += bytes;
1296                 t->tx_packets   += packets;
1297
1298                 stats = &nvchan->rx_stats;
1299                 do {
1300                         start = u64_stats_fetch_begin_irq(&stats->syncp);
1301                         packets = stats->packets;
1302                         bytes = stats->bytes;
1303                         multicast = stats->multicast + stats->broadcast;
1304                 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1305
1306                 t->rx_bytes     += bytes;
1307                 t->rx_packets   += packets;
1308                 t->multicast    += multicast;
1309         }
1310 out:
1311         rcu_read_unlock();
1312 }
1313
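/* ndo_set_mac_address: set the MAC on the VF first, then on the RNDIS
 * device; roll back the VF change if the RNDIS update fails.
 */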
1314 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
1315 {
1316         struct net_device_context *ndc = netdev_priv(ndev);
1317         struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
1318         struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1319         struct sockaddr *addr = p;
1320         int err;
1321
1322         err = eth_prepare_mac_addr_change(ndev, p);
1323         if (err)
1324                 return err;
1325
1326         if (!nvdev)
1327                 return -ENODEV;
1328
1329         if (vf_netdev) {
1330                 err = dev_set_mac_address(vf_netdev, addr);
1331                 if (err)
1332                         return err;
1333         }
1334
1335         err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
1336         if (!err) {
1337                 eth_commit_mac_addr_change(ndev, p);
1338         } else if (vf_netdev) {
1339                 /* rollback change on VF */
1340                 memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
1341                 dev_set_mac_address(vf_netdev, addr);
1342         }
1343
1344         return err;
1345 }
1346
1347 static const struct {
1348         char name[ETH_GSTRING_LEN];
1349         u16 offset;
1350 } netvsc_stats[] = {
1351         { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
1352         { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
1353         { "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
1354         { "tx_too_big",   offsetof(struct netvsc_ethtool_stats, tx_too_big) },
1355         { "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
1356         { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
1357         { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
1358         { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
1359         { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
1360         { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
1361 }, pcpu_stats[] = {
1362         { "cpu%u_rx_packets",
1363                 offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
1364         { "cpu%u_rx_bytes",
1365                 offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
1366         { "cpu%u_tx_packets",
1367                 offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
1368         { "cpu%u_tx_bytes",
1369                 offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
1370         { "cpu%u_vf_rx_packets",
1371                 offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
1372         { "cpu%u_vf_rx_bytes",
1373                 offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
1374         { "cpu%u_vf_tx_packets",
1375                 offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
1376         { "cpu%u_vf_tx_bytes",
1377                 offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
1378 }, vf_stats[] = {
1379         { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
1380         { "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
1381         { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
1382         { "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
1383         { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
1384 };
1385
1386 #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
1387 #define NETVSC_VF_STATS_LEN     ARRAY_SIZE(vf_stats)
1388
1389 /* per-CPU statistics (rx/tx packets/bytes, plus VF rx/tx packets/bytes) */
1390 #define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
1391
1392 /* 4 statistics per queue (rx/tx packets/bytes) */
1393 #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
1394
1395 static int netvsc_get_sset_count(struct net_device *dev, int string_set)
1396 {
1397         struct net_device_context *ndc = netdev_priv(dev);
1398         struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1399
1400         if (!nvdev)
1401                 return -ENODEV;
1402
1403         switch (string_set) {
1404         case ETH_SS_STATS:
1405                 return NETVSC_GLOBAL_STATS_LEN
1406                         + NETVSC_VF_STATS_LEN
1407                         + NETVSC_QUEUE_STATS_LEN(nvdev)
1408                         + NETVSC_PCPU_STATS_LEN;
1409         default:
1410                 return -EINVAL;
1411         }
1412 }
1413
1414 static void netvsc_get_ethtool_stats(struct net_device *dev,
1415                                      struct ethtool_stats *stats, u64 *data)
1416 {
1417         struct net_device_context *ndc = netdev_priv(dev);
1418         struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1419         const void *nds = &ndc->eth_stats;
1420         const struct netvsc_stats *qstats;
1421         struct netvsc_vf_pcpu_stats sum;
1422         struct netvsc_ethtool_pcpu_stats *pcpu_sum;
1423         unsigned int start;
1424         u64 packets, bytes;
1425         int i, j, cpu;
1426
1427         if (!nvdev)
1428                 return;
1429
1430         for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
1431                 data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
1432
1433         netvsc_get_vf_stats(dev, &sum);
1434         for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
1435                 data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
1436
1437         for (j = 0; j < nvdev->num_chn; j++) {
1438                 qstats = &nvdev->chan_table[j].tx_stats;
1439
1440                 do {
1441                         start = u64_stats_fetch_begin_irq(&qstats->syncp);
1442                         packets = qstats->packets;
1443                         bytes = qstats->bytes;
1444                 } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
1445                 data[i++] = packets;
1446                 data[i++] = bytes;
1447
1448                 qstats = &nvdev->chan_table[j].rx_stats;
1449                 do {
1450                         start = u64_stats_fetch_begin_irq(&qstats->syncp);
1451                         packets = qstats->packets;
1452                         bytes = qstats->bytes;
1453                 } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
1454                 data[i++] = packets;
1455                 data[i++] = bytes;
1456         }
1457
1458         pcpu_sum = kvmalloc_array(num_possible_cpus(),
1459                                   sizeof(struct netvsc_ethtool_pcpu_stats),
1460                                   GFP_KERNEL);
1461         if (!pcpu_sum)
1462                 return;
1463
1464         netvsc_get_pcpu_stats(dev, pcpu_sum);
1465         for_each_present_cpu(cpu) {
1466                 struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
1467
1468                 for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
1469                         data[i++] = *(u64 *)((void *)this_sum
1470                                              + pcpu_stats[j].offset);
1471         }
1472         kvfree(pcpu_sum);
1473 }
1474
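/* Emit the statistic names in the same order as netvsc_get_ethtool_stats() */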
1475 static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1476 {
1477         struct net_device_context *ndc = netdev_priv(dev);
1478         struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1479         u8 *p = data;
1480         int i, cpu;
1481
1482         if (!nvdev)
1483                 return;
1484
1485         switch (stringset) {
1486         case ETH_SS_STATS:
1487                 for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
1488                         memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
1489                         p += ETH_GSTRING_LEN;
1490                 }
1491
1492                 for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
1493                         memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
1494                         p += ETH_GSTRING_LEN;
1495                 }
1496
1497                 for (i = 0; i < nvdev->num_chn; i++) {
1498                         sprintf(p, "tx_queue_%u_packets", i);
1499                         p += ETH_GSTRING_LEN;
1500                         sprintf(p, "tx_queue_%u_bytes", i);
1501                         p += ETH_GSTRING_LEN;
1502                         sprintf(p, "rx_queue_%u_packets", i);
1503                         p += ETH_GSTRING_LEN;
1504                         sprintf(p, "rx_queue_%u_bytes", i);
1505                         p += ETH_GSTRING_LEN;
1506                 }
1507
1508                 for_each_present_cpu(cpu) {
1509                         for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
1510                                 sprintf(p, pcpu_stats[i].name, cpu);
1511                                 p += ETH_GSTRING_LEN;
1512                         }
1513                 }
1514
1515                 break;
1516         }
1517 }
1518
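/* Report which header fields feed the RSS hash for the given flow type */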
1519 static int
1520 netvsc_get_rss_hash_opts(struct net_device_context *ndc,
1521                          struct ethtool_rxnfc *info)
1522 {
1523         const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
1524
1525         info->data = RXH_IP_SRC | RXH_IP_DST;
1526
1527         switch (info->flow_type) {
1528         case TCP_V4_FLOW:
1529                 if (ndc->l4_hash & HV_TCP4_L4HASH)
1530                         info->data |= l4_flag;
1531
1532                 break;
1533
1534         case TCP_V6_FLOW:
1535                 if (ndc->l4_hash & HV_TCP6_L4HASH)
1536                         info->data |= l4_flag;
1537
1538                 break;
1539
1540         case UDP_V4_FLOW:
1541                 if (ndc->l4_hash & HV_UDP4_L4HASH)
1542                         info->data |= l4_flag;
1543
1544                 break;
1545
1546         case UDP_V6_FLOW:
1547                 if (ndc->l4_hash & HV_UDP6_L4HASH)
1548                         info->data |= l4_flag;
1549
1550                 break;
1551
1552         case IPV4_FLOW:
1553         case IPV6_FLOW:
1554                 break;
1555         default:
1556                 info->data = 0;
1557                 break;
1558         }
1559
1560         return 0;
1561 }
1562
1563 static int
1564 netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1565                  u32 *rules)
1566 {
1567         struct net_device_context *ndc = netdev_priv(dev);
1568         struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1569
1570         if (!nvdev)
1571                 return -ENODEV;
1572
1573         switch (info->cmd) {
1574         case ETHTOOL_GRXRINGS:
1575                 info->data = nvdev->num_chn;
1576                 return 0;
1577
1578         case ETHTOOL_GRXFH:
1579                 return netvsc_get_rss_hash_opts(ndc, info);
1580         }
1581         return -EOPNOTSUPP;
1582 }
1583
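/* Enable or disable L4 (port based) hashing for a flow type; only the
 * 2-tuple (IP src/dst) and 4-tuple (IP + ports) combinations are accepted.
 */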
1584 static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
1585                                     struct ethtool_rxnfc *info)
1586 {
1587         if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1588                            RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1589                 switch (info->flow_type) {
1590                 case TCP_V4_FLOW:
1591                         ndc->l4_hash |= HV_TCP4_L4HASH;
1592                         break;
1593
1594                 case TCP_V6_FLOW:
1595                         ndc->l4_hash |= HV_TCP6_L4HASH;
1596                         break;
1597
1598                 case UDP_V4_FLOW:
1599                         ndc->l4_hash |= HV_UDP4_L4HASH;
1600                         break;
1601
1602                 case UDP_V6_FLOW:
1603                         ndc->l4_hash |= HV_UDP6_L4HASH;
1604                         break;
1605
1606                 default:
1607                         return -EOPNOTSUPP;
1608                 }
1609
1610                 return 0;
1611         }
1612
1613         if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
1614                 switch (info->flow_type) {
1615                 case TCP_V4_FLOW:
1616                         ndc->l4_hash &= ~HV_TCP4_L4HASH;
1617                         break;
1618
1619                 case TCP_V6_FLOW:
1620                         ndc->l4_hash &= ~HV_TCP6_L4HASH;
1621                         break;
1622
1623                 case UDP_V4_FLOW:
1624                         ndc->l4_hash &= ~HV_UDP4_L4HASH;
1625                         break;
1626
1627                 case UDP_V6_FLOW:
1628                         ndc->l4_hash &= ~HV_UDP6_L4HASH;
1629                         break;
1630
1631                 default:
1632                         return -EOPNOTSUPP;
1633                 }
1634
1635                 return 0;
1636         }
1637
1638         return -EOPNOTSUPP;
1639 }
1640
1641 static int
1642 netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
1643 {
1644         struct net_device_context *ndc = netdev_priv(ndev);
1645
1646         if (info->cmd == ETHTOOL_SRXFH)
1647                 return netvsc_set_rss_hash_opts(ndc, info);
1648
1649         return -EOPNOTSUPP;
1650 }
1651
1652 #ifdef CONFIG_NET_POLL_CONTROLLER
1653 static void netvsc_poll_controller(struct net_device *dev)
1654 {
1655         struct net_device_context *ndc = netdev_priv(dev);
1656         struct netvsc_device *ndev;
1657         int i;
1658
1659         rcu_read_lock();
1660         ndev = rcu_dereference(ndc->nvdev);
1661         if (ndev) {
1662                 for (i = 0; i < ndev->num_chn; i++) {
1663                         struct netvsc_channel *nvchan = &ndev->chan_table[i];
1664
1665                         napi_schedule(&nvchan->napi);
1666                 }
1667         }
1668         rcu_read_unlock();
1669 }
1670 #endif
1671
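/* RSS parameters (hash key and indirection table) exposed through the
 * ethtool RXFH interface (ethtool -x/-X).
 */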
1672 static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
1673 {
1674         return NETVSC_HASH_KEYLEN;
1675 }
1676
1677 static u32 netvsc_rss_indir_size(struct net_device *dev)
1678 {
1679         return ITAB_NUM;
1680 }
1681
1682 static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1683                            u8 *hfunc)
1684 {
1685         struct net_device_context *ndc = netdev_priv(dev);
1686         struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
1687         struct rndis_device *rndis_dev;
1688         int i;
1689
1690         if (!ndev)
1691                 return -ENODEV;
1692
1693         if (hfunc)
1694                 *hfunc = ETH_RSS_HASH_TOP;      /* Toeplitz */
1695
1696         rndis_dev = ndev->extension;
1697         if (indir) {
1698                 for (i = 0; i < ITAB_NUM; i++)
1699                         indir[i] = ndc->rx_table[i];
1700         }
1701
1702         if (key)
1703                 memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
1704
1705         return 0;
1706 }
1707
1708 static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
1709                            const u8 *key, const u8 hfunc)
1710 {
1711         struct net_device_context *ndc = netdev_priv(dev);
1712         struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
1713         struct rndis_device *rndis_dev;
1714         int i;
1715
1716         if (!ndev)
1717                 return -ENODEV;
1718
1719         if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1720                 return -EOPNOTSUPP;
1721
1722         rndis_dev = ndev->extension;
1723         if (indir) {
1724                 for (i = 0; i < ITAB_NUM; i++)
1725                         if (indir[i] >= ndev->num_chn)
1726                                 return -EINVAL;
1727
1728                 for (i = 0; i < ITAB_NUM; i++)
1729                         ndc->rx_table[i] = indir[i];
1730         }
1731
1732         if (!key) {
1733                 if (!indir)
1734                         return 0;
1735
1736                 key = rndis_dev->rss_key;
1737         }
1738
1739         return rndis_filter_set_rss_param(rndis_dev, key);
1740 }
1741
1742 /* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
1743  * It does have a pre-allocated receive buffer area, which is divided into sections.
1744  */
1745 static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
1746                                    struct ethtool_ringparam *ring)
1747 {
1748         u32 max_buf_size;
1749
1750         ring->rx_pending = nvdev->recv_section_cnt;
1751         ring->tx_pending = nvdev->send_section_cnt;
1752
1753         if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
1754                 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
1755         else
1756                 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
1757
1758         ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
1759         ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
1760                 / nvdev->send_section_size;
1761 }
1762
1763 static void netvsc_get_ringparam(struct net_device *ndev,
1764                                  struct ethtool_ringparam *ring)
1765 {
1766         struct net_device_context *ndevctx = netdev_priv(ndev);
1767         struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1768
1769         if (!nvdev)
1770                 return;
1771
1772         __netvsc_get_ringparam(nvdev, ring);
1773 }
1774
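/* Changing the number of receive/send sections requires tearing down and
 * re-creating the netvsc device, so clamp the request, detach, and then
 * re-attach with the new section counts.
 */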
1775 static int netvsc_set_ringparam(struct net_device *ndev,
1776                                 struct ethtool_ringparam *ring)
1777 {
1778         struct net_device_context *ndevctx = netdev_priv(ndev);
1779         struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1780         struct netvsc_device_info *device_info;
1781         struct ethtool_ringparam orig;
1782         u32 new_tx, new_rx;
1783         int ret = 0;
1784
1785         if (!nvdev || nvdev->destroy)
1786                 return -ENODEV;
1787
1788         memset(&orig, 0, sizeof(orig));
1789         __netvsc_get_ringparam(nvdev, &orig);
1790
1791         new_tx = clamp_t(u32, ring->tx_pending,
1792                          NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
1793         new_rx = clamp_t(u32, ring->rx_pending,
1794                          NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
1795
1796         if (new_tx == orig.tx_pending &&
1797             new_rx == orig.rx_pending)
1798                 return 0;        /* no change */
1799
1800         device_info = netvsc_devinfo_get(nvdev);
1801
1802         if (!device_info)
1803                 return -ENOMEM;
1804
1805         device_info->send_sections = new_tx;
1806         device_info->recv_sections = new_rx;
1807
1808         ret = netvsc_detach(ndev, nvdev);
1809         if (ret)
1810                 goto out;
1811
1812         ret = netvsc_attach(ndev, device_info);
1813         if (ret) {
1814                 device_info->send_sections = orig.tx_pending;
1815                 device_info->recv_sections = orig.rx_pending;
1816
1817                 if (netvsc_attach(ndev, device_info))
1818                         netdev_err(ndev, "restoring ringparam failed\n");
1819         }
1820
1821 out:
1822         kfree(device_info);
1823         return ret;
1824 }
1825
1826 static u32 netvsc_get_msglevel(struct net_device *ndev)
1827 {
1828         struct net_device_context *ndev_ctx = netdev_priv(ndev);
1829
1830         return ndev_ctx->msg_enable;
1831 }
1832
1833 static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
1834 {
1835         struct net_device_context *ndev_ctx = netdev_priv(ndev);
1836
1837         ndev_ctx->msg_enable = val;
1838 }
1839
1840 static const struct ethtool_ops ethtool_ops = {
1841         .get_drvinfo    = netvsc_get_drvinfo,
1842         .get_msglevel   = netvsc_get_msglevel,
1843         .set_msglevel   = netvsc_set_msglevel,
1844         .get_link       = ethtool_op_get_link,
1845         .get_ethtool_stats = netvsc_get_ethtool_stats,
1846         .get_sset_count = netvsc_get_sset_count,
1847         .get_strings    = netvsc_get_strings,
1848         .get_channels   = netvsc_get_channels,
1849         .set_channels   = netvsc_set_channels,
1850         .get_ts_info    = ethtool_op_get_ts_info,
1851         .get_rxnfc      = netvsc_get_rxnfc,
1852         .set_rxnfc      = netvsc_set_rxnfc,
1853         .get_rxfh_key_size = netvsc_get_rxfh_key_size,
1854         .get_rxfh_indir_size = netvsc_rss_indir_size,
1855         .get_rxfh       = netvsc_get_rxfh,
1856         .set_rxfh       = netvsc_set_rxfh,
1857         .get_link_ksettings = netvsc_get_link_ksettings,
1858         .set_link_ksettings = netvsc_set_link_ksettings,
1859         .get_ringparam  = netvsc_get_ringparam,
1860         .set_ringparam  = netvsc_set_ringparam,
1861 };
1862
1863 static const struct net_device_ops device_ops = {
1864         .ndo_open =                     netvsc_open,
1865         .ndo_stop =                     netvsc_close,
1866         .ndo_start_xmit =               netvsc_start_xmit,
1867         .ndo_change_rx_flags =          netvsc_change_rx_flags,
1868         .ndo_set_rx_mode =              netvsc_set_rx_mode,
1869         .ndo_change_mtu =               netvsc_change_mtu,
1870         .ndo_validate_addr =            eth_validate_addr,
1871         .ndo_set_mac_address =          netvsc_set_mac_addr,
1872         .ndo_select_queue =             netvsc_select_queue,
1873         .ndo_get_stats64 =              netvsc_get_stats64,
1874 #ifdef CONFIG_NET_POLL_CONTROLLER
1875         .ndo_poll_controller =          netvsc_poll_controller,
1876 #endif
1877 };
1878
1879 /*
1880  * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE, emulate a link
1881  * down/up sequence. For RNDIS_STATUS_MEDIA_CONNECT, when the carrier is
1882  * already present, send a GARP packet to network peers with netdev_notify_peers().
1883  */
1884 static void netvsc_link_change(struct work_struct *w)
1885 {
1886         struct net_device_context *ndev_ctx =
1887                 container_of(w, struct net_device_context, dwork.work);
1888         struct hv_device *device_obj = ndev_ctx->device_ctx;
1889         struct net_device *net = hv_get_drvdata(device_obj);
1890         struct netvsc_device *net_device;
1891         struct rndis_device *rdev;
1892         struct netvsc_reconfig *event = NULL;
1893         bool notify = false, reschedule = false;
1894         unsigned long flags, next_reconfig, delay;
1895
1896         /* if changes are happening, come back later */
1897         if (!rtnl_trylock()) {
1898                 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
1899                 return;
1900         }
1901
1902         net_device = rtnl_dereference(ndev_ctx->nvdev);
1903         if (!net_device)
1904                 goto out_unlock;
1905
1906         rdev = net_device->extension;
1907
1908         next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
1909         if (time_is_after_jiffies(next_reconfig)) {
1910                 /* link_watch only sends one notification with current state
1911                  * per second, avoid doing reconfig more frequently. Handle
1912                  * wrap around.
1913                  */
1914                 delay = next_reconfig - jiffies;
1915                 delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
1916                 schedule_delayed_work(&ndev_ctx->dwork, delay);
1917                 goto out_unlock;
1918         }
1919         ndev_ctx->last_reconfig = jiffies;
1920
1921         spin_lock_irqsave(&ndev_ctx->lock, flags);
1922         if (!list_empty(&ndev_ctx->reconfig_events)) {
1923                 event = list_first_entry(&ndev_ctx->reconfig_events,
1924                                          struct netvsc_reconfig, list);
1925                 list_del(&event->list);
1926                 reschedule = !list_empty(&ndev_ctx->reconfig_events);
1927         }
1928         spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1929
1930         if (!event)
1931                 goto out_unlock;
1932
1933         switch (event->event) {
1934                 /* Only the following events are possible due to the check in
1935                  * netvsc_linkstatus_callback()
1936                  */
1937         case RNDIS_STATUS_MEDIA_CONNECT:
1938                 if (rdev->link_state) {
1939                         rdev->link_state = false;
1940                         netif_carrier_on(net);
1941                         netvsc_tx_enable(net_device, net);
1942                 } else {
1943                         notify = true;
1944                 }
1945                 kfree(event);
1946                 break;
1947         case RNDIS_STATUS_MEDIA_DISCONNECT:
1948                 if (!rdev->link_state) {
1949                         rdev->link_state = true;
1950                         netif_carrier_off(net);
1951                         netvsc_tx_disable(net_device, net);
1952                 }
1953                 kfree(event);
1954                 break;
1955         case RNDIS_STATUS_NETWORK_CHANGE:
1956                 /* Only makes sense if carrier is present */
1957                 if (!rdev->link_state) {
1958                         rdev->link_state = true;
1959                         netif_carrier_off(net);
1960                         netvsc_tx_disable(net_device, net);
1961                         event->event = RNDIS_STATUS_MEDIA_CONNECT;
1962                         spin_lock_irqsave(&ndev_ctx->lock, flags);
1963                         list_add(&event->list, &ndev_ctx->reconfig_events);
1964                         spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1965                         reschedule = true;
1966                 }
1967                 break;
1968         }
1969
1970         rtnl_unlock();
1971
1972         if (notify)
1973                 netdev_notify_peers(net);
1974
1975         /* link_watch only sends one notification with current state per
1976          * second, handle next reconfig event in 2 seconds.
1977          */
1978         if (reschedule)
1979                 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
1980
1981         return;
1982
1983 out_unlock:
1984         rtnl_unlock();
1985 }
1986
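/* Map a VF netdev back to the netvsc device it is enslaved to, if any */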
1987 static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
1988 {
1989         struct net_device_context *net_device_ctx;
1990         struct net_device *dev;
1991
1992         dev = netdev_master_upper_dev_get(vf_netdev);
1993         if (!dev || dev->netdev_ops != &device_ops)
1994                 return NULL;    /* not a netvsc device */
1995
1996         net_device_ctx = netdev_priv(dev);
1997         if (!rtnl_dereference(net_device_ctx->nvdev))
1998                 return NULL;    /* device is removed */
1999
2000         return dev;
2001 }
2002
2003 /* Called when the VF is injecting data into the network stack.
2004  * Change the associated network device from the VF to netvsc.
2005  * Note: already called with rcu_read_lock held.
2006  */
2007 static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
2008 {
2009         struct sk_buff *skb = *pskb;
2010         struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
2011         struct net_device_context *ndev_ctx = netdev_priv(ndev);
2012         struct netvsc_vf_pcpu_stats *pcpu_stats
2013                  = this_cpu_ptr(ndev_ctx->vf_stats);
2014
2015         skb = skb_share_check(skb, GFP_ATOMIC);
2016         if (unlikely(!skb))
2017                 return RX_HANDLER_CONSUMED;
2018
2019         *pskb = skb;
2020
2021         skb->dev = ndev;
2022
2023         u64_stats_update_begin(&pcpu_stats->syncp);
2024         pcpu_stats->rx_packets++;
2025         pcpu_stats->rx_bytes += skb->len;
2026         u64_stats_update_end(&pcpu_stats->syncp);
2027
2028         return RX_HANDLER_ANOTHER;
2029 }
2030
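/* Register the rx handler on the VF and make the netvsc device its master */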
2031 static int netvsc_vf_join(struct net_device *vf_netdev,
2032                           struct net_device *ndev, int context)
2033 {
2034         struct net_device_context *ndev_ctx = netdev_priv(ndev);
2035         int ret;
2036
2037         ret = netdev_rx_handler_register(vf_netdev,
2038                                          netvsc_vf_handle_frame, ndev);
2039         if (ret != 0) {
2040                 netdev_err(vf_netdev,
2041                            "can not register netvsc VF receive handler (err = %d)\n",
2042                            ret);
2043                 goto rx_handler_failed;
2044         }
2045
2046         ret = netdev_master_upper_dev_link(vf_netdev, ndev,
2047                                            NULL, NULL, NULL);
2048         if (ret != 0) {
2049                 netdev_err(vf_netdev,
2050                            "can not set master device %s (err = %d)\n",
2051                            ndev->name, ret);
2052                 goto upper_link_failed;
2053         }
2054
2055         /* If this registration is called from probe context, vf_takeover
2056          * is taken care of later in probe itself.
2057          */
2058         if (context == VF_REG_IN_NOTIFIER)
2059                 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
2060
2061         call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
2062
2063         netdev_info(vf_netdev, "joined to %s\n", ndev->name);
2064         return 0;
2065
2066 upper_link_failed:
2067         netdev_rx_handler_unregister(vf_netdev);
2068 rx_handler_failed:
2069         return ret;
2070 }
2071
2072 static void __netvsc_vf_setup(struct net_device *ndev,
2073                               struct net_device *vf_netdev)
2074 {
2075         int ret;
2076
2077         /* Align MTU of VF with master */
2078         ret = dev_set_mtu(vf_netdev, ndev->mtu);
2079         if (ret)
2080                 netdev_warn(vf_netdev,
2081                             "unable to change mtu to %u\n", ndev->mtu);
2082
2083         /* set multicast etc flags on VF */
2084         dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
2085
2086         /* sync address list from ndev to VF */
2087         netif_addr_lock_bh(ndev);
2088         dev_uc_sync(vf_netdev, ndev);
2089         dev_mc_sync(vf_netdev, ndev);
2090         netif_addr_unlock_bh(ndev);
2091
2092         if (netif_running(ndev)) {
2093                 ret = dev_open(vf_netdev);
2094                 if (ret)
2095                         netdev_warn(vf_netdev,
2096                                     "unable to open: %d\n", ret);
2097         }
2098 }
2099
2100 /* Set up the VF as a slave of the synthetic device.
2101  * Runs in a workqueue to avoid recursion in netlink callbacks.
2102  */
2103 static void netvsc_vf_setup(struct work_struct *w)
2104 {
2105         struct net_device_context *ndev_ctx
2106                 = container_of(w, struct net_device_context, vf_takeover.work);
2107         struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2108         struct net_device *vf_netdev;
2109
2110         if (!rtnl_trylock()) {
2111                 schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
2112                 return;
2113         }
2114
2115         vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2116         if (vf_netdev)
2117                 __netvsc_vf_setup(ndev, vf_netdev);
2118
2119         rtnl_unlock();
2120 }
2121
2122 /* Find netvsc by VF serial number.
2123  * The PCI hyperv controller records the serial number as the slot kobj name.
2124  */
2125 static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
2126 {
2127         struct device *parent = vf_netdev->dev.parent;
2128         struct net_device_context *ndev_ctx;
2129         struct net_device *ndev;
2130         struct pci_dev *pdev;
2131         u32 serial;
2132
2133         if (!parent || !dev_is_pci(parent))
2134                 return NULL; /* not a PCI device */
2135
2136         pdev = to_pci_dev(parent);
2137         if (!pdev->slot) {
2138                 netdev_notice(vf_netdev, "no PCI slot information\n");
2139                 return NULL;
2140         }
2141
2142         if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
2143                 netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
2144                               pci_slot_name(pdev->slot));
2145                 return NULL;
2146         }
2147
2148         list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2149                 if (!ndev_ctx->vf_alloc)
2150                         continue;
2151
2152                 if (ndev_ctx->vf_serial != serial)
2153                         continue;
2154
2155                 ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2156                 if (ndev->addr_len != vf_netdev->addr_len ||
2157                     memcmp(ndev->perm_addr, vf_netdev->perm_addr,
2158                            ndev->addr_len) != 0)
2159                         continue;
2160
2161                 return ndev;
2162
2163         }
2164
2165         /* Fallback path: check the synthetic VF with the help of the MAC address.
2166          * Because this function can be called before vf_netdev is
2167          * initialized (NETDEV_POST_INIT), when its perm_addr has not yet been
2168          * copied from dev_addr, also try to match against its dev_addr.
2169          * Note: On Hyper-V and Azure, it's not possible to set a MAC address
2170          * on a VF that matches the MAC of an unrelated NETVSC device.
2171          */
2172         list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2173                 ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2174                 if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
2175                     ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
2176                         return ndev;
2177         }
2178
2179         netdev_notice(vf_netdev,
2180                       "no netdev found for vf serial:%u\n", serial);
2181         return NULL;
2182 }
2183
2184 static int netvsc_prepare_bonding(struct net_device *vf_netdev)
2185 {
2186         struct net_device *ndev;
2187
2188         ndev = get_netvsc_byslot(vf_netdev);
2189         if (!ndev)
2190                 return NOTIFY_DONE;
2191
2192         /* set slave flag before open to prevent IPv6 addrconf */
2193         vf_netdev->flags |= IFF_SLAVE;
2194         return NOTIFY_DONE;
2195 }
2196
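/* Associate a newly registered VF with its matching netvsc device and join
 * them; called from the netdev notifier (NETDEV_REGISTER) or from probe.
 */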
2197 static int netvsc_register_vf(struct net_device *vf_netdev, int context)
2198 {
2199         struct net_device_context *net_device_ctx;
2200         struct netvsc_device *netvsc_dev;
2201         struct net_device *ndev;
2202         int ret;
2203
2204         if (vf_netdev->addr_len != ETH_ALEN)
2205                 return NOTIFY_DONE;
2206
2207         ndev = get_netvsc_byslot(vf_netdev);
2208         if (!ndev)
2209                 return NOTIFY_DONE;
2210
2211         net_device_ctx = netdev_priv(ndev);
2212         netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2213         if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
2214                 return NOTIFY_DONE;
2215
2216         /* If the synthetic interface is in a different namespace,
2217          * then move the VF to that namespace; the join will be
2218          * done again in that context.
2219          */
2220         if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
2221                 ret = dev_change_net_namespace(vf_netdev,
2222                                                dev_net(ndev), "eth%d");
2223                 if (ret)
2224                         netdev_err(vf_netdev,
2225                                    "could not move to same namespace as %s: %d\n",
2226                                    ndev->name, ret);
2227                 else
2228                         netdev_info(vf_netdev,
2229                                     "VF moved to namespace with: %s\n",
2230                                     ndev->name);
2231                 return NOTIFY_DONE;
2232         }
2233
2234         netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
2235
2236         if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
2237                 return NOTIFY_DONE;
2238
2239         dev_hold(vf_netdev);
2240         rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
2241         return NOTIFY_OK;
2242 }
2243
2244 /* VF up/down change detected, schedule to change data path */
2245 static int netvsc_vf_changed(struct net_device *vf_netdev)
2246 {
2247         struct net_device_context *net_device_ctx;
2248         struct netvsc_device *netvsc_dev;
2249         struct net_device *ndev;
2250         bool vf_is_up = netif_running(vf_netdev);
2251
2252         ndev = get_netvsc_byref(vf_netdev);
2253         if (!ndev)
2254                 return NOTIFY_DONE;
2255
2256         net_device_ctx = netdev_priv(ndev);
2257         netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2258         if (!netvsc_dev)
2259                 return NOTIFY_DONE;
2260
2261         if (vf_is_up && !net_device_ctx->vf_alloc) {
2262                 netdev_info(ndev, "Waiting for the VF association from host\n");
2263                 wait_for_completion(&net_device_ctx->vf_add);
2264         }
2265
2266         netvsc_switch_datapath(ndev, vf_is_up);
2267         netdev_info(ndev, "Data path switched %s VF: %s\n",
2268                     vf_is_up ? "to" : "from", vf_netdev->name);
2269
2270         return NOTIFY_OK;
2271 }
2272
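/* VF is being unregistered: detach it from its netvsc master */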
2273 static int netvsc_unregister_vf(struct net_device *vf_netdev)
2274 {
2275         struct net_device *ndev;
2276         struct net_device_context *net_device_ctx;
2277
2278         ndev = get_netvsc_byref(vf_netdev);
2279         if (!ndev)
2280                 return NOTIFY_DONE;
2281
2282         net_device_ctx = netdev_priv(ndev);
2283         cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
2284
2285         netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
2286
2287         reinit_completion(&net_device_ctx->vf_add);
2288         netdev_rx_handler_unregister(vf_netdev);
2289         netdev_upper_dev_unlink(vf_netdev, ndev);
2290         RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
2291         dev_put(vf_netdev);
2292
2293         return NOTIFY_OK;
2294 }
2295
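/* Returns 0 if the netdev is a plausible VF candidate for a netvsc device */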
2296 static int check_dev_is_matching_vf(struct net_device *event_ndev)
2297 {
2298         /* Skip NetVSC interfaces */
2299         if (event_ndev->netdev_ops == &device_ops)
2300                 return -ENODEV;
2301
2302         /* Avoid non-Ethernet type devices */
2303         if (event_ndev->type != ARPHRD_ETHER)
2304                 return -ENODEV;
2305
2306         /* Avoid Vlan dev with same MAC registering as VF */
2307         if (is_vlan_dev(event_ndev))
2308                 return -ENODEV;
2309
2310         /* Avoid Bonding master dev with same MAC registering as VF */
2311         if (netif_is_bond_master(event_ndev))
2312                 return -ENODEV;
2313
2314         return 0;
2315 }
2316
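/* VMBus probe: create the net device, add the RNDIS filter device, and adopt
 * any matching VF that was registered before probe completed.
 */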
2317 static int netvsc_probe(struct hv_device *dev,
2318                         const struct hv_vmbus_device_id *dev_id)
2319 {
2320         struct net_device *net = NULL, *vf_netdev;
2321         struct net_device_context *net_device_ctx;
2322         struct netvsc_device_info *device_info = NULL;
2323         struct netvsc_device *nvdev;
2324         int ret = -ENOMEM;
2325
2326         net = alloc_etherdev_mq(sizeof(struct net_device_context),
2327                                 VRSS_CHANNEL_MAX);
2328         if (!net)
2329                 goto no_net;
2330
2331         netif_carrier_off(net);
2332
2333         netvsc_init_settings(net);
2334
2335         net_device_ctx = netdev_priv(net);
2336         net_device_ctx->device_ctx = dev;
2337         net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
2338         if (netif_msg_probe(net_device_ctx))
2339                 netdev_dbg(net, "netvsc msg_enable: %d\n",
2340                            net_device_ctx->msg_enable);
2341
2342         hv_set_drvdata(dev, net);
2343
2344         INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
2345
2346         init_completion(&net_device_ctx->vf_add);
2347         spin_lock_init(&net_device_ctx->lock);
2348         INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
2349         INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
2350
2351         net_device_ctx->vf_stats
2352                 = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
2353         if (!net_device_ctx->vf_stats)
2354                 goto no_stats;
2355
2356         net->netdev_ops = &device_ops;
2357         net->ethtool_ops = &ethtool_ops;
2358         SET_NETDEV_DEV(net, &dev->device);
2359
2360         /* We always need headroom for rndis header */
2361         net->needed_headroom = RNDIS_AND_PPI_SIZE;
2362
2363         /* Initialize the number of queues to 1; we may change it if more
2364          * channels are offered later.
2365          */
2366         netif_set_real_num_tx_queues(net, 1);
2367         netif_set_real_num_rx_queues(net, 1);
2368
2369         /* Notify the netvsc driver of the new device */
2370         device_info = netvsc_devinfo_get(NULL);
2371
2372         if (!device_info) {
2373                 ret = -ENOMEM;
2374                 goto devinfo_failed;
2375         }
2376
2377         nvdev = rndis_filter_device_add(dev, device_info);
2378         if (IS_ERR(nvdev)) {
2379                 ret = PTR_ERR(nvdev);
2380                 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
2381                 goto rndis_failed;
2382         }
2383
2384         memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
2385
2386         /* We must get rtnl lock before scheduling nvdev->subchan_work,
2387          * otherwise netvsc_subchan_work() can get rtnl lock first and wait
2388          * all subchannels to show up, but that may not happen because
2389          * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2390          * -> ... -> device_add() -> ... -> __device_attach() can't get
2391          * the device lock, so all the subchannels can't be processed --
2392          * finally netvsc_subchan_work() hangs for ever.
2393          */
2394         rtnl_lock();
2395
2396         if (nvdev->num_chn > 1)
2397                 schedule_work(&nvdev->subchan_work);
2398
2399         /* hw_features computed in rndis_netdev_set_hwcaps() */
2400         net->features = net->hw_features |
2401                 NETIF_F_HIGHDMA | NETIF_F_SG |
2402                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2403         net->vlan_features = net->features;
2404
2405         netdev_lockdep_set_classes(net);
2406
2407         /* MTU range: 68 - 1500 or 65521 */
2408         net->min_mtu = NETVSC_MTU_MIN;
2409         if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
2410                 net->max_mtu = NETVSC_MTU - ETH_HLEN;
2411         else
2412                 net->max_mtu = ETH_DATA_LEN;
2413
2414         nvdev->tx_disable = false;
2415
2416         ret = register_netdevice(net);
2417         if (ret != 0) {
2418                 pr_err("Unable to register netdev.\n");
2419                 goto register_failed;
2420         }
2421
2422         list_add(&net_device_ctx->list, &netvsc_dev_list);
2423
2424         /* When the hv_netvsc driver is unloaded and reloaded, the
2425          * NETDEV_REGISTER for the VF device is replayed before probe
2426          * is complete. This is because register_netdevice_notifier() is
2427          * called before vmbus_driver_register(), so that the callback is
2428          * set before probe and we don't miss events like NETDEV_POST_INIT.
2429          * So, in this section we try to register the matching VF device that
2430          * is already present as a netdevice, knowing that its register call was
2431          * not processed in netvsc_netdev_notifier() (as probing was in progress
2432          * and get_netvsc_byslot() failed).
2433          */
2434         for_each_netdev(dev_net(net), vf_netdev) {
2435                 ret = check_dev_is_matching_vf(vf_netdev);
2436                 if (ret != 0)
2437                         continue;
2438
2439                 if (net != get_netvsc_byslot(vf_netdev))
2440                         continue;
2441
2442                 netvsc_prepare_bonding(vf_netdev);
2443                 netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
2444                 __netvsc_vf_setup(net, vf_netdev);
2445                 break;
2446         }
2447         rtnl_unlock();
2448
2449         kfree(device_info);
2450         return 0;
2451
2452 register_failed:
2453         rtnl_unlock();
2454         rndis_filter_device_remove(dev, nvdev);
2455 rndis_failed:
2456         kfree(device_info);
2457 devinfo_failed:
2458         free_percpu(net_device_ctx->vf_stats);
2459 no_stats:
2460         hv_set_drvdata(dev, NULL);
2461         free_netdev(net);
2462 no_net:
2463         return ret;
2464 }
2465
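/* VMBus remove: unregister the VF (if any), remove the RNDIS device and
 * unregister the net device.
 */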
2466 static int netvsc_remove(struct hv_device *dev)
2467 {
2468         struct net_device_context *ndev_ctx;
2469         struct net_device *vf_netdev, *net;
2470         struct netvsc_device *nvdev;
2471
2472         net = hv_get_drvdata(dev);
2473         if (net == NULL) {
2474                 dev_err(&dev->device, "No net device to remove\n");
2475                 return 0;
2476         }
2477
2478         ndev_ctx = netdev_priv(net);
2479
2480         cancel_delayed_work_sync(&ndev_ctx->dwork);
2481
2482         rtnl_lock();
2483         nvdev = rtnl_dereference(ndev_ctx->nvdev);
2484         if (nvdev)
2485                 cancel_work_sync(&nvdev->subchan_work);
2486
2487         /*
2488          * Call into the VSC driver to let it know that the device is being
2489          * removed. Also blocks MTU and channel changes.
2490          */
2491         vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2492         if (vf_netdev)
2493                 netvsc_unregister_vf(vf_netdev);
2494
2495         if (nvdev)
2496                 rndis_filter_device_remove(dev, nvdev);
2497
2498         unregister_netdevice(net);
2499         list_del(&ndev_ctx->list);
2500
2501         rtnl_unlock();
2502
2503         hv_set_drvdata(dev, NULL);
2504
2505         free_percpu(ndev_ctx->vf_stats);
2506         free_netdev(net);
2507         return 0;
2508 }
2509
2510 static const struct hv_vmbus_device_id id_table[] = {
2511         /* Network guid */
2512         { HV_NIC_GUID, },
2513         { },
2514 };
2515
2516 MODULE_DEVICE_TABLE(vmbus, id_table);
2517
2518 /* The one and only one */
2519 static struct  hv_driver netvsc_drv = {
2520         .name = KBUILD_MODNAME,
2521         .id_table = id_table,
2522         .probe = netvsc_probe,
2523         .remove = netvsc_remove,
2524         .driver = {
2525                 .probe_type = PROBE_FORCE_SYNCHRONOUS,
2526         },
2527 };
2528
2529 /*
2530  * On Hyper-V, every VF interface is matched with a corresponding
2531  * synthetic interface. The synthetic interface is presented first
2532  * to the guest. When the corresponding VF instance is registered,
2533  * we will take care of switching the data path.
2534  */
2535 static int netvsc_netdev_event(struct notifier_block *this,
2536                                unsigned long event, void *ptr)
2537 {
2538         struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2539         int ret = 0;
2540
2541         ret = check_dev_is_matching_vf(event_dev);
2542         if (ret != 0)
2543                 return NOTIFY_DONE;
2544
2545         switch (event) {
2546         case NETDEV_POST_INIT:
2547                 return netvsc_prepare_bonding(event_dev);
2548         case NETDEV_REGISTER:
2549                 return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
2550         case NETDEV_UNREGISTER:
2551                 return netvsc_unregister_vf(event_dev);
2552         case NETDEV_UP:
2553         case NETDEV_DOWN:
2554                 return netvsc_vf_changed(event_dev);
2555         default:
2556                 return NOTIFY_DONE;
2557         }
2558 }
2559
2560 static struct notifier_block netvsc_netdev_notifier = {
2561         .notifier_call = netvsc_netdev_event,
2562 };
2563
2564 static void __exit netvsc_drv_exit(void)
2565 {
2566         unregister_netdevice_notifier(&netvsc_netdev_notifier);
2567         vmbus_driver_unregister(&netvsc_drv);
2568 }
2569
2570 static int __init netvsc_drv_init(void)
2571 {
2572         int ret;
2573
2574         if (ring_size < RING_SIZE_MIN) {
2575                 ring_size = RING_SIZE_MIN;
2576                 pr_info("Increased ring_size to %u (min allowed)\n",
2577                         ring_size);
2578         }
2579         netvsc_ring_bytes = ring_size * PAGE_SIZE;
2580
2581         register_netdevice_notifier(&netvsc_netdev_notifier);
2582
2583         ret = vmbus_driver_register(&netvsc_drv);
2584         if (ret)
2585                 goto err_vmbus_reg;
2586
2587         return 0;
2588
2589 err_vmbus_reg:
2590         unregister_netdevice_notifier(&netvsc_netdev_notifier);
2591         return ret;
2592 }
2593
2594 MODULE_LICENSE("GPL");
2595 MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
2596
2597 module_init(netvsc_drv_init);
2598 module_exit(netvsc_drv_exit);