GNU Linux-libre 4.9.290-gnu1
drivers/hv/vmbus_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include "hyperv_vmbus.h"

static struct acpi_device  *hv_acpi_dev;

static struct completion probe_event;

static void hyperv_report_panic(struct pt_regs *regs)
{
        static bool panic_reported;

        /*
         * We prefer to report panic on 'die' chain as we have proper
         * registers to report, but if we miss it (e.g. on BUG()) we need
         * to report it on 'panic'.
         */
        if (panic_reported)
                return;
        panic_reported = true;

        wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
        wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
        wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
        wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
        wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

        /*
         * Let Hyper-V know there is crash data available
         */
        wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
                              void *args)
{
        struct pt_regs *regs;

        regs = current_pt_regs();

        hyperv_report_panic(regs);
        return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
                            void *args)
{
        struct die_args *die = (struct die_args *)args;
        struct pt_regs *regs = die->regs;

        hyperv_report_panic(regs);
        return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
        .notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
        .notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_SEMAPHORE(hyperv_mmio_lock);

static int vmbus_exists(void)
{
        if (hv_acpi_dev == NULL)
                return -ENODEV;

        return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
        int i;
        for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
                sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
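
/*
 * Illustrative alias (not part of the driver): the bytes are emitted in
 * raw uuid_le order, so the synthetic NIC class GUID
 * f8615163-df3e-46c5-913f-f2d2f965ed0e would become
 *
 *      vmbus:635161f83edfc546913ff2d2f965ed0e
 *
 * with the first three GUID fields appearing byte-swapped.
 */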

static u8 channel_monitor_group(struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid % 32;
}
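
/*
 * Worked example: a channel with monitorid 77 belongs to trigger group
 * 77 / 32 = 2 and occupies bit offset 77 % 32 = 13 within that group.
 */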

static u32 channel_pending(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);
        return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);
        return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
                              struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        char alias_name[VMBUS_ALIAS_LEN + 1];

        print_alias_name(hv_dev, alias_name);
        return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t server_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        /* Server-side state lives in monitor_pages[0], client-side in [1]. */
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
                                    struct device_attribute *dev_attr,
                                    char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
                                          struct device_attribute *dev_attr,
                                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
                                 struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
                                        struct device_attribute *dev_attr,
                                        char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
                                       struct device_attribute *dev_attr,
                                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
        unsigned long flags;
        int buf_size = PAGE_SIZE, n_written, tot_written;
        struct list_head *cur;

        if (!channel)
                return -ENODEV;

        tot_written = snprintf(buf, buf_size, "%u:%u\n",
                channel->offermsg.child_relid, channel->target_cpu);

        spin_lock_irqsave(&channel->lock, flags);

        list_for_each(cur, &channel->sc_list) {
                if (tot_written >= buf_size - 1)
                        break;

                cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
                n_written = scnprintf(buf + tot_written,
                                     buf_size - tot_written,
                                     "%u:%u\n",
                                     cur_sc->offermsg.child_relid,
                                     cur_sc->target_cpu);
                tot_written += n_written;
        }

        spin_unlock_irqrestore(&channel->lock, flags);

        return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
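
/*
 * Sample channel_vp_mapping output (illustrative values): a primary
 * channel with relid 14 bound to CPU 0 plus one sub-channel with relid 15
 * bound to CPU 1 reads back as:
 *
 *      14:0
 *      15:1
 */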

static ssize_t vendor_show(struct device *dev,
                           struct device_attribute *dev_attr,
                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
                           struct device_attribute *dev_attr,
                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_state.attr,
        &dev_attr_monitor_id.attr,
        &dev_attr_class_id.attr,
        &dev_attr_device_id.attr,
        &dev_attr_modalias.attr,
        &dev_attr_server_monitor_pending.attr,
        &dev_attr_client_monitor_pending.attr,
        &dev_attr_server_monitor_latency.attr,
        &dev_attr_client_monitor_latency.attr,
        &dev_attr_server_monitor_conn_id.attr,
        &dev_attr_client_monitor_conn_id.attr,
        &dev_attr_out_intr_mask.attr,
        &dev_attr_out_read_index.attr,
        &dev_attr_out_write_index.attr,
        &dev_attr_out_read_bytes_avail.attr,
        &dev_attr_out_write_bytes_avail.attr,
        &dev_attr_in_intr_mask.attr,
        &dev_attr_in_read_index.attr,
        &dev_attr_in_write_index.attr,
        &dev_attr_in_read_bytes_avail.attr,
        &dev_attr_in_write_bytes_avail.attr,
        &dev_attr_channel_vp_mapping.attr,
        &dev_attr_vendor.attr,
        &dev_attr_device.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vmbus);
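
/*
 * These attributes appear under /sys/bus/vmbus/devices/<instance-guid>/.
 * An illustrative (made-up) read from userspace:
 *
 *      $ cat /sys/bus/vmbus/devices/<instance-guid>/id
 *      14
 *      $ cat /sys/bus/vmbus/devices/<instance-guid>/class_id
 *      {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */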

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rules and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
        struct hv_device *dev = device_to_hv_device(device);
        int ret;
        char alias_name[VMBUS_ALIAS_LEN + 1];

        print_alias_name(dev, alias_name);
        ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
        return ret;
}

static const uuid_le null_guid;

static inline bool is_null_guid(const uuid_le *guid)
{
        if (uuid_le_cmp(*guid, null_guid))
                return false;
        return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
                                        const struct hv_vmbus_device_id *id,
                                        const uuid_le *guid)
{
        for (; !is_null_guid(&id->guid); id++)
                if (!uuid_le_cmp(id->guid, *guid))
                        return id;

        return NULL;
}
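
/*
 * A minimal sketch of the table a client driver hands to this matcher,
 * assuming the HV_NIC_GUID helper from <linux/hyperv.h>; the all-zero
 * terminating entry is what is_null_guid() detects:
 *
 *      static const struct hv_vmbus_device_id id_table[] = {
 *              { HV_NIC_GUID, },
 *              { },
 *      };
 */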

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct hv_device *hv_dev = device_to_hv_device(device);

        /* The hv_sock driver handles all hv_sock offers. */
        if (is_hvsock_channel(hv_dev->channel))
                return drv->hvsock;

        if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
                return 1;

        return 0;
}

/*
 * vmbus_probe - Add the new vmbus child device
 */
static int vmbus_probe(struct device *child_device)
{
        int ret = 0;
        struct hv_driver *drv =
                        drv_to_hv_drv(child_device->driver);
        struct hv_device *dev = device_to_hv_device(child_device);
        const struct hv_vmbus_device_id *dev_id;

        dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type);
        if (drv->probe) {
                ret = drv->probe(dev, dev_id);
                if (ret != 0)
                        pr_err("probe failed for device %s (%d)\n",
                               dev_name(child_device), ret);

        } else {
                pr_err("probe not set for driver %s\n",
                       dev_name(child_device));
                ret = -ENODEV;
        }
        return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        if (child_device->driver) {
                drv = drv_to_hv_drv(child_device->driver);
                if (drv->remove)
                        drv->remove(dev);
        }

        return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return;

        drv = drv_to_hv_drv(child_device->driver);

        if (drv->shutdown)
                drv->shutdown(dev);
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
        struct hv_device *hv_dev = device_to_hv_device(device);
        struct vmbus_channel *channel = hv_dev->channel;

        hv_process_channel_removal(channel,
                                   channel->offermsg.child_relid);
        kfree(hv_dev);
}

/* The one and only one */
static struct bus_type  hv_bus = {
        .name =                 "vmbus",
        .match =                vmbus_match,
        .shutdown =             vmbus_shutdown,
        .remove =               vmbus_remove,
        .probe =                vmbus_probe,
        .uevent =               vmbus_uevent,
        .dev_groups =           vmbus_groups,
};

struct onmessage_work_context {
        struct work_struct work;
        struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
        struct onmessage_work_context *ctx;

        /* Do not process messages if we're in DISCONNECTED state */
        if (vmbus_connection.conn_state == DISCONNECTED)
                return;

        ctx = container_of(work, struct onmessage_work_context,
                           work);
        vmbus_onmessage(&ctx->msg);
        kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
        struct clock_event_device *dev = hv_context.clk_evt[cpu];

        if (dev->event_handler)
                dev->event_handler(dev);

        vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
}

void vmbus_on_msg_dpc(unsigned long data)
{
        int cpu = smp_processor_id();
        void *page_addr = hv_context.synic_message_page[cpu];
        struct hv_message *msg = (struct hv_message *)page_addr +
                                  VMBUS_MESSAGE_SINT;
        struct vmbus_channel_message_header *hdr;
        struct vmbus_channel_message_table_entry *entry;
        struct onmessage_work_context *ctx;
        u32 message_type = msg->header.message_type;

        if (message_type == HVMSG_NONE)
                /* no msg */
                return;

        hdr = (struct vmbus_channel_message_header *)msg->u.payload;

        if (hdr->msgtype >= CHANNELMSG_COUNT) {
                WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
                goto msg_handled;
        }

        entry = &channel_message_table[hdr->msgtype];
        if (entry->handler_type == VMHT_BLOCKING) {
                ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
                if (ctx == NULL)
                        return;

                INIT_WORK(&ctx->work, vmbus_onmessage_work);
                memcpy(&ctx->msg, msg, sizeof(*msg));

                queue_work(vmbus_connection.work_queue, &ctx->work);
        } else
                entry->message_handler(hdr);

msg_handled:
        vmbus_signal_eom(msg, message_type);
}
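
/*
 * For example, CHANNELMSG_OFFERCHANNEL is marked VMHT_BLOCKING in
 * channel_message_table (its handler may sleep), so it is bounced to the
 * work queue above, while non-blocking messages are handled directly in
 * this tasklet context.
 */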

static void vmbus_isr(void)
{
        int cpu = smp_processor_id();
        void *page_addr;
        struct hv_message *msg;
        union hv_synic_event_flags *event;
        bool handled = false;

        page_addr = hv_context.synic_event_page[cpu];
        if (page_addr == NULL)
                return;

        event = (union hv_synic_event_flags *)page_addr +
                                         VMBUS_MESSAGE_SINT;
        /*
         * Check for events before checking for messages. This is the order
         * in which events and messages are checked in Windows guests on
         * Hyper-V, and the Windows team suggested we do the same.
         */

        if ((vmbus_proto_version == VERSION_WS2008) ||
                (vmbus_proto_version == VERSION_WIN7)) {

                /* Since we are a child, we only need to check bit 0 */
                if (sync_test_and_clear_bit(0,
                        (unsigned long *) &event->flags32[0])) {
                        handled = true;
                }
        } else {
                /*
                 * Our host is win8 or above. The signaling mechanism
                 * has changed and we can directly look at the event page.
                 * If bit n is set then we have an interrupt on the channel
                 * whose id is n.
                 */
                handled = true;
        }

        if (handled)
                tasklet_schedule(hv_context.event_dpc[cpu]);

        page_addr = hv_context.synic_message_page[cpu];
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

        /* Check if there are actual msgs to be processed */
        if (msg->header.message_type != HVMSG_NONE) {
                if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
                        hv_process_timer_expiration(msg, cpu);
                else
                        tasklet_schedule(hv_context.msg_dpc[cpu]);
        }

        add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *      - initialize the vmbus driver context
 *      - invoke the vmbus hv main init routine
 *      - retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
        int ret;

        /* Hypervisor initialization: set up the hypercall page, etc. */
        ret = hv_init();
        if (ret != 0) {
                pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
                return ret;
        }

        ret = bus_register(&hv_bus);
        if (ret)
                goto err_cleanup;

        hv_setup_vmbus_irq(vmbus_isr);

        ret = hv_synic_alloc();
        if (ret)
                goto err_alloc;
        /*
         * Initialize the per-cpu interrupt state and
         * connect to the host.
         */
        on_each_cpu(hv_synic_init, NULL, 1);
        ret = vmbus_connect();
        if (ret)
                goto err_connect;

        if (vmbus_proto_version > VERSION_WIN7)
                cpu_hotplug_disable();

        /*
         * Only register if the crash MSRs are available
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                register_die_notifier(&hyperv_die_block);
                atomic_notifier_chain_register(&panic_notifier_list,
                                               &hyperv_panic_block);
        }

        vmbus_request_offers();

        return 0;

err_connect:
        on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
        hv_synic_free();
        hv_remove_vmbus_irq();

        bus_unregister(&hv_bus);

err_cleanup:
        hv_cleanup(false);

        return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
        int ret;

        pr_info("registering driver %s\n", hv_driver->name);

        ret = vmbus_exists();
        if (ret < 0)
                return ret;

        hv_driver->driver.name = hv_driver->name;
        hv_driver->driver.owner = owner;
        hv_driver->driver.mod_name = mod_name;
        hv_driver->driver.bus = &hv_bus;

        ret = driver_register(&hv_driver->driver);

        return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
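
/*
 * A minimal sketch of a client driver built on this interface. The names
 * are illustrative; vmbus_driver_register() is the <linux/hyperv.h>
 * wrapper macro that supplies THIS_MODULE and KBUILD_MODNAME:
 *
 *      static const struct hv_vmbus_device_id sample_id_table[] = {
 *              { HV_NIC_GUID, },
 *              { },
 *      };
 *
 *      static int sample_probe(struct hv_device *dev,
 *                              const struct hv_vmbus_device_id *dev_id)
 *      {
 *              return 0;       /* claim the device */
 *      }
 *
 *      static int sample_remove(struct hv_device *dev)
 *      {
 *              return 0;
 *      }
 *
 *      static struct hv_driver sample_drv = {
 *              .name = "sample",
 *              .id_table = sample_id_table,
 *              .probe = sample_probe,
 *              .remove = sample_remove,
 *      };
 *
 * registered from module init with vmbus_driver_register(&sample_drv) and
 * torn down with vmbus_driver_unregister(&sample_drv).
 */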

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
        pr_info("unregistering driver %s\n", hv_driver->name);

        if (!vmbus_exists())
                driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
                                      const uuid_le *instance,
                                      struct vmbus_channel *channel)
{
        struct hv_device *child_device_obj;

        child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
        if (!child_device_obj) {
                pr_err("Unable to allocate device object for child device\n");
                return NULL;
        }

        child_device_obj->channel = channel;
        memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
        memcpy(&child_device_obj->dev_instance, instance,
               sizeof(uuid_le));
        child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

        return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
        int ret = 0;

        dev_set_name(&child_device_obj->device, "%pUl",
                     child_device_obj->channel->offermsg.offer.if_instance.b);

        child_device_obj->device.bus = &hv_bus;
        child_device_obj->device.parent = &hv_acpi_dev->dev;
        child_device_obj->device.release = vmbus_device_release;

        /*
         * Register with the LDM. This will kick off the driver/device
         * binding...which will eventually call vmbus_match() and vmbus_probe()
         */
        ret = device_register(&child_device_obj->device);

        if (ret)
                pr_err("Unable to register child device\n");
        else
                pr_debug("child device %s registered\n",
                        dev_name(&child_device_obj->device));

        return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
        pr_debug("child device %s unregistered\n",
                dev_name(&device_obj->device));

        /*
         * Kick off the process of unregistering the device.
         * This will call vmbus_remove() and eventually vmbus_device_release()
         */
        device_unregister(&device_obj->device);
}

/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
        resource_size_t start = 0;
        resource_size_t end = 0;
        struct resource *new_res;
        struct resource **old_res = &hyperv_mmio;
        struct resource **prev_res = NULL;

        switch (res->type) {

        /*
         * "Address" descriptors are for bus windows. Ignore
         * "memory" descriptors, which are for registers on
         * devices.
         */
        case ACPI_RESOURCE_TYPE_ADDRESS32:
                start = res->data.address32.address.minimum;
                end = res->data.address32.address.maximum;
                break;

        case ACPI_RESOURCE_TYPE_ADDRESS64:
                start = res->data.address64.address.minimum;
                end = res->data.address64.address.maximum;
                break;

        default:
                /* Unused resource type */
                return AE_OK;

        }
        /*
         * Ignore ranges that are below 1MB, as they're not
         * necessary or useful here.
         */
        if (end < 0x100000)
                return AE_OK;

        new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
        if (!new_res)
                return AE_NO_MEMORY;

        /* If this range overlaps the virtual TPM, truncate it. */
        if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
                end = VTPM_BASE_ADDRESS;

        new_res->name = "hyperv mmio";
        new_res->flags = IORESOURCE_MEM;
        new_res->start = start;
        new_res->end = end;

        /*
         * If two ranges are adjacent, merge them.
         */
        do {
                if (!*old_res) {
                        *old_res = new_res;
                        break;
                }

                if (((*old_res)->end + 1) == new_res->start) {
                        (*old_res)->end = new_res->end;
                        kfree(new_res);
                        break;
                }

                if ((*old_res)->start == new_res->end + 1) {
                        (*old_res)->start = new_res->start;
                        kfree(new_res);
                        break;
                }

                if ((*old_res)->start > new_res->end) {
                        new_res->sibling = *old_res;
                        if (prev_res)
                                (*prev_res)->sibling = new_res;
                        *old_res = new_res;
                        break;
                }

                prev_res = old_res;
                old_res = &(*old_res)->sibling;

        } while (1);

        return AE_OK;
}
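
/*
 * Worked example of the merge logic above: if the list already holds
 * [0xfe0000000-0xfffffffff] and a new window [0xfd0000000-0xfdfffffff]
 * arrives, then new_res->end + 1 == (*old_res)->start, so the existing
 * entry is extended downward to start at 0xfd0000000 instead of being
 * linked as a separate node.
 */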

static int vmbus_acpi_remove(struct acpi_device *device)
{
        struct resource *cur_res;
        struct resource *next_res;

        if (hyperv_mmio) {
                if (fb_mmio) {
                        __release_region(hyperv_mmio, fb_mmio->start,
                                         resource_size(fb_mmio));
                        fb_mmio = NULL;
                }

                for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
                        next_res = cur_res->sibling;
                        kfree(cur_res);
                }
        }

        return 0;
}

static void vmbus_reserve_fb(void)
{
        int size;
        /*
         * Make a claim for the frame buffer in the resource tree under the
         * first node, which will be the one below 4GB.  The length seems to
         * be underreported, particularly in a Generation 1 VM.  So start out
         * reserving a larger area and make it smaller until it succeeds.
         */

        if (screen_info.lfb_base) {
                if (efi_enabled(EFI_BOOT))
                        size = max_t(__u32, screen_info.lfb_size, 0x800000);
                else
                        size = max_t(__u32, screen_info.lfb_size, 0x4000000);

                for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
                        fb_mmio = __request_region(hyperv_mmio,
                                                   screen_info.lfb_base, size,
                                                   fb_mmio_name, 0);
                }
        }
}
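
/*
 * Illustrative sizing walk for the loop above on a Generation 1 (non-EFI)
 * VM: the first attempt reserves max(lfb_size, 0x4000000); each failure
 * halves the size, so the candidates are 64MB, 32MB, ..., down to the
 * 1MB (0x100000) floor, and the first successful reservation wins.
 */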

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:                If successful, supplies a pointer to the
 *                      allocated MMIO space.
 * @device_obj:         Identifies the caller
 * @min:                Minimum guest physical address of the
 *                      allocation
 * @max:                Maximum guest physical address
 * @size:               Size of the range to be allocated
 * @align:              Alignment of the range to be allocated
 * @fb_overlap_ok:      Whether this allocation can be allowed
 *                      to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
                        resource_size_t min, resource_size_t max,
                        resource_size_t size, resource_size_t align,
                        bool fb_overlap_ok)
{
        struct resource *iter, *shadow;
        resource_size_t range_min, range_max, start;
        const char *dev_n = dev_name(&device_obj->device);
        int retval;

        retval = -ENXIO;
        down(&hyperv_mmio_lock);

        /*
         * If overlaps with frame buffers are allowed, then first attempt to
         * make the allocation from within the reserved region.  Because it
         * is already reserved, no shadow allocation is necessary.
         */
        if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
            !(max < fb_mmio->start)) {

                range_min = fb_mmio->start;
                range_max = fb_mmio->end;
                start = (range_min + align - 1) & ~(align - 1);
                for (; start + size - 1 <= range_max; start += align) {
                        *new = request_mem_region_exclusive(start, size, dev_n);
                        if (*new) {
                                retval = 0;
                                goto exit;
                        }
                }
        }

        for (iter = hyperv_mmio; iter; iter = iter->sibling) {
                if ((iter->start >= max) || (iter->end <= min))
                        continue;

                range_min = iter->start;
                range_max = iter->end;
                start = (range_min + align - 1) & ~(align - 1);
                for (; start + size - 1 <= range_max; start += align) {
                        shadow = __request_region(iter, start, size, NULL,
                                                  IORESOURCE_BUSY);
                        if (!shadow)
                                continue;

                        *new = request_mem_region_exclusive(start, size, dev_n);
                        if (*new) {
                                shadow->name = (char *)*new;
                                retval = 0;
                                goto exit;
                        }

                        __release_region(iter, start, size);
                }
        }

exit:
        up(&hyperv_mmio_lock);
        return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
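
/*
 * Usage sketch, modeled loosely on how the Hyper-V framebuffer driver
 * consumes this interface ('hdev' and 'fb_size' are illustrative names,
 * not defined here):
 *
 *      struct resource *fb_res;
 *      int ret;
 *
 *      ret = vmbus_allocate_mmio(&fb_res, hdev, 0, -1, fb_size,
 *                                0x100000, true);
 *      if (ret)
 *              return ret;
 *      ...
 *      vmbus_free_mmio(fb_res->start, fb_size);
 */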

/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:              Base address of region to release.
 * @size:               Size of the range to be released
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
        struct resource *iter;

        down(&hyperv_mmio_lock);
        for (iter = hyperv_mmio; iter; iter = iter->sibling) {
                if ((iter->start >= start + size) || (iter->end <= start))
                        continue;

                __release_region(iter, start, size);
        }
        release_mem_region(start, size);
        up(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);

/**
 * vmbus_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
int vmbus_cpu_number_to_vp_number(int cpu_number)
{
        return hv_context.vp_index[cpu_number];
}
EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number);
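
/*
 * Illustrative use: code that must name a processor in hypervisor terms
 * translates first, e.g.
 *
 *      int vp = vmbus_cpu_number_to_vp_number(smp_processor_id());
 */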

static int vmbus_acpi_add(struct acpi_device *device)
{
        acpi_status result;
        int ret_val = -ENODEV;
        struct acpi_device *ancestor;

        hv_acpi_dev = device;

        result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                        vmbus_walk_resources, NULL);

        if (ACPI_FAILURE(result))
                goto acpi_walk_err;
        /*
         * Some ancestor of the vmbus acpi device (Gen1 or Gen2
         * firmware) is the VMOD that has the mmio ranges. Get that.
         */
        for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
                result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
                                             vmbus_walk_resources, NULL);

                if (ACPI_FAILURE(result))
                        continue;
                if (hyperv_mmio) {
                        vmbus_reserve_fb();
                        break;
                }
        }
        ret_val = 0;

acpi_walk_err:
        complete(&probe_event);
        if (ret_val)
                vmbus_acpi_remove(device);
        return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
        {"VMBUS", 0},
        {"VMBus", 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
        .name = "vmbus",
        .ids = vmbus_acpi_device_ids,
        .ops = {
                .add = vmbus_acpi_add,
                .remove = vmbus_acpi_remove,
        },
};

static void hv_kexec_handler(void)
{
        int cpu;

        hv_synic_clockevents_cleanup();
        vmbus_initiate_unload(false);
        for_each_online_cpu(cpu)
                smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
        hv_cleanup(false);
}

static void hv_crash_handler(struct pt_regs *regs)
{
        vmbus_initiate_unload(true);
        /*
         * In the crash handler we can't schedule the synic cleanup for all
         * CPUs, so do the cleanup for the current CPU only. This should be
         * sufficient for kdump.
         */
        hv_synic_cleanup(NULL);
        hv_cleanup(true);
}

static int __init hv_acpi_init(void)
{
        int ret, t;

        if (x86_hyper != &x86_hyper_ms_hyperv)
                return -ENODEV;

        init_completion(&probe_event);

        /*
         * Get ACPI resources first.
         */
        ret = acpi_bus_register_driver(&vmbus_acpi_driver);

        if (ret)
                return ret;

        t = wait_for_completion_timeout(&probe_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        ret = vmbus_bus_init();
        if (ret)
                goto cleanup;

        hv_setup_kexec_handler(hv_kexec_handler);
        hv_setup_crash_handler(hv_crash_handler);

        return 0;

cleanup:
        acpi_bus_unregister_driver(&vmbus_acpi_driver);
        hv_acpi_dev = NULL;
        return ret;
}

static void __exit vmbus_exit(void)
{
        int cpu;

        hv_remove_kexec_handler();
        hv_remove_crash_handler();
        vmbus_connection.conn_state = DISCONNECTED;
        hv_synic_clockevents_cleanup();
        vmbus_disconnect();
        hv_remove_vmbus_irq();
        for_each_online_cpu(cpu)
                tasklet_kill(hv_context.msg_dpc[cpu]);
        vmbus_free_channels();
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                unregister_die_notifier(&hyperv_die_block);
                atomic_notifier_chain_unregister(&panic_notifier_list,
                                                 &hyperv_panic_block);
        }
        bus_unregister(&hv_bus);
        hv_cleanup(false);
        for_each_online_cpu(cpu) {
                tasklet_kill(hv_context.event_dpc[cpu]);
                smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
        }
        hv_synic_free();
        acpi_bus_unregister_driver(&vmbus_acpi_driver);
        if (vmbus_proto_version > VERSION_WIN7)
                cpu_hotplug_enable();
}

MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);