/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include "hyperv_vmbus.h"

static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;
static void hyperv_report_panic(struct pt_regs *regs)
{
	static bool panic_reported;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	regs = current_pt_regs();

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}
static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};
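/*
 * hyperv_mmio below is the head of a singly-linked list (chained
 * through ->sibling) of MMIO bus windows parsed from the ACPI _CRS
 * objects; hyperv_mmio_lock serializes allocations from that list.
 */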
static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_SEMAPHORE(hyperv_mmio_lock);
static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}
#define VMBUS_ALIAS_LEN ((sizeof((((struct hv_vmbus_device_id *)0)->guid))) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;
	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
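/*
 * A channel's 8-bit monitor ID indexes a 4-group x 32-slot layout on
 * the shared monitor pages: group = id / 32, offset within the group
 * = id % 32. The helpers below do that split and the lookups.
 */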
static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);
	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);
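/*
 * For instance, the network device class GUID
 * f8615163-df3e-46c5-913f-f2d2f965ed0e (hv_netvsc) reads back as
 * "vmbus:635161f83edfc546913ff2d2f965ed0e" -- the raw uuid_le bytes
 * in memory order, two hex characters per byte.
 */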
static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);
static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
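/*
 * channel_vp_mapping below prints one "<child_relid>:<target_cpu>"
 * pair per line -- the primary channel first, then any sub-channels --
 * showing which virtual processor services each channel's interrupts.
 */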
static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				     buf_size - tot_written,
				     "%u:%u\n",
				     cur_sc->offermsg.child_relid,
				     cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus);
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. udev will then look at its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid is represented
 * with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}
static const uuid_le null_guid;

static inline bool is_null_guid(const uuid_le *guid)
{
	if (uuid_le_cmp(*guid, null_guid))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
					const struct hv_vmbus_device_id *id,
					const uuid_le *guid)
{
	for (; !is_null_guid(&id->guid); id++)
		if (!uuid_le_cmp(id->guid, *guid))
			return id;

	return NULL;
}
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
		return 1;

	return 0;
}
/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}
/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}
/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}
/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_process_channel_removal(channel,
				   channel->offermsg.child_relid);
	kfree(hv_dev);
}
/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =	vmbus_match,
	.shutdown =	vmbus_shutdown,
	.remove =	vmbus_remove,
	.probe =	vmbus_probe,
	.uevent =	vmbus_uevent,
	.dev_groups =	vmbus_groups,
};
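/*
 * Carries a private copy of a host message onto the VMBus work queue,
 * for channel messages whose handlers may block (VMHT_BLOCKING) and
 * therefore cannot run in the message DPC itself.
 */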
struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}
static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
}
void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	u32 message_type = msg->header.message_type;

	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];
	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(*msg));

		queue_work(vmbus_connection.work_queue, &ctx->work);
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}
static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);

	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(hv_context.msg_dpc[cpu]);
	}

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	on_each_cpu(hv_synic_init, NULL, 1);
	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_disable();

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		register_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();

	return 0;

err_connect:
	on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);

	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner,
			    const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
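/*
 * Typical use from a child driver (a sketch; the "mydev" names are
 * illustrative). The vmbus_driver_register() wrapper in <linux/hyperv.h>
 * supplies THIS_MODULE and KBUILD_MODNAME for the two extra arguments:
 *
 *	static struct hv_driver mydev_drv = {
 *		.name = "mydev",
 *		.id_table = mydev_id_table,	// GUID list, null-GUID terminated
 *		.probe = mydev_probe,
 *		.remove = mydev_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&mydev_drv);
 */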
/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to
 *		un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists())
		driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     child_device_obj->channel->offermsg.offer.if_instance.b);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			 dev_name(&child_device_obj->device));

	return ret;
}
/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}
/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;
	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}
static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB. The length seems to
	 * be underreported, particularly in a Generation 1 VM. So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */

	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}
/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplied a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case. It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * the caller.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	down(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region. Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {

		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size,
							    dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size,
							    dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	up(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
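/*
 * Example call pattern (a sketch; the values are illustrative): ask
 * for 4KB anywhere in the VMBus MMIO space, page aligned, refusing
 * to overlap the frame buffer:
 *
 *	struct resource *res;
 *	int ret = vmbus_allocate_mmio(&res, hdev, 0, -1, 0x1000,
 *				      0x1000, false);
 *
 * On success the caller owns [res->start, res->end] until it calls
 * vmbus_free_mmio(res->start, resource_size(res)).
 */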
/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	down(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	up(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
/**
 * vmbus_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
int vmbus_cpu_number_to_vp_number(int cpu_number)
{
	return hv_context.vp_index[cpu_number];
}
EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number);
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};
static void hv_kexec_handler(void)
{
	int cpu;

	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload(false);
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	hv_cleanup(false);
}

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	hv_synic_cleanup(NULL);
	hv_cleanup(true);
}
static int __init hv_acpi_init(void)
{
	int ret, t;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);
	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu)
		tasklet_kill(hv_context.msg_dpc[cpu]);
	vmbus_free_channels();
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}
	bus_unregister(&hv_bus);

	for_each_online_cpu(cpu) {
		tasklet_kill(hv_context.event_dpc[cpu]);
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	}
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_enable();
}
MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);