/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/nospec.h>
#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
        return atomic64_read(&synic->sint[sint]);
}
static inline int synic_get_sint_vector(u64 sint_value)
{
        if (sint_value & HV_SYNIC_SINT_MASKED)
                return -1;
        return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}
static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
                                       int vector)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        return true;
        }
        return false;
}
static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
                                      int vector)
{
        int i;
        u64 sint_value;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                sint_value = synic_read_sint(synic, i);
                if (synic_get_sint_vector(sint_value) == vector &&
                    sint_value & HV_SYNIC_SINT_AUTO_EOI)
                        return true;
        }
        return false;
}
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
                          u64 data, bool host)
{
        int vector;

        vector = data & HV_SYNIC_SINT_VECTOR_MASK;
        if (vector < 16 && !host)
                return 1;
        /*
         * Guest may configure multiple SINTs to use the same vector, so
         * we maintain a bitmap of vectors handled by synic, and a
         * bitmap of vectors with auto-eoi behavior.  The bitmaps are
         * updated here, and atomically queried on fast paths.
         */

        atomic64_set(&synic->sint[sint], data);

        if (synic_has_vector_connected(synic, vector))
                __set_bit(vector, synic->vec_bitmap);
        else
                __clear_bit(vector, synic->vec_bitmap);

        if (synic_has_vector_auto_eoi(synic, vector))
                __set_bit(vector, synic->auto_eoi_bitmap);
        else
                __clear_bit(vector, synic->auto_eoi_bitmap);

        /* Load SynIC vectors into EOI exit bitmap */
        kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
        return 0;
}
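/*
 * Look up the SynIC of the given vCPU; returns NULL unless the vCPU
 * exists and its SynIC has been activated.
 */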
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vcpu_id)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_hv_synic *synic;

        if (vcpu_id >= atomic_read(&kvm->online_vcpus))
                return NULL;
        vcpu = kvm_get_vcpu(kvm, vcpu_id);
        if (!vcpu)
                return NULL;
        synic = vcpu_to_synic(vcpu);
        return (synic->active) ? synic : NULL;
}
static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
                                         u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *msg;
        struct hv_message_page *msg_page;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page)) {
                vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
                         gpa);
                return;
        }
        msg_page = kmap_atomic(page);

        msg = &msg_page->sint_message[sint];
        msg->header.message_flags.msg_pending = 0;

        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}
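/*
 * Called when the guest acknowledges a SynIC interrupt: clear the pending
 * flag in the message page, queue redelivery of any timer messages bound
 * for this SINT, and forward the ack to the GSI routed to it, if any.
 */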
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int gsi, idx, stimers_pending;

        trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

        if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
                synic_clear_sint_msg_pending(synic, sint);

        /* Try to deliver pending Hyper-V SynIC timer messages */
        stimers_pending = 0;
        for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
                stimer = &hv_vcpu->stimer[idx];
                if (stimer->msg_pending &&
                    (stimer->config & HV_STIMER_ENABLE) &&
                    HV_STIMER_SINT(stimer->config) == sint) {
                        set_bit(stimer->index,
                                hv_vcpu->stimer_pending_bitmap);
                        stimers_pending++;
                }
        }
        if (stimers_pending)
                kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = atomic_read(&synic->sint_to_gsi[sint]);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}
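/*
 * Forward a guest write to a SynIC MSR to userspace via a
 * KVM_EXIT_HYPERV_SYNIC exit, together with a snapshot of the current
 * SynIC page state.
 */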
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
        hv_vcpu->exit.u.synic.msr = msr;
        hv_vcpu->exit.u.synic.control = synic->control;
        hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
        hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        int ret;

        if (!synic->active)
                return 1;

        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                synic->control = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SVERSION:
                if (!host) {
                        ret = 1;
                        break;
                }
                synic->version = data;
                break;
        case HV_X64_MSR_SIEFP:
                if (data & HV_SYNIC_SIEFP_ENABLE)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->evt_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SIMP:
                if (data & HV_SYNIC_SIMP_ENABLE)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->msg_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_EOM: {
                int i;

                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
        }
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
        int ret;

        if (!synic->active)
                return 1;

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                *pdata = synic->control;
                break;
        case HV_X64_MSR_SVERSION:
                *pdata = synic->version;
                break;
        case HV_X64_MSR_SIEFP:
                *pdata = synic->evt_page;
                break;
        case HV_X64_MSR_SIMP:
                *pdata = synic->msg_page;
                break;
        case HV_X64_MSR_EOM:
                *pdata = 0;
                break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}
int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_lapic_irq irq;
        int ret, vector;

        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;

        vector = synic_get_sint_vector(synic_read_sint(synic, sint));
        if (vector < 0)
                return -ENOENT;

        memset(&irq, 0, sizeof(irq));
        irq.dest_id = kvm_apic_id(vcpu->arch.apic);
        irq.dest_mode = APIC_DEST_PHYSICAL;
        irq.delivery_mode = APIC_DM_FIXED;
        irq.vector = vector;
        irq.level = 1;

        ret = kvm_irq_delivery_to_apic(vcpu->kvm, NULL, &irq, NULL);
        trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
        return ret;
}
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vcpu_id);
        if (!synic)
                return -EINVAL;

        return synic_set_irq(synic, sint);
}
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        int i;

        trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        kvm_hv_notify_acked_sint(vcpu, i);
}
static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vcpu_id, u32 sint, int gsi)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vcpu_id);
        if (!synic)
                return -EINVAL;

        if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
                return -EINVAL;

        atomic_set(&synic->sint_to_gsi[sint], gsi);
        return 0;
}
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_kernel_irq_routing_entry *e;
        u32 gsi;

        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));

        for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        if (e->type == KVM_IRQ_ROUTING_HV_SINT)
                                kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
                                                    e->hv_sint.sint, gsi);
                }
        }
}
static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
        int i;

        memset(synic, 0, sizeof(*synic));
        synic->version = HV_SYNIC_VERSION_1;
        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
                atomic_set(&synic->sint_to_gsi[i], -1);
        }
}
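/*
 * Return the partition reference time in 100ns units, either from the
 * reference TSC page parameters or, before those are published, from
 * kvmclock.
 */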
static u64 get_time_ref_counter(struct kvm *kvm)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct kvm_vcpu *vcpu;
        u64 tsc;

        /*
         * Fall back to get_kvmclock_ns() if the guest has not set up
         * the TSC page or the clock isn't stable.
         */
        if (!hv->tsc_ref.tsc_sequence)
                return div_u64(get_kvmclock_ns(kvm), 100);

        vcpu = kvm_get_vcpu(kvm, 0);
        tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
                + hv->tsc_ref.tsc_offset;
}
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
                                bool vcpu_kick)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        set_bit(stimer->index,
                vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
        if (vcpu_kick)
                kvm_vcpu_kick(vcpu);
}
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
                                    stimer->index);

        hrtimer_cancel(&stimer->timer);
        clear_bit(stimer->index,
                  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        stimer->msg_pending = false;
        stimer->exp_time = 0;
}
static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
        struct kvm_vcpu_hv_stimer *stimer;

        stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
        trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
                                     stimer->index);
        stimer_mark_pending(stimer, true);

        return HRTIMER_NORESTART;
}
/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
        u64 time_now;
        ktime_t ktime_now;

        time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
        ktime_now = ktime_get();

        if (stimer->config & HV_STIMER_PERIODIC) {
                if (stimer->exp_time) {
                        if (time_now >= stimer->exp_time) {
                                u64 remainder;

                                div64_u64_rem(time_now - stimer->exp_time,
                                              stimer->count, &remainder);
                                stimer->exp_time =
                                        time_now + (stimer->count - remainder);
                        }
                } else
                        stimer->exp_time = time_now + stimer->count;

                trace_kvm_hv_stimer_start_periodic(
                                        stimer_to_vcpu(stimer)->vcpu_id,
                                        stimer->index,
                                        time_now, stimer->exp_time);

                hrtimer_start(&stimer->timer,
                              ktime_add_ns(ktime_now,
                                           100 * (stimer->exp_time - time_now)),
                              HRTIMER_MODE_ABS);
                return 0;
        }
        stimer->exp_time = stimer->count;
        if (time_now >= stimer->count) {
                /*
                 * Expire timer according to Hypervisor Top-Level Functional
                 * Specification v4(15.3.1):
                 * "If a one shot is enabled and the specified count is in
                 * the past, it will expire immediately."
                 */
                stimer_mark_pending(stimer, false);
                return 0;
        }

        trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
                                           stimer->index,
                                           time_now, stimer->count);

        hrtimer_start(&stimer->timer,
                      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
                      HRTIMER_MODE_ABS);
        return 0;
}
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                             bool host)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

        if (!synic->active && (!host || config))
                return 1;

        trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, config, host);

        stimer_cleanup(stimer);
        if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
                config &= ~HV_STIMER_ENABLE;
        stimer->config = config;
        stimer_mark_pending(stimer, false);
        return 0;
}
static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                            bool host)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

        if (!synic->active && (!host || count))
                return 1;

        trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
                                      stimer->index, count, host);

        stimer_cleanup(stimer);
        stimer->count = count;
        if (stimer->count == 0)
                stimer->config &= ~HV_STIMER_ENABLE;
        else if (stimer->config & HV_STIMER_AUTOENABLE)
                stimer->config |= HV_STIMER_ENABLE;
        stimer_mark_pending(stimer, false);
        return 0;
}
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
        *pconfig = stimer->config;
        return 0;
}
static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
        *pcount = stimer->count;
        return 0;
}
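/*
 * Post a message into a SINT slot of the guest's message page.  The slot
 * is claimed by atomically replacing HVMSG_NONE in message_type; if the
 * slot is still occupied, the msg_pending flag is set so the guest sends
 * EOM when it frees the slot, and -EAGAIN is returned.
 */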
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
                             struct hv_message *src_msg)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *dst_msg;
        int r;
        struct hv_message_page *msg_page;

        if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
                return -ENOENT;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                return -EFAULT;

        msg_page = kmap_atomic(page);
        dst_msg = &msg_page->sint_message[sint];
        if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
                         src_msg->header.message_type) != HVMSG_NONE) {
                dst_msg->header.message_flags.msg_pending = 1;
                r = -EAGAIN;
        } else {
                memcpy(&dst_msg->u.payload, &src_msg->u.payload,
                       src_msg->header.payload_size);
                dst_msg->header.message_type = src_msg->header.message_type;
                dst_msg->header.payload_size = src_msg->header.payload_size;
                r = synic_set_irq(synic, sint);
                if (r >= 1)
                        r = 0;
                else if (r == 0)
                        r = -EFAULT;
        }
        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        return r;
}
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        payload->expiration_time = stimer->exp_time;
        payload->delivery_time = get_time_ref_counter(vcpu->kvm);
        return synic_deliver_msg(vcpu_to_synic(vcpu),
                                 HV_STIMER_SINT(stimer->config), msg);
}
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
        int r;

        stimer->msg_pending = true;
        r = stimer_send_msg(stimer);
        trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, r);
        if (!r) {
                stimer->msg_pending = false;
                if (!(stimer->config & HV_STIMER_PERIODIC))
                        stimer->config &= ~HV_STIMER_ENABLE;
        }
}
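/*
 * Handle KVM_REQ_HV_STIMER: for every timer marked pending, deliver an
 * expiration message if it is due, then re-arm the timer or clean it up.
 */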
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        u64 time_now, exp_time;
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                        stimer = &hv_vcpu->stimer[i];
                        if (stimer->config & HV_STIMER_ENABLE) {
                                exp_time = stimer->exp_time;

                                if (exp_time) {
                                        time_now =
                                                get_time_ref_counter(vcpu->kvm);
                                        if (time_now >= exp_time)
                                                stimer_expiration(stimer);
                                }

                                if ((stimer->config & HV_STIMER_ENABLE) &&
                                    stimer->count)
                                        stimer_start(stimer);
                                else
                                        stimer_cleanup(stimer);
                        }
                }
}
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_cleanup(&hv_vcpu->stimer[i]);
}
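/*
 * Pre-format the timer-expiry message once at init time; only the
 * expiration and delivery timestamps are filled in when it is sent.
 */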
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        memset(&msg->header, 0, sizeof(msg->header));
        msg->header.message_type = HVMSG_TIMER_EXPIRED;
        msg->header.payload_size = sizeof(*payload);

        payload->timer_index = stimer->index;
        payload->expiration_time = 0;
        payload->delivery_time = 0;
}
static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
        memset(stimer, 0, sizeof(*stimer));
        stimer->index = timer_index;
        hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        stimer->timer.function = stimer_timer_callback;
        stimer_prepare_msg(stimer);
}
void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        synic_init(&hv_vcpu->synic);

        bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
}
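/*
 * Activate SynIC emulation for a vCPU.  This is reached when userspace
 * enables KVM_CAP_HYPERV_SYNIC on the vCPU, e.g. (illustrative userspace
 * sketch, error handling omitted):
 *
 *      struct kvm_enable_cap cap = { .cap = KVM_CAP_HYPERV_SYNIC };
 *      ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */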
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
{
        /*
         * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv,
         * so deactivate APICv.
         */
        kvm_vcpu_deactivate_apicv(vcpu);
        vcpu_to_synic(vcpu)->active = true;
        return 0;
}
static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_RESET:
                r = true;
                break;
        }

        return r;
}
static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
        size_t size = ARRAY_SIZE(hv->hv_crash_param);

        if (WARN_ON_ONCE(index >= size))
                return -EINVAL;

        *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
        return 0;
}
static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}
static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

        if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                           hv->hv_crash_param[0],
                           hv->hv_crash_param[1],
                           hv->hv_crash_param[2],
                           hv->hv_crash_param[3],
                           hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}
static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
        size_t size = ARRAY_SIZE(hv->hv_crash_param);

        if (WARN_ON_ONCE(index >= size))
                return -EINVAL;

        hv->hv_crash_param[array_index_nospec(index, size)] = data;
        return 0;
}
/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
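/*
 * Worked example (illustrative, not from the spec): a 2 GHz TSC can be
 * described by kvmclock as tsc_shift = 0, tsc_to_system_mul = 2^31
 * (nsec = ticks / 2).  Then scale = 2^31 * 2^32 / 100 = 2^63 / 100, and
 * ticks * scale / 2^64 = ticks / 200 = nsec / 100, as required.
 */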
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
                                        HV_REFERENCE_TSC_PAGE *tsc_ref)
{
        u64 max_mul;

        if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
                return false;

        /*
         * Check if scale would overflow; if so, use the time ref counter:
         *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
         *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
         *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
         */
        max_mul = 100ull << (32 - hv_clock->tsc_shift);
        if (hv_clock->tsc_to_system_mul >= max_mul)
                return false;

        /*
         * Otherwise compute the scale and offset according to the formulas
         * derived above.
         */
        tsc_ref->tsc_scale =
                mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
                                hv_clock->tsc_to_system_mul,
                                100);

        tsc_ref->tsc_offset = hv_clock->system_time;
        do_div(tsc_ref->tsc_offset, 100);
        tsc_ref->tsc_offset -=
                mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
        return true;
}
void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        u32 tsc_seq;
        u64 gfn;

        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;

        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
        /*
         * Because the TSC parameters only vary when there is a
         * change in the master clock, do not bother with caching.
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
                return;

        /*
         * While we're computing and writing the parameters, force the
         * guest to use the time reference count MSR.
         */
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
                return;

        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
                return;

        /* Ensure sequence is zero before writing the rest of the struct. */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
                return;

        /*
         * Now switch to the TSC page mechanism by writing the sequence.
         */
        tsc_seq++;
        if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
                tsc_seq = 1;

        /* Write the struct entirely before the non-zero sequence. */
        smp_wmb();

        hv->tsc_ref.tsc_sequence = tsc_seq;
        kvm_write_guest(kvm, gfn_to_gpa(gfn),
                        &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
}
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC:
                hv->hv_tsc_page = data;
                if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        case HV_X64_MSR_RESET:
                if (data == 1) {
                        vcpu_debug(vcpu, "hyper-v reset requested\n");
                        kvm_make_request(KVM_REQ_HV_RESET, vcpu);
                }
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}
/* Calculate CPU time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
        cputime_t utime, stime;

        task_cputime_adjusted(current, &utime, &stime);
        return div_u64(cputime_to_nsecs(utime + stime), 100);
}
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        hv->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                hv->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
                hv->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
                                         data, host);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
                                        data, host);
        }
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = hv->hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = hv->hv_hypercall;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                data = get_time_ref_counter(kvm);
                break;
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_get_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 pdata);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        case HV_X64_MSR_RESET:
                data = 0;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX: {
                int r;
                struct kvm_vcpu *v;

                kvm_for_each_vcpu(r, v, vcpu->kvm) {
                        if (v == vcpu) {
                                data = r;
                                break;
                        }
                }
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_APIC_ASSIST_PAGE:
                data = hv->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
                data = current_task_runtime_100ns() + hv->runtime_offset;
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
                                         pdata);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
                                        pdata);
        }
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
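/*
 * Partition-wide MSRs live in struct kvm and are serialized with
 * kvm->lock; per-vCPU MSRs only touch this vCPU's state and need no
 * extra locking.
 */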
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
}
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
}
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}
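/*
 * Per the hypercall ABI, the 64-bit result is returned in RAX in long
 * mode and split across EDX:EAX in 32-bit mode.
 */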
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
        bool longmode;

        longmode = is_64_bit_mode(vcpu);
        if (longmode)
                kvm_register_write(vcpu, VCPU_REGS_RAX, result);
        else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
        }
}
static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
        return 1;
}
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret;
        uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
        bool fast, longmode;

        /*
         * A hypercall generates a #UD from non-zero CPL and real mode,
         * per the Hyper-V spec.
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                         (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif

        code = param & 0xffff;
        fast = (param >> 16) & 0x1;
        rep_cnt = (param >> 32) & 0xfff;
        rep_idx = (param >> 48) & 0xfff;

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        /* Hypercall continuation is not supported yet */
        if (rep_cnt || rep_idx) {
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                goto set_result;
        }

        switch (code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                kvm_vcpu_on_spin(vcpu);
                break;
        case HVCALL_POST_MESSAGE:
        case HVCALL_SIGNAL_EVENT:
                /* don't bother userspace if it has no way to handle it */
                if (!vcpu_to_synic(vcpu)->active) {
                        res = HV_STATUS_INVALID_HYPERCALL_CODE;
                        break;
                }
                vcpu->run->exit_reason = KVM_EXIT_HYPERV;
                vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
                vcpu->run->hyperv.u.hcall.input = param;
                vcpu->run->hyperv.u.hcall.params[0] = ingpa;
                vcpu->run->hyperv.u.hcall.params[1] = outgpa;
                vcpu->arch.complete_userspace_io =
                                kvm_hv_hypercall_complete_userspace;
                return 0;
        default:
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

set_result:
        ret = res | (((u64)rep_done & 0xfff) << 32);
        kvm_hv_hypercall_set_result(vcpu, ret);
        return 1;
}