/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
        return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
        if (sint_value & HV_SYNIC_SINT_MASKED)
                return -1;
        return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}
static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
                                       int vector)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        return true;
        }
        return false;
}
static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
                                      int vector)
{
        int i;
        u64 sint_value;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                sint_value = synic_read_sint(synic, i);
                if (synic_get_sint_vector(sint_value) == vector &&
                    sint_value & HV_SYNIC_SINT_AUTO_EOI)
                        return true;
        }
        return false;
}
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
                          u64 data, bool host)
{
        int vector;

        vector = data & HV_SYNIC_SINT_VECTOR_MASK;
        if (vector < 16 && !host)
                return 1;
        /*
         * Guest may configure multiple SINTs to use the same vector, so
         * we maintain a bitmap of vectors handled by synic, and a
         * bitmap of vectors with auto-eoi behavior. The bitmaps are
         * updated here, and atomically queried on fast paths.
         */

        atomic64_set(&synic->sint[sint], data);

        if (synic_has_vector_connected(synic, vector))
                __set_bit(vector, synic->vec_bitmap);
        else
                __clear_bit(vector, synic->vec_bitmap);

        if (synic_has_vector_auto_eoi(synic, vector))
                __set_bit(vector, synic->auto_eoi_bitmap);
        else
                __clear_bit(vector, synic->auto_eoi_bitmap);

        /* Load SynIC vectors into EOI exit bitmap */
        kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
        return 0;
}
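/*
 * SINT register layout, per the HV_SYNIC_SINT_* definitions used above: the
 * vector is in the low byte, HV_SYNIC_SINT_MASKED is bit 16 and
 * HV_SYNIC_SINT_AUTO_EOI is bit 17.  For example, a write of 0x10025 programs
 * vector 0x25 but leaves the SINT masked, so synic_get_sint_vector() keeps
 * returning -1 until the mask bit is cleared.
 */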
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
        struct kvm_vcpu *vcpu = NULL;
        int i;

        if (vpidx < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, vpidx);
        if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                        return vcpu;
        return NULL;
}
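/*
 * vp_index is initialized to the vcpu index in kvm_hv_vcpu_postcreate(), so
 * the direct kvm_get_vcpu() lookup above is expected to hit almost always;
 * the linear scan is only a fallback for the case where the host has written
 * a different HV_X64_MSR_VP_INDEX.
 */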
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_hv_synic *synic;

        vcpu = get_vcpu_by_vpidx(kvm, vpidx);
        if (!vcpu)
                return NULL;
        synic = vcpu_to_synic(vcpu);
        return (synic->active) ? synic : NULL;
}
static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
                                         u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *msg;
        struct hv_message_page *msg_page;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page)) {
                vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
                         gpa);
                return;
        }
        msg_page = kmap_atomic(page);

        msg = &msg_page->sint_message[sint];
        msg->header.message_flags.msg_pending = 0;

        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int gsi, idx, stimers_pending;

        trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

        if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
                synic_clear_sint_msg_pending(synic, sint);

        /* Try to deliver pending Hyper-V SynIC timer messages */
        stimers_pending = 0;
        for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
                stimer = &hv_vcpu->stimer[idx];
                if (stimer->msg_pending &&
                    (stimer->config & HV_STIMER_ENABLE) &&
                    HV_STIMER_SINT(stimer->config) == sint) {
                        set_bit(stimer->index,
                                hv_vcpu->stimer_pending_bitmap);
                        stimers_pending++;
                }
        }
        if (stimers_pending)
                kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = atomic_read(&synic->sint_to_gsi[sint]);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
        hv_vcpu->exit.u.synic.msr = msr;
        hv_vcpu->exit.u.synic.control = synic->control;
        hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
        hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
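/*
 * synic_exit() defers the MSR write to userspace: it stashes the SynIC state
 * in the vcpu's Hyper-V exit payload and requests a KVM_EXIT_HYPERV exit of
 * type KVM_EXIT_HYPERV_SYNIC, so the VMM can track SynIC page locations.
 */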
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        int ret;

        if (!synic->active)
                return 1;

        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                synic->control = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SVERSION:
                if (!host) {
                        ret = 1;
                        break;
                }
                synic->version = data;
                break;
        case HV_X64_MSR_SIEFP:
                if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->evt_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SIMP:
                if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->msg_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_EOM: {
                int i;

                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
        }
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
        int ret;

        if (!synic->active)
                return 1;

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                *pdata = synic->control;
                break;
        case HV_X64_MSR_SVERSION:
                *pdata = synic->version;
                break;
        case HV_X64_MSR_SIEFP:
                *pdata = synic->evt_page;
                break;
        case HV_X64_MSR_SIMP:
                *pdata = synic->msg_page;
                break;
        case HV_X64_MSR_EOM:
                *pdata = 0;
                break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_lapic_irq irq;
        int ret, vector;

        if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
                return -EINVAL;

        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;

        vector = synic_get_sint_vector(synic_read_sint(synic, sint));
        if (vector < 0)
                return -ENOENT;

        memset(&irq, 0, sizeof(irq));
        irq.shorthand = APIC_DEST_SELF;
        irq.dest_mode = APIC_DEST_PHYSICAL;
        irq.delivery_mode = APIC_DM_FIXED;
        irq.vector = vector;
        irq.level = 1;

        ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
        trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
        return ret;
}
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vpidx);
        if (!synic)
                return -EINVAL;

        return synic_set_irq(synic, sint);
}
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        int i;

        trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        kvm_hv_notify_acked_sint(vcpu, i);
}
static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vpidx);
        if (!synic)
                return -EINVAL;

        if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
                return -EINVAL;

        atomic_set(&synic->sint_to_gsi[sint], gsi);
        return 0;
}
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_kernel_irq_routing_entry *e;
        u32 gsi;

        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));

        for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        if (e->type == KVM_IRQ_ROUTING_HV_SINT)
                                kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
                                                    e->hv_sint.sint, gsi);
                }
        }
}
static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
        int i;

        memset(synic, 0, sizeof(*synic));
        synic->version = HV_SYNIC_VERSION_1;
        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
                atomic_set(&synic->sint_to_gsi[i], -1);
        }
}
static u64 get_time_ref_counter(struct kvm *kvm)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct kvm_vcpu *vcpu;
        u64 tsc;

        /*
         * Fall back to get_kvmclock_ns() when either the guest has not
         * set up the TSC page or the clock isn't stable.
         */
        if (!hv->tsc_ref.tsc_sequence)
                return div_u64(get_kvmclock_ns(kvm), 100);

        vcpu = kvm_get_vcpu(kvm, 0);
        tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
                + hv->tsc_ref.tsc_offset;
}
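/*
 * Everything here is in Hyper-V's 100ns time units: the kvmclock fallback
 * above divides nanoseconds by 100, and the TSC-page parameters computed in
 * compute_tsc_page_parameters() are derived so that
 * (tsc * tsc_scale) >> 64 + tsc_offset yields the same unit.
 */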
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
                                bool vcpu_kick)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        set_bit(stimer->index,
                vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
        if (vcpu_kick)
                kvm_vcpu_kick(vcpu);
}
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
                                    stimer->index);

        hrtimer_cancel(&stimer->timer);
        clear_bit(stimer->index,
                  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        stimer->msg_pending = false;
        stimer->exp_time = 0;
}
static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
        struct kvm_vcpu_hv_stimer *stimer;

        stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
        trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
                                     stimer->index);
        stimer_mark_pending(stimer, true);

        return HRTIMER_NORESTART;
}
/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has the HV_STIMER_ENABLE flag set
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
        u64 time_now;
        ktime_t ktime_now;

        time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
        ktime_now = ktime_get();

        if (stimer->config & HV_STIMER_PERIODIC) {
                if (stimer->exp_time) {
                        if (time_now >= stimer->exp_time) {
                                u64 remainder;

                                div64_u64_rem(time_now - stimer->exp_time,
                                              stimer->count, &remainder);
                                stimer->exp_time =
                                        time_now + (stimer->count - remainder);
                        }
                } else
                        stimer->exp_time = time_now + stimer->count;

                trace_kvm_hv_stimer_start_periodic(
                                        stimer_to_vcpu(stimer)->vcpu_id,
                                        stimer->index,
                                        time_now, stimer->exp_time);

                hrtimer_start(&stimer->timer,
                              ktime_add_ns(ktime_now,
                                           100 * (stimer->exp_time - time_now)),
                              HRTIMER_MODE_ABS);
                return 0;
        }
        stimer->exp_time = stimer->count;
        if (time_now >= stimer->count) {
                /*
                 * Expire timer according to Hypervisor Top-Level Functional
                 * Specification v4 (15.3.1):
                 * "If a one shot is enabled and the specified count is in
                 * the past, it will expire immediately."
                 */
                stimer_mark_pending(stimer, false);
                return 0;
        }

        trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
                                           stimer->index,
                                           time_now, stimer->count);

        hrtimer_start(&stimer->timer,
                      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
                      HRTIMER_MODE_ABS);
        return 0;
}
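/*
 * Note the "100 *" factors above: stimer counts and expiration times are in
 * 100ns units while hrtimers take nanoseconds, so a count of 10,000,000
 * (one second) becomes a 1,000,000,000ns hrtimer expiry.
 */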
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                             bool host)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

        if (!synic->active && (!host || config))
                return 1;

        trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, config, host);

        stimer_cleanup(stimer);
        if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
                config &= ~HV_STIMER_ENABLE;
        stimer->config = config;
        stimer_mark_pending(stimer, false);
        return 0;
}
static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                            bool host)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

        if (!synic->active && (!host || count))
                return 1;

        trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
                                      stimer->index, count, host);

        stimer_cleanup(stimer);
        stimer->count = count;
        if (stimer->count == 0)
                stimer->config &= ~HV_STIMER_ENABLE;
        else if (stimer->config & HV_STIMER_AUTOENABLE)
                stimer->config |= HV_STIMER_ENABLE;
        stimer_mark_pending(stimer, false);
        return 0;
}
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
        *pconfig = stimer->config;
        return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
        *pcount = stimer->count;
        return 0;
}
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
                             struct hv_message *src_msg)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *dst_msg;
        int r;
        struct hv_message_page *msg_page;

        if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
                return -ENOENT;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                return -EFAULT;

        msg_page = kmap_atomic(page);
        dst_msg = &msg_page->sint_message[sint];
        if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
                         src_msg->header.message_type) != HVMSG_NONE) {
                dst_msg->header.message_flags.msg_pending = 1;
                r = -EAGAIN;
        } else {
                memcpy(&dst_msg->u.payload, &src_msg->u.payload,
                       src_msg->header.payload_size);
                dst_msg->header.message_type = src_msg->header.message_type;
                dst_msg->header.payload_size = src_msg->header.payload_size;
                r = synic_set_irq(synic, sint);
                if (r >= 1)
                        r = 0;
                else if (r == 0)
                        r = -EFAULT;
        }
        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        return r;
}
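/*
 * When the destination slot is still occupied, setting msg_pending tells the
 * guest that the hypervisor has another message waiting and that it should
 * write HV_X64_MSR_EOM once the slot is freed; see kvm_hv_notify_acked_sint()
 * for the retry side of this protocol.
 */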
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        payload->expiration_time = stimer->exp_time;
        payload->delivery_time = get_time_ref_counter(vcpu->kvm);
        return synic_deliver_msg(vcpu_to_synic(vcpu),
                                 HV_STIMER_SINT(stimer->config), msg);
}
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
        int r;

        stimer->msg_pending = true;
        r = stimer_send_msg(stimer);
        trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, r);
        if (!r) {
                stimer->msg_pending = false;
                if (!(stimer->config & HV_STIMER_PERIODIC))
                        stimer->config &= ~HV_STIMER_ENABLE;
        }
}
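/*
 * On successful delivery a one-shot timer disables itself above; on failure
 * msg_pending stays set, so the EOM-driven path in kvm_hv_notify_acked_sint()
 * will re-queue the timer and retry the message later.
 */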
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        u64 time_now, exp_time;
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                        stimer = &hv_vcpu->stimer[i];
                        if (stimer->config & HV_STIMER_ENABLE) {
                                exp_time = stimer->exp_time;

                                if (exp_time) {
                                        time_now =
                                                get_time_ref_counter(vcpu->kvm);
                                        if (time_now >= exp_time)
                                                stimer_expiration(stimer);
                                }

                                if ((stimer->config & HV_STIMER_ENABLE) &&
                                    stimer->count) {
                                        if (!stimer->msg_pending)
                                                stimer_start(stimer);
                                } else
                                        stimer_cleanup(stimer);
                        }
                }
}
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_cleanup(&hv_vcpu->stimer[i]);
}
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        memset(&msg->header, 0, sizeof(msg->header));
        msg->header.message_type = HVMSG_TIMER_EXPIRED;
        msg->header.payload_size = sizeof(*payload);

        payload->timer_index = stimer->index;
        payload->expiration_time = 0;
        payload->delivery_time = 0;
}
static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
        memset(stimer, 0, sizeof(*stimer));
        stimer->index = timer_index;
        hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        stimer->timer.function = stimer_timer_callback;
        stimer_prepare_msg(stimer);
}
void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        synic_init(&hv_vcpu->synic);

        bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
}
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);

        hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
}
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

        /*
         * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv,
         * so deactivate APICv.
         */
        kvm_vcpu_deactivate_apicv(vcpu);
        synic->active = true;
        synic->dont_zero_synic_pages = dont_zero_synic_pages;
        return 0;
}
static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_RESET:
                r = true;
                break;
        }

        return r;
}
static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
        size_t size = ARRAY_SIZE(hv->hv_crash_param);

        if (WARN_ON_ONCE(index >= size))
                return -EINVAL;

        *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
        return 0;
}
static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}
static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

        if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                           hv->hv_crash_param[0],
                           hv->hv_crash_param[1],
                           hv->hv_crash_param[2],
                           hv->hv_crash_param[3],
                           hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}
static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
        size_t size = ARRAY_SIZE(hv->hv_crash_param);

        if (WARN_ON_ONCE(index >= size))
                return -EINVAL;

        hv->hv_crash_param[array_index_nospec(index, size)] = data;
        return 0;
}
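/*
 * Both crash parameter accessors clamp the index with array_index_nospec()
 * after the bounds check, so a mispredicted WARN_ON_ONCE() branch cannot be
 * used to speculatively read or write past hv_crash_param[] (Spectre v1).
 */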
/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *             - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *             + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *             - tsc_timestamp * scale / 2^64
 *             + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
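/*
 * Worked example with illustrative numbers: for a 1 GHz TSC kvmclock could
 * use tsc_to_system_mul = 2^32 and tsc_shift = 0, so that nsec = ticks.  The
 * formulas above then give scale = 2^64 / 100, and ticks * scale / 2^64 =
 * ticks / 100, i.e. exactly the 100ns units Hyper-V expects.
 */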
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
                                        HV_REFERENCE_TSC_PAGE *tsc_ref)
{
        u64 max_mul;

        if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
                return false;

        /*
         * Check if scale would overflow; if so, use the time ref counter:
         *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
         *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
         *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
         */
        max_mul = 100ull << (32 - hv_clock->tsc_shift);
        if (hv_clock->tsc_to_system_mul >= max_mul)
                return false;

        /*
         * Otherwise compute the scale and offset according to the formulas
         * derived above.
         */
        tsc_ref->tsc_scale =
                mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
                                hv_clock->tsc_to_system_mul,
                                100);

        tsc_ref->tsc_offset = hv_clock->system_time;
        do_div(tsc_ref->tsc_offset, 100);
        tsc_ref->tsc_offset -=
                mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
        return true;
}
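/*
 * The update protocol below is seqlock-like: the sequence is first forced to
 * 0 (treated by guests as "don't use the TSC page", as is the legacy
 * 0xFFFFFFFF value) while the parameters are rewritten, and a new valid
 * sequence is only stored once the rest of the struct is guaranteed visible.
 */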
void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        u32 tsc_seq;
        u64 gfn;

        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;

        mutex_lock(&kvm->arch.hyperv.hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;

        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
        /*
         * Because the TSC parameters only vary when there is a
         * change in the master clock, do not bother with caching.
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
                goto out_unlock;

        /*
         * While we're computing and writing the parameters, force the
         * guest to use the time reference count MSR.
         */
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
                goto out_unlock;

        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
                goto out_unlock;

        /* Ensure sequence is zero before writing the rest of the struct. */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
                goto out_unlock;

        /*
         * Now switch to the TSC page mechanism by writing the sequence.
         */
        tsc_seq++;
        if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
                tsc_seq = 1;

        /* Write the struct entirely before the non-zero sequence. */
        smp_wmb();

        hv->tsc_ref.tsc_sequence = tsc_seq;
        kvm_write_guest(kvm, gfn_to_gpa(gfn),
                        &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
        mutex_unlock(&kvm->arch.hyperv.hv_lock);
}
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC:
                hv->hv_tsc_page = data;
                if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        case HV_X64_MSR_RESET:
                if (data == 1) {
                        vcpu_debug(vcpu, "hyper-v reset requested\n");
                        kvm_make_request(KVM_REQ_HV_RESET, vcpu);
                }
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}
/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);

        return div_u64(utime + stime, 100);
}
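/*
 * The four synthetic timers expose their MSRs interleaved
 * (STIMER0_CONFIG, STIMER0_COUNT, STIMER1_CONFIG, STIMER1_COUNT, ...), which
 * is why the stimer cases below and in kvm_hv_get_msr() divide the MSR
 * offset by two to recover the timer index.
 */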
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
                if (!host)
                        return 1;
                hv->vp_index = (u32)data;
                break;
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        hv->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                hv->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
                hv->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
                                         data, host);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
                                        data, host);
        }
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = hv->hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = hv->hv_hypercall;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                data = get_time_ref_counter(kvm);
                break;
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_get_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 pdata);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        case HV_X64_MSR_RESET:
                data = 0;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
                data = hv->vp_index;
                break;
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_APIC_ASSIST_PAGE:
                data = hv->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
                data = current_task_runtime_100ns() + hv->runtime_offset;
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
                                         pdata);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
                                        pdata);
        }
        case HV_X64_MSR_TSC_FREQUENCY:
                data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
                break;
        case HV_X64_MSR_APIC_FREQUENCY:
                data = APIC_BUS_FREQUENCY;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
}
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
        bool longmode;

        longmode = is_64_bit_mode(vcpu);
        if (longmode)
                kvm_register_write(vcpu, VCPU_REGS_RAX, result);
        else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
        }
}
static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
        return kvm_skip_emulated_instruction(vcpu);
}
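/*
 * Hypercall input value layout, as decoded below: the call code is in the
 * low 16 bits, the "fast" flag is bit 16, the rep count occupies bits 43:32
 * and the rep start index bits 59:48.  An input of 0x10008, for instance,
 * would decode as call code 0x8 with the fast bit set and no rep semantics.
 */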
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret;
        uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
        bool fast, longmode;

        /*
         * hypercall generates UD from non-zero cpl and real mode
         * per HYPER-V spec
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif

        code = param & 0xffff;
        fast = (param >> 16) & 0x1;
        rep_cnt = (param >> 32) & 0xfff;
        rep_idx = (param >> 48) & 0xfff;

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        /* Hypercall continuation is not supported yet */
        if (rep_cnt || rep_idx) {
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                goto set_result;
        }

        switch (code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                kvm_vcpu_on_spin(vcpu, true);
                break;
        case HVCALL_POST_MESSAGE:
        case HVCALL_SIGNAL_EVENT:
                /* don't bother userspace if it has no way to handle it */
                if (!vcpu_to_synic(vcpu)->active) {
                        res = HV_STATUS_INVALID_HYPERCALL_CODE;
                        break;
                }
                vcpu->run->exit_reason = KVM_EXIT_HYPERV;
                vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
                vcpu->run->hyperv.u.hcall.input = param;
                vcpu->run->hyperv.u.hcall.params[0] = ingpa;
                vcpu->run->hyperv.u.hcall.params[1] = outgpa;
                vcpu->arch.complete_userspace_io =
                                kvm_hv_hypercall_complete_userspace;
                return 0;
        default:
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

set_result:
        ret = res | (((u64)rep_done & 0xfff) << 32);
        kvm_hv_hypercall_set_result(vcpu, ret);
        return 1;
}