// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */
#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
39 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
42 static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
44 return atomic64_read(&synic->sint[sint]);
47 static inline int synic_get_sint_vector(u64 sint_value)
49 if (sint_value & HV_SYNIC_SINT_MASKED)
51 return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
54 static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
59 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
60 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
66 static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
72 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
73 sint_value = synic_read_sint(synic, i);
74 if (synic_get_sint_vector(sint_value) == vector &&
75 sint_value & HV_SYNIC_SINT_AUTO_EOI)
81 static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
84 if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
87 if (synic_has_vector_connected(synic, vector))
88 __set_bit(vector, synic->vec_bitmap);
90 __clear_bit(vector, synic->vec_bitmap);
92 if (synic_has_vector_auto_eoi(synic, vector))
93 __set_bit(vector, synic->auto_eoi_bitmap);
95 __clear_bit(vector, synic->auto_eoi_bitmap);
98 static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
101 int vector, old_vector;
104 vector = data & HV_SYNIC_SINT_VECTOR_MASK;
105 masked = data & HV_SYNIC_SINT_MASKED;
108 * Valid vectors are 16-255, however, nested Hyper-V attempts to write
109 * default '0x10000' value on boot and this should not #GP. We need to
110 * allow zero-initing the register from host as well.
112 if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
115 * Guest may configure multiple SINTs to use the same vector, so
116 * we maintain a bitmap of vectors handled by synic, and a
117 * bitmap of vectors with auto-eoi behavior. The bitmaps are
118 * updated here, and atomically queried on fast paths.
120 old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;
122 atomic64_set(&synic->sint[sint], data);
124 synic_update_vector(synic, old_vector);
126 synic_update_vector(synic, vector);
128 /* Load SynIC vectors into EOI exit bitmap */
129 kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
133 static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
135 struct kvm_vcpu *vcpu = NULL;
138 if (vpidx >= KVM_MAX_VCPUS)
141 vcpu = kvm_get_vcpu(kvm, vpidx);
142 if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
144 kvm_for_each_vcpu(i, vcpu, kvm)
145 if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
150 static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
152 struct kvm_vcpu *vcpu;
153 struct kvm_vcpu_hv_synic *synic;
155 vcpu = get_vcpu_by_vpidx(kvm, vpidx);
158 synic = vcpu_to_synic(vcpu);
159 return (synic->active) ? synic : NULL;
162 static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
164 struct kvm *kvm = vcpu->kvm;
165 struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
166 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
167 struct kvm_vcpu_hv_stimer *stimer;
170 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
/* Try to deliver pending Hyper-V SynIC timer messages */
173 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
174 stimer = &hv_vcpu->stimer[idx];
175 if (stimer->msg_pending && stimer->config.enable &&
176 !stimer->config.direct_mode &&
177 stimer->config.sintx == sint)
178 stimer_mark_pending(stimer, false);
181 idx = srcu_read_lock(&kvm->irq_srcu);
182 gsi = atomic_read(&synic->sint_to_gsi[sint]);
184 kvm_notify_acked_gsi(kvm, gsi);
185 srcu_read_unlock(&kvm->irq_srcu, idx);
188 static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
190 struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
191 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
193 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
194 hv_vcpu->exit.u.synic.msr = msr;
195 hv_vcpu->exit.u.synic.control = synic->control;
196 hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
197 hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
199 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
202 static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
203 u32 msr, u64 data, bool host)
205 struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
208 if (!synic->active && (!host || data))
211 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
215 case HV_X64_MSR_SCONTROL:
216 synic->control = data;
218 synic_exit(synic, msr);
220 case HV_X64_MSR_SVERSION:
225 synic->version = data;
227 case HV_X64_MSR_SIEFP:
228 if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
229 !synic->dont_zero_synic_pages)
230 if (kvm_clear_guest(vcpu->kvm,
231 data & PAGE_MASK, PAGE_SIZE)) {
235 synic->evt_page = data;
237 synic_exit(synic, msr);
239 case HV_X64_MSR_SIMP:
240 if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
241 !synic->dont_zero_synic_pages)
242 if (kvm_clear_guest(vcpu->kvm,
243 data & PAGE_MASK, PAGE_SIZE)) {
247 synic->msg_page = data;
249 synic_exit(synic, msr);
251 case HV_X64_MSR_EOM: {
257 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
258 kvm_hv_notify_acked_sint(vcpu, i);
261 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
262 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
271 static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
276 if (!synic->active && !host)
281 case HV_X64_MSR_SCONTROL:
282 *pdata = synic->control;
284 case HV_X64_MSR_SVERSION:
285 *pdata = synic->version;
287 case HV_X64_MSR_SIEFP:
288 *pdata = synic->evt_page;
290 case HV_X64_MSR_SIMP:
291 *pdata = synic->msg_page;
296 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
297 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
306 static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
308 struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
309 struct kvm_lapic_irq irq;
312 if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
315 if (sint >= ARRAY_SIZE(synic->sint))
318 vector = synic_get_sint_vector(synic_read_sint(synic, sint));
322 memset(&irq, 0, sizeof(irq));
323 irq.shorthand = APIC_DEST_SELF;
324 irq.dest_mode = APIC_DEST_PHYSICAL;
325 irq.delivery_mode = APIC_DM_FIXED;
329 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
330 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
334 int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
336 struct kvm_vcpu_hv_synic *synic;
338 synic = synic_get(kvm, vpidx);
342 return synic_set_irq(synic, sint);
345 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
347 struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
350 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
352 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
353 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
354 kvm_hv_notify_acked_sint(vcpu, i);
357 static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
359 struct kvm_vcpu_hv_synic *synic;
361 synic = synic_get(kvm, vpidx);
365 if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
368 atomic_set(&synic->sint_to_gsi[sint], gsi);
372 void kvm_hv_irq_routing_update(struct kvm *kvm)
374 struct kvm_irq_routing_table *irq_rt;
375 struct kvm_kernel_irq_routing_entry *e;
378 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
379 lockdep_is_held(&kvm->irq_lock));
381 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
382 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
383 if (e->type == KVM_IRQ_ROUTING_HV_SINT)
384 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
385 e->hv_sint.sint, gsi);
390 static void synic_init(struct kvm_vcpu_hv_synic *synic)
394 memset(synic, 0, sizeof(*synic));
395 synic->version = HV_SYNIC_VERSION_1;
396 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
397 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
398 atomic_set(&synic->sint_to_gsi[i], -1);
402 static u64 get_time_ref_counter(struct kvm *kvm)
404 struct kvm_hv *hv = &kvm->arch.hyperv;
405 struct kvm_vcpu *vcpu;
u64 tsc;

/*
 * The guest has not set up the TSC page or the clock isn't
 * stable; fall back to get_kvmclock_ns().
 */
412 if (!hv->tsc_ref.tsc_sequence)
413 return div_u64(get_kvmclock_ns(kvm), 100);
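/*
 * Otherwise use the guest-visible TSC page formula described in the
 * comment above compute_tsc_page_parameters():
 *   time_ref (100ns units) = ticks * tsc_scale / 2^64 + tsc_offset
 */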
415 vcpu = kvm_get_vcpu(kvm, 0);
416 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
417 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
418 + hv->tsc_ref.tsc_offset;
421 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
424 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
426 set_bit(stimer->index,
427 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
428 kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
433 static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
435 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
437 trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
440 hrtimer_cancel(&stimer->timer);
441 clear_bit(stimer->index,
442 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
443 stimer->msg_pending = false;
444 stimer->exp_time = 0;
447 static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
449 struct kvm_vcpu_hv_stimer *stimer;
451 stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
452 trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
454 stimer_mark_pending(stimer, true);
456 return HRTIMER_NORESTART;
460 * stimer_start() assumptions:
461 * a) stimer->count is not equal to 0
462 * b) stimer->config has HV_STIMER_ENABLE flag
464 static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
469 time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
470 ktime_now = ktime_get();
472 if (stimer->config.periodic) {
473 if (stimer->exp_time) {
474 if (time_now >= stimer->exp_time) {
477 div64_u64_rem(time_now - stimer->exp_time,
478 stimer->count, &remainder);
480 time_now + (stimer->count - remainder);
483 stimer->exp_time = time_now + stimer->count;
485 trace_kvm_hv_stimer_start_periodic(
486 stimer_to_vcpu(stimer)->vcpu_id,
488 time_now, stimer->exp_time);
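/*
 * exp_time and time_now are in 100ns units (Hyper-V reference time),
 * while hrtimer works in nanoseconds, hence the factor of 100 below.
 */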
490 hrtimer_start(&stimer->timer,
491 ktime_add_ns(ktime_now,
492 100 * (stimer->exp_time - time_now)),
496 stimer->exp_time = stimer->count;
497 if (time_now >= stimer->count) {
499 * Expire timer according to Hypervisor Top-Level Functional
500 * specification v4(15.3.1):
501 * "If a one shot is enabled and the specified count is in
502 * the past, it will expire immediately."
504 stimer_mark_pending(stimer, false);
508 trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
510 time_now, stimer->count);
512 hrtimer_start(&stimer->timer,
513 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
518 static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
521 union hv_stimer_config new_config = {.as_uint64 = config},
522 old_config = {.as_uint64 = stimer->config.as_uint64};
523 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
524 struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
526 if (!synic->active && (!host || config))
529 trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
530 stimer->index, config, host);
532 stimer_cleanup(stimer);
533 if (old_config.enable &&
534 !new_config.direct_mode && new_config.sintx == 0)
535 new_config.enable = 0;
536 stimer->config.as_uint64 = new_config.as_uint64;
538 if (stimer->config.enable)
539 stimer_mark_pending(stimer, false);
544 static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
547 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
548 struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
550 if (!synic->active && (!host || count))
553 trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
554 stimer->index, count, host);
556 stimer_cleanup(stimer);
557 stimer->count = count;
559 if (stimer->count == 0)
560 stimer->config.enable = 0;
561 else if (stimer->config.auto_enable)
562 stimer->config.enable = 1;
565 if (stimer->config.enable)
566 stimer_mark_pending(stimer, false);
571 static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
573 *pconfig = stimer->config.as_uint64;
577 static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
579 *pcount = stimer->count;
583 static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
584 struct hv_message *src_msg, bool no_retry)
586 struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
587 int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
589 struct hv_message_header hv_hdr;
592 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
595 msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
598 * Strictly following the spec-mandated ordering would assume setting
599 * .msg_pending before checking .message_type. However, this function
600 * is only called in vcpu context so the entire update is atomic from
601 * guest POV and thus the exact order here doesn't matter.
603 r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
604 msg_off + offsetof(struct hv_message,
605 header.message_type),
606 sizeof(hv_hdr.message_type));
610 if (hv_hdr.message_type != HVMSG_NONE) {
614 hv_hdr.message_flags.msg_pending = 1;
615 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
616 &hv_hdr.message_flags,
618 offsetof(struct hv_message,
619 header.message_flags),
620 sizeof(hv_hdr.message_flags));
626 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
627 sizeof(src_msg->header) +
628 src_msg->header.payload_size);
632 r = synic_set_irq(synic, sint);
640 static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
642 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
643 struct hv_message *msg = &stimer->msg;
644 struct hv_timer_message_payload *payload =
645 (struct hv_timer_message_payload *)&msg->u.payload;
648 * To avoid piling up periodic ticks, don't retry message
649 * delivery for them (within "lazy" lost ticks policy).
651 bool no_retry = stimer->config.periodic;
653 payload->expiration_time = stimer->exp_time;
654 payload->delivery_time = get_time_ref_counter(vcpu->kvm);
655 return synic_deliver_msg(vcpu_to_synic(vcpu),
656 stimer->config.sintx, msg,
660 static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
662 struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
663 struct kvm_lapic_irq irq = {
664 .delivery_mode = APIC_DM_FIXED,
665 .vector = stimer->config.apic_vector
668 if (lapic_in_kernel(vcpu))
669 return !kvm_apic_set_irq(vcpu, &irq, NULL);
673 static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
675 int r, direct = stimer->config.direct_mode;
677 stimer->msg_pending = true;
679 r = stimer_send_msg(stimer);
681 r = stimer_notify_direct(stimer);
682 trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
683 stimer->index, direct, r);
685 stimer->msg_pending = false;
686 if (!(stimer->config.periodic))
687 stimer->config.enable = 0;
691 void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
693 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
694 struct kvm_vcpu_hv_stimer *stimer;
695 u64 time_now, exp_time;
698 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
699 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
700 stimer = &hv_vcpu->stimer[i];
701 if (stimer->config.enable) {
702 exp_time = stimer->exp_time;
706 get_time_ref_counter(vcpu->kvm);
707 if (time_now >= exp_time)
708 stimer_expiration(stimer);
711 if ((stimer->config.enable) &&
713 if (!stimer->msg_pending)
714 stimer_start(stimer);
716 stimer_cleanup(stimer);
721 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
723 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
726 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
727 stimer_cleanup(&hv_vcpu->stimer[i]);
730 bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
732 if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
734 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
736 EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
738 bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
739 struct hv_vp_assist_page *assist_page)
741 if (!kvm_hv_assist_page_enabled(vcpu))
743 return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
744 assist_page, sizeof(*assist_page));
746 EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
748 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
750 struct hv_message *msg = &stimer->msg;
751 struct hv_timer_message_payload *payload =
752 (struct hv_timer_message_payload *)&msg->u.payload;
754 memset(&msg->header, 0, sizeof(msg->header));
755 msg->header.message_type = HVMSG_TIMER_EXPIRED;
756 msg->header.payload_size = sizeof(*payload);
758 payload->timer_index = stimer->index;
759 payload->expiration_time = 0;
760 payload->delivery_time = 0;
763 static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
765 memset(stimer, 0, sizeof(*stimer));
766 stimer->index = timer_index;
767 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
768 stimer->timer.function = stimer_timer_callback;
769 stimer_prepare_msg(stimer);
772 void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
774 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
777 synic_init(&hv_vcpu->synic);
779 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
780 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
781 stimer_init(&hv_vcpu->stimer[i], i);
784 void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
786 struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
788 hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
791 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
793 struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
/*
 * Hyper-V SynIC auto EOI SINTs are not compatible with APICv,
 * so deactivate APICv here.
 */
799 kvm_vcpu_deactivate_apicv(vcpu);
800 synic->active = true;
801 synic->dont_zero_synic_pages = dont_zero_synic_pages;
805 static bool kvm_hv_msr_partition_wide(u32 msr)
810 case HV_X64_MSR_GUEST_OS_ID:
811 case HV_X64_MSR_HYPERCALL:
812 case HV_X64_MSR_REFERENCE_TSC:
813 case HV_X64_MSR_TIME_REF_COUNT:
814 case HV_X64_MSR_CRASH_CTL:
815 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
816 case HV_X64_MSR_RESET:
817 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
818 case HV_X64_MSR_TSC_EMULATION_CONTROL:
819 case HV_X64_MSR_TSC_EMULATION_STATUS:
827 static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
828 u32 index, u64 *pdata)
830 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
831 size_t size = ARRAY_SIZE(hv->hv_crash_param);
833 if (WARN_ON_ONCE(index >= size))
836 *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
840 static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
842 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
844 *pdata = hv->hv_crash_ctl;
848 static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
850 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
853 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
855 if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) {
857 vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
858 hv->hv_crash_param[0],
859 hv->hv_crash_param[1],
860 hv->hv_crash_param[2],
861 hv->hv_crash_param[3],
862 hv->hv_crash_param[4]);
864 /* Send notification about crash to user space */
865 kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
871 static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
874 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
875 size_t size = ARRAY_SIZE(hv->hv_crash_param);
877 if (WARN_ON_ONCE(index >= size))
880 hv->hv_crash_param[array_index_nospec(index, size)] = data;
885 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
886 * between them is possible:
889 * nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
893 * nsec/100 = ticks * scale / 2^64 + offset
895 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
896 * By dividing the kvmclock formula by 100 and equating what's left we get:
897 * ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
898 * scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
899 * scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
901 * Now expand the kvmclock formula and divide by 100:
902 * nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
903 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
905 * nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
906 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
907 * + system_time / 100
909 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
910 * nsec/100 = ticks * scale / 2^64
911 * - tsc_timestamp * scale / 2^64
912 * + system_time / 100
914 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
915 * offset = system_time / 100 - tsc_timestamp * scale / 2^64
917 * These two equivalencies are implemented in this function.
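 *
 * As an illustrative example (values assumed here, not taken from the
 * spec): a 1 GHz guest TSC could be described by kvmclock as
 * tsc_shift = 1 and tsc_to_system_mul = 2^31.  The formula above then
 * gives scale = 2^31 * 2^(32+1) / 100 = 2^64 / 100, so
 * ticks * scale / 2^64 = ticks / 100, i.e. one 100ns unit per 100 TSC
 * ticks, as expected for a 1 GHz counter.
 */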
919 static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
920 HV_REFERENCE_TSC_PAGE *tsc_ref)
924 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
928 * check if scale would overflow, if so we use the time ref counter
929 * tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
930 * tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
931 * tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
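 *
 * For instance (illustrative values), with tsc_shift == 7 this rejects
 * any tsc_to_system_mul >= 100 * 2^25, which a 32-bit multiplier can
 * actually reach; for small or negative shifts the bound exceeds
 * U32_MAX and the check never triggers.
 */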
933 max_mul = 100ull << (32 - hv_clock->tsc_shift);
934 if (hv_clock->tsc_to_system_mul >= max_mul)
938 * Otherwise compute the scale and offset according to the formulas
942 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
943 hv_clock->tsc_to_system_mul,
946 tsc_ref->tsc_offset = hv_clock->system_time;
947 do_div(tsc_ref->tsc_offset, 100);
948 tsc_ref->tsc_offset -=
949 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
953 void kvm_hv_setup_tsc_page(struct kvm *kvm,
954 struct pvclock_vcpu_time_info *hv_clock)
956 struct kvm_hv *hv = &kvm->arch.hyperv;
960 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
961 BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);
963 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
966 mutex_lock(&kvm->arch.hyperv.hv_lock);
967 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
970 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
972 * Because the TSC parameters only vary when there is a
973 * change in the master clock, do not bother with caching.
975 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
976 &tsc_seq, sizeof(tsc_seq))))
980 * While we're computing and writing the parameters, force the
981 * guest to use the time reference count MSR.
983 hv->tsc_ref.tsc_sequence = 0;
984 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
985 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
988 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
991 /* Ensure sequence is zero before writing the rest of the struct. */
993 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
997 * Now switch to the TSC page mechanism by writing the sequence.
1000 if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
1003 /* Write the struct entirely before the non-zero sequence. */
1006 hv->tsc_ref.tsc_sequence = tsc_seq;
1007 kvm_write_guest(kvm, gfn_to_gpa(gfn),
1008 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
1010 mutex_unlock(&kvm->arch.hyperv.hv_lock);
1013 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1016 struct kvm *kvm = vcpu->kvm;
1017 struct kvm_hv *hv = &kvm->arch.hyperv;
1020 case HV_X64_MSR_GUEST_OS_ID:
1021 hv->hv_guest_os_id = data;
1022 /* setting guest os id to zero disables hypercall page */
1023 if (!hv->hv_guest_os_id)
1024 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1026 case HV_X64_MSR_HYPERCALL: {
1031 /* if guest os id is not set hypercall should remain disabled */
1032 if (!hv->hv_guest_os_id)
1034 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1035 hv->hv_hypercall = data;
1038 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1039 addr = gfn_to_hva(kvm, gfn);
1040 if (kvm_is_error_hva(addr))
1042 kvm_x86_ops->patch_hypercall(vcpu, instructions);
1043 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1044 if (__copy_to_user((void __user *)addr, instructions, 4))
1046 hv->hv_hypercall = data;
1047 mark_page_dirty(kvm, gfn);
1050 case HV_X64_MSR_REFERENCE_TSC:
1051 hv->hv_tsc_page = data;
1052 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
1053 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1055 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1056 return kvm_hv_msr_set_crash_data(vcpu,
1057 msr - HV_X64_MSR_CRASH_P0,
1059 case HV_X64_MSR_CRASH_CTL:
1060 return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
1061 case HV_X64_MSR_RESET:
1063 vcpu_debug(vcpu, "hyper-v reset requested\n");
1064 kvm_make_request(KVM_REQ_HV_RESET, vcpu);
1067 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1068 hv->hv_reenlightenment_control = data;
1070 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1071 hv->hv_tsc_emulation_control = data;
1073 case HV_X64_MSR_TSC_EMULATION_STATUS:
1074 hv->hv_tsc_emulation_status = data;
1076 case HV_X64_MSR_TIME_REF_COUNT:
1077 /* read-only, but still ignore it if host-initiated */
vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1089 /* Calculate cpu time spent by current task in 100ns units */
1090 static u64 current_task_runtime_100ns(void)
1094 task_cputime_adjusted(current, &utime, &stime);
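/*
 * utime/stime are returned in nanoseconds; dividing by 100 yields the
 * 100ns units that HV_X64_MSR_VP_RUNTIME expects.
 */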
1096 return div_u64(utime + stime, 100);
1099 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1101 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
1104 case HV_X64_MSR_VP_INDEX: {
1105 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
1106 int vcpu_idx = kvm_vcpu_get_idx(vcpu);
1107 u32 new_vp_index = (u32)data;
1109 if (!host || new_vp_index >= KVM_MAX_VCPUS)
1112 if (new_vp_index == hv_vcpu->vp_index)
1116 * The VP index is initialized to vcpu_index by
1117 * kvm_hv_vcpu_postcreate so they initially match. Now the
1118 * VP index is changing, adjust num_mismatched_vp_indexes if
1119 * it now matches or no longer matches vcpu_idx.
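 *
 * For example, with vcpu_idx == 2: changing vp_index from 2 to 5 adds
 * a mismatch (increment), while changing it from 5 back to 2 removes
 * one (decrement).
 */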
1121 if (hv_vcpu->vp_index == vcpu_idx)
1122 atomic_inc(&hv->num_mismatched_vp_indexes);
1123 else if (new_vp_index == vcpu_idx)
1124 atomic_dec(&hv->num_mismatched_vp_indexes);
1126 hv_vcpu->vp_index = new_vp_index;
1129 case HV_X64_MSR_VP_ASSIST_PAGE: {
1133 if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1134 hv_vcpu->hv_vapic = data;
1135 if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
1139 gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
1140 addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
1141 if (kvm_is_error_hva(addr))
/*
 * Clear apic_assist portion of struct hv_vp_assist_page
 * only, there can be valuable data in the rest which needs
 * to be preserved e.g. on migration.
 */
1149 if (__clear_user((void __user *)addr, sizeof(u32)))
1151 hv_vcpu->hv_vapic = data;
1152 kvm_vcpu_mark_page_dirty(vcpu, gfn);
1153 if (kvm_lapic_enable_pv_eoi(vcpu,
1154 gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1155 sizeof(struct hv_vp_assist_page)))
1159 case HV_X64_MSR_EOI:
1160 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1161 case HV_X64_MSR_ICR:
1162 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1163 case HV_X64_MSR_TPR:
1164 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1165 case HV_X64_MSR_VP_RUNTIME:
1168 hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1170 case HV_X64_MSR_SCONTROL:
1171 case HV_X64_MSR_SVERSION:
1172 case HV_X64_MSR_SIEFP:
1173 case HV_X64_MSR_SIMP:
1174 case HV_X64_MSR_EOM:
1175 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1176 return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
1177 case HV_X64_MSR_STIMER0_CONFIG:
1178 case HV_X64_MSR_STIMER1_CONFIG:
1179 case HV_X64_MSR_STIMER2_CONFIG:
1180 case HV_X64_MSR_STIMER3_CONFIG: {
1181 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1183 return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
1186 case HV_X64_MSR_STIMER0_COUNT:
1187 case HV_X64_MSR_STIMER1_COUNT:
1188 case HV_X64_MSR_STIMER2_COUNT:
1189 case HV_X64_MSR_STIMER3_COUNT: {
1190 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1192 return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
1195 case HV_X64_MSR_TSC_FREQUENCY:
1196 case HV_X64_MSR_APIC_FREQUENCY:
1197 /* read-only, but still ignore it if host-initiated */
vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1210 static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1213 struct kvm *kvm = vcpu->kvm;
1214 struct kvm_hv *hv = &kvm->arch.hyperv;
1217 case HV_X64_MSR_GUEST_OS_ID:
1218 data = hv->hv_guest_os_id;
1220 case HV_X64_MSR_HYPERCALL:
1221 data = hv->hv_hypercall;
1223 case HV_X64_MSR_TIME_REF_COUNT:
1224 data = get_time_ref_counter(kvm);
1226 case HV_X64_MSR_REFERENCE_TSC:
1227 data = hv->hv_tsc_page;
1229 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1230 return kvm_hv_msr_get_crash_data(vcpu,
1231 msr - HV_X64_MSR_CRASH_P0,
1233 case HV_X64_MSR_CRASH_CTL:
1234 return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
1235 case HV_X64_MSR_RESET:
1238 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1239 data = hv->hv_reenlightenment_control;
1241 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1242 data = hv->hv_tsc_emulation_control;
1244 case HV_X64_MSR_TSC_EMULATION_STATUS:
1245 data = hv->hv_tsc_emulation_status;
1248 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1256 static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1260 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
1263 case HV_X64_MSR_VP_INDEX:
1264 data = hv_vcpu->vp_index;
1266 case HV_X64_MSR_EOI:
1267 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1268 case HV_X64_MSR_ICR:
1269 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1270 case HV_X64_MSR_TPR:
1271 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1272 case HV_X64_MSR_VP_ASSIST_PAGE:
1273 data = hv_vcpu->hv_vapic;
1275 case HV_X64_MSR_VP_RUNTIME:
1276 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1278 case HV_X64_MSR_SCONTROL:
1279 case HV_X64_MSR_SVERSION:
1280 case HV_X64_MSR_SIEFP:
1281 case HV_X64_MSR_SIMP:
1282 case HV_X64_MSR_EOM:
1283 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1284 return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
1285 case HV_X64_MSR_STIMER0_CONFIG:
1286 case HV_X64_MSR_STIMER1_CONFIG:
1287 case HV_X64_MSR_STIMER2_CONFIG:
1288 case HV_X64_MSR_STIMER3_CONFIG: {
1289 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1291 return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
1294 case HV_X64_MSR_STIMER0_COUNT:
1295 case HV_X64_MSR_STIMER1_COUNT:
1296 case HV_X64_MSR_STIMER2_COUNT:
1297 case HV_X64_MSR_STIMER3_COUNT: {
1298 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1300 return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
1303 case HV_X64_MSR_TSC_FREQUENCY:
1304 data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
1306 case HV_X64_MSR_APIC_FREQUENCY:
1307 data = APIC_BUS_FREQUENCY;
1310 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1317 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1319 if (kvm_hv_msr_partition_wide(msr)) {
1322 mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
1323 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1324 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
1327 return kvm_hv_set_msr(vcpu, msr, data, host);
1330 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1332 if (kvm_hv_msr_partition_wide(msr)) {
1335 mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
1336 r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
1337 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
1340 return kvm_hv_get_msr(vcpu, msr, pdata, host);
1343 static __always_inline unsigned long *sparse_set_to_vcpu_mask(
1344 struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
1345 u64 *vp_bitmap, unsigned long *vcpu_bitmap)
1347 struct kvm_hv *hv = &kvm->arch.hyperv;
1348 struct kvm_vcpu *vcpu;
1349 int i, bank, sbank = 0;
1351 memset(vp_bitmap, 0,
1352 KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
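/*
 * In the hypercall's sparse VP-set format each bit in valid_bank_mask
 * selects one 64-VP bank; sparse_banks[] carries only the banks that
 * are present, in ascending bank order, which the loop below unpacks
 * into a dense per-VP bitmap.
 */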
1353 for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1354 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1355 vp_bitmap[bank] = sparse_banks[sbank++];
1357 if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
1358 /* for all vcpus vp_index == vcpu_idx */
1359 return (unsigned long *)vp_bitmap;
1362 bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
1363 kvm_for_each_vcpu(i, vcpu, kvm) {
1364 if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
1365 (unsigned long *)vp_bitmap))
1366 __set_bit(i, vcpu_bitmap);
1371 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
1372 u16 rep_cnt, bool ex)
1374 struct kvm *kvm = current_vcpu->kvm;
struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv;
1376 struct hv_tlb_flush_ex flush_ex;
1377 struct hv_tlb_flush flush;
1378 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1379 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1380 unsigned long *vcpu_mask;
1381 u64 valid_bank_mask;
1382 u64 sparse_banks[64];
1383 int sparse_banks_len;
1387 if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
1388 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1390 trace_kvm_hv_flush_tlb(flush.processor_mask,
1391 flush.address_space, flush.flags);
1393 valid_bank_mask = BIT_ULL(0);
1394 sparse_banks[0] = flush.processor_mask;
1397 * Work around possible WS2012 bug: it sends hypercalls
1398 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
1399 * while also expecting us to flush something and crashing if
1400 * we don't. Let's treat processor_mask == 0 same as
1401 * HV_FLUSH_ALL_PROCESSORS.
1403 all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
1404 flush.processor_mask == 0;
1406 if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
1408 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1410 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
1411 flush_ex.hv_vp_set.format,
1412 flush_ex.address_space,
1415 valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
1416 all_cpus = flush_ex.hv_vp_set.format !=
1417 HV_GENERIC_SET_SPARSE_4K;
1420 bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
1421 sizeof(sparse_banks[0]);
1423 if (!sparse_banks_len && !all_cpus)
1428 ingpa + offsetof(struct hv_tlb_flush_ex,
1429 hv_vp_set.bank_contents),
1432 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1435 cpumask_clear(&hv_vcpu->tlb_flush);
1437 vcpu_mask = all_cpus ? NULL :
1438 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1439 vp_bitmap, vcpu_bitmap);
1442 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
1443 * analyze it here, flush TLB regardless of the specified address space.
1445 kvm_make_vcpus_request_mask(kvm,
1446 KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
1447 vcpu_mask, &hv_vcpu->tlb_flush);
1450 /* We always do full TLB flush, set rep_done = rep_cnt. */
1451 return (u64)HV_STATUS_SUCCESS |
1452 ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
1455 static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
1456 unsigned long *vcpu_bitmap)
1458 struct kvm_lapic_irq irq = {
1459 .delivery_mode = APIC_DM_FIXED,
1462 struct kvm_vcpu *vcpu;
1465 kvm_for_each_vcpu(i, vcpu, kvm) {
1466 if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
1469 /* We fail only when APIC is disabled */
1470 kvm_apic_set_irq(vcpu, &irq, NULL);
1474 static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
1477 struct kvm *kvm = current_vcpu->kvm;
1478 struct hv_send_ipi_ex send_ipi_ex;
1479 struct hv_send_ipi send_ipi;
1480 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1481 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1482 unsigned long *vcpu_mask;
1483 unsigned long valid_bank_mask;
1484 u64 sparse_banks[64];
1485 int sparse_banks_len;
1491 if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
1493 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1494 sparse_banks[0] = send_ipi.cpu_mask;
1495 vector = send_ipi.vector;
1497 /* 'reserved' part of hv_send_ipi should be 0 */
1498 if (unlikely(ingpa >> 32 != 0))
1499 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1500 sparse_banks[0] = outgpa;
1501 vector = (u32)ingpa;
1504 valid_bank_mask = BIT_ULL(0);
1506 trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
1508 if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
1509 sizeof(send_ipi_ex))))
1510 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1512 trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
1513 send_ipi_ex.vp_set.format,
1514 send_ipi_ex.vp_set.valid_bank_mask);
1516 vector = send_ipi_ex.vector;
1517 valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
1518 sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
1519 sizeof(sparse_banks[0]);
1521 all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
1524 goto check_and_send_ipi;
1526 if (!sparse_banks_len)
1529 if (kvm_read_guest(kvm,
1530 ingpa + offsetof(struct hv_send_ipi_ex,
1531 vp_set.bank_contents),
1534 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1538 if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
1539 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1541 vcpu_mask = all_cpus ? NULL :
1542 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1543 vp_bitmap, vcpu_bitmap);
1545 kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
1548 return HV_STATUS_SUCCESS;
1551 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1553 return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
1556 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
1560 longmode = is_64_bit_mode(vcpu);
1562 kvm_rax_write(vcpu, result);
1564 kvm_rdx_write(vcpu, result >> 32);
1565 kvm_rax_write(vcpu, result & 0xffffffff);
1569 static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
1571 kvm_hv_hypercall_set_result(vcpu, result);
1572 ++vcpu->stat.hypercalls;
1573 return kvm_skip_emulated_instruction(vcpu);
1576 static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
1578 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
1581 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
1583 struct eventfd_ctx *eventfd;
1585 if (unlikely(!fast)) {
1589 if ((gpa & (__alignof__(param) - 1)) ||
1590 offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
1591 return HV_STATUS_INVALID_ALIGNMENT;
ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
1595 return HV_STATUS_INVALID_ALIGNMENT;
1599 * Per spec, bits 32-47 contain the extra "flag number". However, we
 * have no use for it, and in all known use cases it is zero, so just
1601 * report lookup failure if it isn't.
1603 if (param & 0xffff00000000ULL)
1604 return HV_STATUS_INVALID_PORT_ID;
1605 /* remaining bits are reserved-zero */
1606 if (param & ~KVM_HYPERV_CONN_ID_MASK)
1607 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1609 /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
1611 eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
1614 return HV_STATUS_INVALID_PORT_ID;
1616 eventfd_signal(eventfd, 1);
1617 return HV_STATUS_SUCCESS;
1620 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1622 u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
1623 uint16_t code, rep_idx, rep_cnt;
bool fast, rep;

/*
 * A hypercall generates #UD from non-zero CPL or from real mode,
 * per the Hyper-V spec.
 */
1630 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
1631 kvm_queue_exception(vcpu, UD_VECTOR);
1635 #ifdef CONFIG_X86_64
1636 if (is_64_bit_mode(vcpu)) {
1637 param = kvm_rcx_read(vcpu);
1638 ingpa = kvm_rdx_read(vcpu);
1639 outgpa = kvm_r8_read(vcpu);
1643 param = ((u64)kvm_rdx_read(vcpu) << 32) |
1644 (kvm_rax_read(vcpu) & 0xffffffff);
1645 ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
1646 (kvm_rcx_read(vcpu) & 0xffffffff);
1647 outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
1648 (kvm_rsi_read(vcpu) & 0xffffffff);
1651 code = param & 0xffff;
1652 fast = !!(param & HV_HYPERCALL_FAST_BIT);
1653 rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
1654 rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
1655 rep = !!(rep_cnt || rep_idx);
1657 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
1660 case HVCALL_NOTIFY_LONG_SPIN_WAIT:
1661 if (unlikely(rep)) {
1662 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1665 kvm_vcpu_on_spin(vcpu, true);
1667 case HVCALL_SIGNAL_EVENT:
1668 if (unlikely(rep)) {
1669 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1672 ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
1673 if (ret != HV_STATUS_INVALID_PORT_ID)
1675 /* fall through - maybe userspace knows this conn_id. */
1676 case HVCALL_POST_MESSAGE:
1677 /* don't bother userspace if it has no way to handle it */
1678 if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
1679 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1682 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
1683 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
1684 vcpu->run->hyperv.u.hcall.input = param;
1685 vcpu->run->hyperv.u.hcall.params[0] = ingpa;
1686 vcpu->run->hyperv.u.hcall.params[1] = outgpa;
1687 vcpu->arch.complete_userspace_io =
1688 kvm_hv_hypercall_complete_userspace;
1690 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
1691 if (unlikely(fast || !rep_cnt || rep_idx)) {
1692 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1695 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
1697 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
1698 if (unlikely(fast || rep)) {
1699 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1702 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
1704 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
1705 if (unlikely(fast || !rep_cnt || rep_idx)) {
1706 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1709 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
1711 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
1712 if (unlikely(fast || rep)) {
1713 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1716 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
1718 case HVCALL_SEND_IPI:
1719 if (unlikely(rep)) {
1720 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1723 ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
1725 case HVCALL_SEND_IPI_EX:
1726 if (unlikely(fast || rep)) {
1727 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1730 ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
1733 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
1737 return kvm_hv_hypercall_complete(vcpu, ret);
1740 void kvm_hv_init_vm(struct kvm *kvm)
1742 mutex_init(&kvm->arch.hyperv.hv_lock);
1743 idr_init(&kvm->arch.hyperv.conn_to_evt);
1746 void kvm_hv_destroy_vm(struct kvm *kvm)
1748 struct eventfd_ctx *eventfd;
1751 idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
1752 eventfd_ctx_put(eventfd);
1753 idr_destroy(&kvm->arch.hyperv.conn_to_evt);
1756 static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
1758 struct kvm_hv *hv = &kvm->arch.hyperv;
1759 struct eventfd_ctx *eventfd;
1762 eventfd = eventfd_ctx_fdget(fd);
1763 if (IS_ERR(eventfd))
1764 return PTR_ERR(eventfd);
1766 mutex_lock(&hv->hv_lock);
1767 ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
1768 GFP_KERNEL_ACCOUNT);
1769 mutex_unlock(&hv->hv_lock);
1776 eventfd_ctx_put(eventfd);
1780 static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
1782 struct kvm_hv *hv = &kvm->arch.hyperv;
1783 struct eventfd_ctx *eventfd;
1785 mutex_lock(&hv->hv_lock);
1786 eventfd = idr_remove(&hv->conn_to_evt, conn_id);
1787 mutex_unlock(&hv->hv_lock);
1792 synchronize_srcu(&kvm->srcu);
1793 eventfd_ctx_put(eventfd);
1797 int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
1799 if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
1800 (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
1803 if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
1804 return kvm_hv_eventfd_deassign(kvm, args->conn_id);
1805 return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
1808 int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1809 struct kvm_cpuid_entry2 __user *entries)
1811 uint16_t evmcs_ver = 0;
1812 struct kvm_cpuid_entry2 cpuid_entries[] = {
1813 { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
1814 { .function = HYPERV_CPUID_INTERFACE },
1815 { .function = HYPERV_CPUID_VERSION },
1816 { .function = HYPERV_CPUID_FEATURES },
1817 { .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
1818 { .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
1819 { .function = HYPERV_CPUID_NESTED_FEATURES },
1821 int i, nent = ARRAY_SIZE(cpuid_entries);
1823 if (kvm_x86_ops->nested_get_evmcs_version)
1824 evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
1826 /* Skip NESTED_FEATURES if eVMCS is not supported */
1830 if (cpuid->nent < nent)
1833 if (cpuid->nent > nent)
1836 for (i = 0; i < nent; i++) {
1837 struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
1840 switch (ent->function) {
1841 case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
1842 memcpy(signature, "Linux KVM Hv", 12);
1844 ent->eax = HYPERV_CPUID_NESTED_FEATURES;
1845 ent->ebx = signature[0];
1846 ent->ecx = signature[1];
1847 ent->edx = signature[2];
1850 case HYPERV_CPUID_INTERFACE:
1851 memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
1852 ent->eax = signature[0];
1855 case HYPERV_CPUID_VERSION:
1857 * We implement some Hyper-V 2016 functions so let's use
1860 ent->eax = 0x00003839;
1861 ent->ebx = 0x000A0000;
1864 case HYPERV_CPUID_FEATURES:
1865 ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
1866 ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
1867 ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
1868 ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
1869 ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
1870 ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
1871 ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
1872 ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
1873 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
1874 ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
1875 ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
1877 ent->ebx |= HV_X64_POST_MESSAGES;
1878 ent->ebx |= HV_X64_SIGNAL_EVENTS;
1880 ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
1881 ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
/*
 * Direct Synthetic timers only make sense with in-kernel
 * LAPIC.
 */
1887 if (lapic_in_kernel(vcpu))
1888 ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
1892 case HYPERV_CPUID_ENLIGHTMENT_INFO:
1893 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
1894 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
1895 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
1896 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
1897 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
1899 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
1900 if (!cpu_smt_possible())
1901 ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
/*
 * Default number of spinlock retry attempts, matches
 * HyperV 2016.
 */
1906 ent->ebx = 0x00000FFF;
1910 case HYPERV_CPUID_IMPLEMENT_LIMITS:
1911 /* Maximum number of virtual processors */
1912 ent->eax = KVM_MAX_VCPUS;
1914 * Maximum number of logical processors, matches
1921 case HYPERV_CPUID_NESTED_FEATURES:
1922 ent->eax = evmcs_ver;
1931 if (copy_to_user(entries, cpuid_entries,
1932 nent * sizeof(struct kvm_cpuid_entry2)))