/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define APIC_BUS_CYCLE_NS 1

/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...) do {} while (0)

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH (1 << 12)
/* The following defines are not in apicdef.h */
#define APIC_SHORT_MASK 0xc0000
#define APIC_DEST_NOSHORT 0x0
#define APIC_DEST_MASK 0x800
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32

#define APIC_BROADCAST 0xFF
#define X2APIC_BROADCAST 0xFFFFFFFFul

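/*
 * Layout assumed by the bitmap helpers below: the 256 vectors of the
 * IRR/ISR/TMR register groups are stored as eight 32-bit registers
 * spaced 0x10 bytes apart.  VEC_POS() and REG_POS() (defined in
 * lapic.h) split a vector accordingly: VEC_POS(v) = v & 31 selects the
 * bit and REG_POS(v) = (v >> 5) << 4 selects the register, so e.g.
 * vector 0x31 maps to bit 17 of the register at offset +0x10.
 */
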
static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline void apic_clear_vector(int vec, void *bitmap)
{
	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

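/*
 * A sketch of how the logical-destination lookup below decodes its
 * input: in x2APIC mode the 32-bit dest_id is <cluster:16><bitmask:16>,
 * and clusters alias the physical map in groups of 16.  For example,
 * dest_id 0x00020003 selects the cluster starting at physical APIC ID
 * 32, and the low mask 0x3 addresses IDs 32 and 33.  In xAPIC flat mode
 * all 8 LDR bits form the mask; in cluster mode bits 7:4 select one of
 * 16 clusters and bits 3:0 the CPU within it.
 */
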
static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
	switch (map->mode) {
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
		}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	default:
		/* Not optimized. */
		return false;
	}
}

static void kvm_apic_map_free(struct rcu_head *rcu)
{
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

	kvfree(map);
}

static void recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;
	u32 max_id = 255;

	mutex_lock(&kvm->arch.apic_map_lock);

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_apic_id(vcpu->arch.apic));

	new = kvm_kvzalloc(sizeof(struct kvm_apic_map) +
	                   sizeof(struct kvm_lapic *) * ((u64)max_id + 1));

	if (!new)
		goto out;

	new->max_apic_id = max_id;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		struct kvm_lapic **cluster;
		u16 mask;
		u32 ldr, aid;

		if (!kvm_apic_present(vcpu))
			continue;

		aid = kvm_apic_id(apic);
		ldr = kvm_lapic_get_reg(apic, APIC_LDR);

		if (aid <= new->max_apic_id)
			new->phys_map[aid] = apic;

		if (apic_x2apic_mode(apic)) {
			new->mode |= KVM_APIC_MODE_X2APIC;
		} else if (ldr) {
			ldr = GET_APIC_LOGICAL_ID(ldr);
			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
			else
				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
		}

		if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
			continue;

		if (mask)
			cluster[ffs(mask) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		call_rcu(&old->rcu, kvm_apic_map_free);

	kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled) {
			static_key_slow_dec_deferred(&apic_sw_disabled);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			static_key_slow_inc(&apic_sw_disabled.key);
	}
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	recalculate_apic_map(apic->vcpu->kvm);
}

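/*
 * Worked example for the LDR derivation below: in x2APIC mode the
 * logical ID is fixed by the architecture as <cluster:16><1 << (id & 0xf)>.
 * For id = 0x23 this gives cluster 2 and intra-cluster bit 3, i.e. an
 * LDR of 0x00020008.
 */
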
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
	return ((id >> 4) << 16) | (1 << (id & 0xf));
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = kvm_apic_calc_x2apic_ldr(id);

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
	return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *feat;
	u32 v = APIC_VERSION;

	if (!lapic_in_kernel(vcpu))
		return;

	/*
	 * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
	 * implementation), which doesn't have an EOI register.  Some buggy
	 * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
	 * in the lapic without checking the IOAPIC version first, so
	 * level-triggered interrupts would never get EOIed in the IOAPIC.
	 */
	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
	LVT_MASK,			/* part LVTT mask, timer mode mask added at runtime */
	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
	LINT_MASK, LINT_MASK,		/* LVT0-1 */
	LVT_MASK			/* LVTERR */
};

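/*
 * find_highest_vector() below scans the eight 32-bit registers from the
 * top down and returns fls() of the first non-zero one, biased by that
 * register's base vector.  E.g. if only bit 1 of the register covering
 * vectors 224-255 is set, the result is 225.
 */
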
static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return fls(*reg) - 1 + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}

void __kvm_apic_update_irr(u32 *pir, void *regs)
{
	u32 i, pir_val;

	for (i = 0; i <= 7; i++) {
		pir_val = xchg(&pir[i], 0);
		if (pir_val)
			*((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
	}
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	__kvm_apic_update_irr(pir, apic->regs);

	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will be always
	 * true with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	if (apic->vcpu->arch.apicv_active)
		kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	vcpu = apic->vcpu;

	if (unlikely(vcpu->arch.apicv_active)) {
		/* try to update RVI */
		apic_clear_vector(vec, apic->regs + APIC_IRR);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else {
		apic->irr_pending = false;
		apic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood.  Instead
	 * just set SVI.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		kvm_x86_ops->hwapic_isr_update(vcpu, vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * The ISR (in-service register) bit is set when injecting an
		 * interrupt.  The highest vector is injected, thus the latest
		 * bit set matches the highest bit in ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment.  In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		kvm_x86_ops->hwapic_isr_update(vcpu,
					       apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/*
	 * This may race with setting of irr in __apic_accept_irq() and
	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
	 * will cause vmexit immediately and the value will be recalculated
	 * on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0) {
		apic_debug("Can't read EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return false;
	}
	return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
		apic_debug("Can't set EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
		apic_debug("Can't clear EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

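/*
 * PPR rule implemented below, per the SDM's task/processor-priority
 * definition: the processor priority is whichever of the TPR and the
 * highest in-service vector has the higher priority class (bits 7:4).
 * Example: TPR = 0x30 with highest ISR vector 0x51 yields PPR = 0x50,
 * so all vectors in class 5 and below stay blocked until EOI.
 */
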
static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
		   apic, ppr, isr, isrv);

	if (old_ppr != ppr) {
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
		if (ppr < old_ppr)
			kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	}
}

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	if (apic_x2apic_mode(apic))
		return mda == X2APIC_BROADCAST;

	return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	if (apic_x2apic_mode(apic))
		return mda == kvm_apic_id(apic);

	return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);
	mda = GET_APIC_DEST_FIELD(mda);

	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		apic_debug("Bad DFR vcpu %d: %08x\n",
			   apic->vcpu->vcpu_id, kvm_lapic_get_reg(apic, APIC_DFR));
		return false;
	}
}

/* The KVM local APIC implementation has two quirks:
 *
 * - the xAPIC MDA stores the destination at bits 24-31, while this
 *   is not true of struct kvm_lapic_irq's dest_id field.  This is
 *   just a quirk in the API and is not problematic.
 *
 * - in-kernel IOAPIC messages have to be delivered directly to
 *   x2APIC, because the kernel does not support interrupt remapping.
 *   In order to support broadcast without interrupt remapping, x2APIC
 *   rewrites the destination of non-IPI messages from APIC_BROADCAST
 *   to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */

static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
			struct kvm_lapic *source, struct kvm_lapic *target)
{
	bool ipi = source != NULL;
	bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
	    !ipi && dest_id == APIC_BROADCAST && x2apic_mda)
		return X2APIC_BROADCAST;

	return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int short_hand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(vcpu, dest, source, target);

	apic_debug("target %p, source %p, dest 0x%x, "
		   "dest_mode 0x%x, short_hand 0x%x\n",
		   target, source, dest, dest_mode, short_hand);

	switch (short_hand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		apic_debug("kvm: apic: Bad dest shorthand value %x\n",
			   short_hand);
		return false;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

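/*
 * Vector hashing, used by kvm_vector_to_index() below: the handler of a
 * lowest-priority interrupt is chosen as the (vector % #destinations)'th
 * set bit of the destination bitmap.  E.g. vector 0x61 with four
 * possible destinations hashes to 0x61 % 4 = 1, i.e. the second set bit.
 */
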
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		printk(KERN_INFO
		       "Disabled LAPIC found during irq injection\n");
	}
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
		if ((irq->dest_id == APIC_BROADCAST &&
		     map->mode != KVM_APIC_MODE_X2APIC))
			return true;
		if (irq->dest_id == X2APIC_BROADCAST)
			return true;
	} else {
		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
		if (irq->dest_id == (x2apic_ipi ?
		                     X2APIC_BROADCAST : APIC_BROADCAST))
			return true;
	}

	return false;
}

/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */

static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
{
	int i, lowest;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			*dst = &map->phys_map[irq->dest_id];
			*bitmap = 1;
		}
		return true;
	}

	*bitmap = 0;
	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
				(u16 *)bitmap))
		return false;

	if (!kvm_lowest_prio_delivery(irq))
		return true;

	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						(*dst)[lowest]->vcpu) < 0)
				lowest = i;
		}
	} else {
		if (!*bitmap)
			return true;

		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
				bitmap, 16);

		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}

	*bitmap = (lowest >= 0) ? 1 << lowest : 0;

	return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret)
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			if (*r < 0)
				*r = 0;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
		}

	rcu_read_unlock();
	return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */

bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
			hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
			else
				apic_clear_vector(vector, apic->regs + APIC_TMR);
		}

		if (vcpu->arch.apicv_active)
			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
		else {
			kvm_lapic_set_irr(vector, apic);

			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		result = 1;
		kvm_make_request(KVM_REQ_SMI, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			/* make sure pending_events is visible before sending
			 * the request */
			smp_wmb();
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		} else {
			apic_debug("Ignoring de-assert INIT to vcpu %d\n",
				   vcpu->vcpu_id);
		}
		break;

	case APIC_DM_STARTUP:
		apic_debug("SIPI to vcpu %d vector 0x%02x\n",
			   vcpu->vcpu_id, vector);
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every EOI write has a corresponding bit set in the ISR;
	 * one example is when the kernel checks the timer in setup_IO_APIC.
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}

/*
 * this interface assumes a trap-like exit, which has already finished
 * the desired side effect including vISR and vPPR update.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

static void apic_send_ipi(struct kvm_lapic *apic)
{
	u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR);
	u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2);
	struct kvm_lapic_irq irq;

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	apic_debug("icr_high 0x%x, icr_low 0x%x, "
		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
		   "msi_redir_hint 0x%x\n",
		   icr_high, icr_low, irq.shorthand, irq.dest_id,
		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
		   irq.vector, irq.msi_redir_hint);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}

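/*
 * The current-count register computed below decays from TMICT at a rate
 * of one tick per APIC_BUS_CYCLE_NS * divide_count nanoseconds.  For a
 * periodic timer only the offset within the current period matters,
 * hence the mod_64().  Example: with divide_count = 16 and 32000 ns
 * remaining, TMCCT reads back as 32000 / (1 * 16) = 2000.
 */
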
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining;
	s64 ns;
	u32 tmcct;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
	if (ktime_to_ns(remaining) < 0)
		remaining = ktime_set(0, 0);

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	tmcct = div64_u64(ns,
			 (APIC_BUS_CYCLE_NS * apic->divide_count));

	return tmcct;
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ARBPRI:
		apic_debug("Access APIC ARBPRI register which is for P6\n");
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_lapic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		/* fall thru */
	default:
		val = kvm_lapic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
		void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;
	/* this bitmask has a bit cleared for each reserved register */
	static const u64 rmask = 0x43ff01ffffffe70cULL;

	if ((alignment + len) > 4) {
		apic_debug("KVM_APIC_READ: alignment error %x %d\n",
			   offset, len);
		return 1;
	}

	if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
		apic_debug("KVM_APIC_READ: read reserved register %x\n",
			   offset);
		return 1;
	}

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1,2, or 4 instead\n", len);
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return addr >= apic->base_address &&
		addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		memset(data, 0xff, len);
		return 0;
	}

	kvm_lapic_reg_read(apic, offset, len, data);

	return 0;
}

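/*
 * TDCR decoding below: the divide value is encoded in bits 0, 1 and 3
 * (bit 2 is reserved).  Folding bit 3 down next to bits 1:0 and adding
 * one gives the shift count, with the 3-bit wrap mapping 0b1011 to
 * divide-by-1.  Examples: TDCR = 0x3 -> tmp2 = 4 -> divide_count = 16;
 * TDCR = 0xb -> tmp2 = 8 -> 1 << (8 & 7) = divide-by-1.
 */
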
static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);

	apic_debug("timer divide count is 0x%x\n",
		   apic->divide_count);
}

static void apic_update_lvtt(struct kvm_lapic *apic)
{
	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
			apic->lapic_timer.timer_mode_mask;

	if (apic->lapic_timer.timer_mode != timer_mode) {
		apic->lapic_timer.timer_mode = timer_mode;
		hrtimer_cancel(&apic->lapic_timer.timer);
	}
}

static void apic_timer_expired(struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct swait_queue_head *q = &vcpu->wq;
	struct kvm_timer *ktimer = &apic->lapic_timer;

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	atomic_inc(&apic->lapic_timer.pending);
	kvm_set_pending_timer(vcpu);

	if (swait_active(q))
		swake_up(q);

	if (apic_lvtt_tscdeadline(apic))
		ktimer->expired_tscdeadline = ktimer->tscdeadline;
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */
static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

	if (kvm_apic_hw_enabled(apic)) {
		int vec = reg & APIC_VECTOR_MASK;
		void *bitmap = apic->regs + APIC_ISR;

		if (vcpu->arch.apicv_active)
			bitmap = apic->regs + APIC_IRR;

		if (apic_test_vector(vec, bitmap))
			return true;
	}
	return false;
}

void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 guest_tsc, tsc_deadline;

	if (!lapic_in_kernel(vcpu))
		return;

	if (apic->lapic_timer.expired_tscdeadline == 0)
		return;

	if (!lapic_timer_int_injected(vcpu))
		return;

	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
	apic->lapic_timer.expired_tscdeadline = 0;
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

	/* __delay is delay_tsc whenever the hardware has TSC, thus always. */
	if (guest_tsc < tsc_deadline)
		__delay(min(tsc_deadline - guest_tsc,
			nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
}

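/*
 * Deadline-to-hrtimer conversion used below: the distance to the
 * deadline in guest TSC ticks is scaled to nanoseconds as
 * ns = ticks * 1000000 / virtual_tsc_khz.  E.g. 2,000,000 ticks on a
 * 2 GHz (2,000,000 kHz) guest TSC is 1,000,000 ns, i.e. a 1 ms timer.
 */
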
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
	u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
	u64 ns = 0;
	ktime_t expire;
	struct kvm_vcpu *vcpu = apic->vcpu;
	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
	unsigned long flags;
	ktime_t now;

	if (unlikely(!tscdeadline || !this_tsc_khz))
		return;

	local_irq_save(flags);

	now = apic->lapic_timer.timer.base->get_time();
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	if (likely(tscdeadline > guest_tsc)) {
		ns = (tscdeadline - guest_tsc) * 1000000ULL;
		do_div(ns, this_tsc_khz);
		expire = ktime_add_ns(now, ns);
		expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
		hrtimer_start(&apic->lapic_timer.timer,
			      expire, HRTIMER_MODE_ABS_PINNED);
	} else
		apic_timer_expired(apic);

	local_irq_restore(flags);
}

bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return false;

	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);

static void cancel_hv_tscdeadline(struct kvm_lapic *apic)
{
	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
	apic->lapic_timer.hv_timer_in_use = false;
}

void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	WARN_ON(swait_active(&vcpu->wq));
	cancel_hv_tscdeadline(apic);
	apic_timer_expired(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

static bool start_hv_tscdeadline(struct kvm_lapic *apic)
{
	u64 tscdeadline = apic->lapic_timer.tscdeadline;

	if (atomic_read(&apic->lapic_timer.pending) ||
		kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
		if (apic->lapic_timer.hv_timer_in_use)
			cancel_hv_tscdeadline(apic);
	} else {
		apic->lapic_timer.hv_timer_in_use = true;
		hrtimer_cancel(&apic->lapic_timer.timer);

		/* In case the sw timer triggered in the window */
		if (atomic_read(&apic->lapic_timer.pending))
			cancel_hv_tscdeadline(apic);
	}
	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
			apic->lapic_timer.hv_timer_in_use);
	return apic->lapic_timer.hv_timer_in_use;
}

void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(apic->lapic_timer.hv_timer_in_use);

	if (apic_lvtt_tscdeadline(apic))
		start_hv_tscdeadline(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	/* Possibly the TSC deadline timer is not enabled yet */
	if (!apic->lapic_timer.hv_timer_in_use)
		return;

	cancel_hv_tscdeadline(apic);

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	start_sw_tscdeadline(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);

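/*
 * Periodic/one-shot period computed below: TMICT initial-count ticks,
 * each APIC_BUS_CYCLE_NS (1 ns) long, stretched by the divide
 * configuration.  Example: TMICT = 100000 with divide_count = 16 gives
 * a period of 100000 * 1 * 16 = 1,600,000 ns, i.e. 1.6 ms.
 */
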
static void start_apic_timer(struct kvm_lapic *apic)
{
	ktime_t now;

	atomic_set(&apic->lapic_timer.pending, 0);

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
		/* lapic timer in oneshot or periodic mode */
		now = apic->lapic_timer.timer.base->get_time();
		apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
			    * APIC_BUS_CYCLE_NS * apic->divide_count;

		if (!apic->lapic_timer.period)
			return;
		/*
		 * Do not allow the guest to program periodic timers with small
		 * interval, since the hrtimers are not throttled by the host
		 * scheduler.
		 */
		if (apic_lvtt_period(apic)) {
			s64 min_period = min_timer_period_us * 1000LL;

			if (apic->lapic_timer.period < min_period) {
				pr_info_ratelimited(
				    "kvm: vcpu %i: requested %lld ns "
				    "lapic timer period limited to %lld ns\n",
				    apic->vcpu->vcpu_id,
				    apic->lapic_timer.period, min_period);
				apic->lapic_timer.period = min_period;
			}
		}

		hrtimer_start(&apic->lapic_timer.timer,
			      ktime_add_ns(now, apic->lapic_timer.period),
			      HRTIMER_MODE_ABS_PINNED);

		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
			   PRIx64 ", "
			   "timer initial count 0x%x, period %lldns, "
			   "expire @ 0x%016" PRIx64 ".\n", __func__,
			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
			   kvm_lapic_get_reg(apic, APIC_TMICT),
			   apic->lapic_timer.period,
			   ktime_to_ns(ktime_add_ns(now,
					apic->lapic_timer.period)));
	} else if (apic_lvtt_tscdeadline(apic)) {
		if (!(kvm_x86_ops->set_hv_timer && start_hv_tscdeadline(apic)))
			start_sw_tscdeadline(apic);
	}
}

static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode) {
			apic_debug("Receive NMI setting on APIC_LVT0 "
				   "for cpu %d\n", apic->vcpu->vcpu_id);
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		} else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	}
}

int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_xapic_id(apic, val >> 24);
		else
			ret = 1;
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic)) {
			kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;
		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;
			u32 lvt_val;

			for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
				lvt_val = kvm_lapic_get_reg(apic,
						       APIC_LVTT + 0x10 * i);
				kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
					     lvt_val | APIC_LVT_MASKED);
			}
			apic_update_lvtt(apic);
			atomic_set(&apic->lapic_timer.pending, 0);
		}
		break;
	}
	case APIC_ICR:
		/* No delay here, so we always clear the pending bit */
		kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
		apic_send_ipi(apic);
		break;

	case APIC_ICR2:
		if (!apic_x2apic_mode(apic))
			val &= 0xff000000;
		kvm_lapic_set_reg(apic, APIC_ICR2, val);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR: {
		/* TODO: Check vector */
		size_t size;
		u32 index;

		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		size = ARRAY_SIZE(apic_lvt_mask);
		index = array_index_nospec(
				(reg - APIC_LVTT) >> 4, size);
		val &= apic_lvt_mask[index];
		kvm_lapic_set_reg(apic, reg, val);
		break;
	}

	case APIC_LVTT:
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		kvm_lapic_set_reg(apic, APIC_LVTT, val);
		apic_update_lvtt(apic);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		hrtimer_cancel(&apic->lapic_timer.timer);
		kvm_lapic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR:
		apic_debug("KVM_WRITE:TDCR %x\n", val);
		kvm_lapic_set_reg(apic, APIC_TDCR, val);
		update_divide_count(apic);
		break;

	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0) {
			apic_debug("KVM_WRITE:ESR not zero %x\n", val);
			ret = 1;
		}
		break;

	case APIC_SELF_IPI:
		if (apic_x2apic_mode(apic)) {
			kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
		} else
			ret = 1;
		break;
	default:
		ret = 1;
		break;
	}
	if (ret)
		apic_debug("Local APIC Write to read-only register %x\n", reg);
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);

static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		return 0;
	}

	/*
	 * APIC registers must be aligned on a 128-bit boundary.
	 * 32/64/128-bit registers must be accessed through 32-bit loads
	 * and stores.  Refer to SDM 8.4.1.
	 */
	if (len != 4 || (offset & 0xf)) {
		/* Don't shout loud, $infamous_os would cause only noise. */
		apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
		return 0;
	}

	val = *(u32 *)data;

	/* too common printing */
	if (offset != APIC_EOI)
		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
			   "0x%x\n", __func__, offset, len, val);

	kvm_lapic_reg_write(apic, offset & 0xff0, val);

	return 0;
}

void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	u32 val = 0;

	/* hw has done the conditional check and inst decode */
	offset &= 0xff0;

	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);

	/* TODO: optimize to just emulate side effect w/o one more write */
	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic)
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_key_slow_dec_deferred(&apic_hw_disabled);

	if (!apic->sw_enabled)
		static_key_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */

u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
		     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}

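/*
 * The CR8 <-> TPR mapping implemented by the two helpers above: CR8
 * holds only the four priority-class bits, so TPR = CR8 << 4 and
 * CR8 = TPR >> 4.  Writing CR8 = 5, for instance, yields TPR = 0x50
 * (with bit 2 of the old TPR preserved by the masking above).
 */
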
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic) {
		value |= MSR_IA32_APICBASE_BSP;
		vcpu->arch.apic_base = value;
		return;
	}

	vcpu->arch.apic_base = value;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_key_slow_dec_deferred(&apic_hw_disabled);
		} else
			static_key_slow_inc(&apic_hw_disabled.key);
		recalculate_apic_map(vcpu->kvm);
	}

	if ((old_value ^ value) & X2APIC_ENABLE) {
		if (value & X2APIC_ENABLE) {
			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
		} else
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
		pr_warn_once("APIC base relocation is unsupported by KVM");

	/* with FSB delivery interrupt, we can restart APIC functionality */
	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
}

void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic;
	int i;

	apic_debug("%s\n", __func__);

	apic = vcpu->arch.apic;
	ASSERT(apic != NULL);

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!init_event) {
		kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
		                         MSR_IA32_APICBASE_ENABLE);
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	}
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	apic->irr_pending = vcpu->arch.apicv_active;
	apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);
	if (kvm_vcpu_is_bsp(vcpu))
		kvm_lapic_set_base(vcpu,
				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	apic_debug("%s: vcpu=%p, id=%d, base_msr="
		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
		   vcpu, kvm_apic_id(apic),
		   vcpu->arch.apic_base, apic->base_address);
}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};

static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic);

	if (lapic_is_periodic(apic)) {
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);
	apic_debug("apic_init %d\n", vcpu->vcpu_id);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	apic->lapic_timer.timer.function = apic_timer_fn;

	/*
	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
	 * thinking that APIC state has changed.
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_lapic_reset(vcpu, false);
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
nomem:
	return -ENOMEM;
}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int highest_irr;

	if (!kvm_apic_hw_enabled(apic))
		return -1;

	apic_update_ppr(apic);
	highest_irr = apic_find_highest_irr(apic);
	if ((highest_irr == -1) ||
	    ((highest_irr & 0xF0) <= kvm_lapic_get_reg(apic, APIC_PROCPRI)))
		return -1;
	return highest_irr;
}

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		r = 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		r = 1;
	return r;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_local_deliver(apic, APIC_LVTT);
		if (apic_lvtt_tscdeadline(apic))
			apic->lapic_timer.tscdeadline = 0;
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_set_isr(vector, apic);
	apic_update_ppr(apic);
	apic_clear_irr(vector, apic);

	if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
		apic_clear_isr(vector, apic);
		apic_update_ppr(apic);
	}

	return vector;
}

static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/* In x2APIC mode, the LDR is fixed and based on the id */
		if (set)
			*ldr = kvm_apic_calc_x2apic_ldr(*id);
	}

	return 0;
}

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r)
		return r;
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);

	recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	start_apic_timer(apic);
	apic->irr_pending = true;
	apic->isr_count = vcpu->arch.apicv_active ?
				1 : count_vectors(apic->regs + APIC_ISR);
	apic->highest_isr_cache = -1;
	if (vcpu->arch.apicv_active) {
		if (kvm_x86_ops->apicv_post_state_restore)
			kvm_x86_ops->apicv_post_state_restore(vcpu);
		kvm_x86_ops->hwapic_irr_update(vcpu,
				apic_find_highest_irr(apic));
		kvm_x86_ops->hwapic_isr_update(vcpu,
				apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether guest triggered PV EOI since the
 * last entry. If yes, set EOI on guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

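/*
 * x2APIC MSR layout assumed below: register at byte offset "reg" is
 * exposed as MSR 0x800 + (reg >> 4), so the offset is recovered as
 * (msr - APIC_BASE_MSR) << 4.  For example the TPR at MSR 0x808 maps
 * back to APIC_TASKPRI (offset 0x80).
 */
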
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_ICR2)
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR || reg == APIC_ICR2) {
		apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
			   reg);
		return 1;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu))
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!lapic_in_kernel(vcpu))
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
	u64 addr = data & ~KVM_MSR_ENABLED;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;
	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
					 addr, sizeof(u8));
}

void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	unsigned long pe;

	if (!lapic_in_kernel(vcpu) || !apic->pending_events)
		return;

	/*
	 * INITs are latched while in SMM.  Because an SMM CPU cannot
	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
	 * and delay processing of INIT until the next RSM.
	 */
	if (is_smm(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return;
	}

	pe = xchg(&apic->pending_events, 0);
	if (test_bit(KVM_APIC_INIT, &pe)) {
		kvm_lapic_reset(vcpu, true);
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		/* evaluate pending_events before reading the vector */
		smp_rmb();
		sipi_vector = apic->sipi_vector;
		apic_debug("vcpu %d received sipi with vector # %x\n",
			   vcpu->vcpu_id, sipi_vector);
		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}

void kvm_lapic_init(void)
{
	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
}

void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	static_key_deferred_flush(&apic_sw_disabled);
}