// SPDX-License-Identifier: GPL-2.0-only
/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			0x14UL
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* The following define is not in apicdef.h */
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
#define LAPIC_TIMER_ADVANCE_NS_MAX	5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP	8

static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);
static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
{
	*((u32 *) (regs + reg_off)) = val;
}

static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
	__kvm_lapic_set_reg(apic->regs, reg_off, val);
}

static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	return *((u64 *) (regs + reg));
}

static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
{
	return __kvm_lapic_get_reg64(apic->regs, reg);
}

static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
{
	BUILD_BUG_ON(reg != APIC_ICR);
	*((u64 *) (regs + reg)) = val;
}

static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
						int reg, u64 val)
{
	__kvm_lapic_set_reg64(apic->regs, reg, val);
}

static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
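
/*
 * Worked example of the VEC_POS()/REG_POS() split used above: IRR/ISR/TMR
 * are arrays of 32-bit registers on a 16-byte stride, so vector 0x61 (97)
 * lives at bit VEC_POS(97) = 1 of the register at byte offset
 * REG_POS(97) = (97 >> 5) << 4 = 0x30.
 */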
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
	return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
		(kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops.set_hv_timer
	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
		    kvm_can_post_timer_interrupt(vcpu));
}

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
	return ((id >> 4) << 16) | (1 << (id & 0xf));
}
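
/*
 * Worked example of the LDR derivation above: x2APIC ID 0x23 yields cluster
 * 0x2 in LDR[31:16] and logical ID (1 << 3) in LDR[15:0], i.e.
 * kvm_apic_calc_x2apic_ldr(0x23) == 0x00020008.
 */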
static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
	switch (map->logical_mode) {
	case KVM_APIC_MODE_SW_DISABLED:
		/* Arbitrarily use the flat map so that @cluster isn't NULL. */
		*cluster = map->xapic_flat_map;
		*mask = 0;
		return true;
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			offset = array_index_nospec(offset, map->max_apic_id + 1);
			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
		}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	case KVM_APIC_MODE_MAP_DISABLED:
		return false;
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}
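
/*
 * Worked example for the X2APIC case above: dest_id 0x00020008 selects
 * cluster offset (0x2 * 16) = 32 into phys_map with *mask = 0x0008, i.e.
 * the vCPU with x2APIC ID 35 (cluster 2, bit 3).
 */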
static void kvm_apic_map_free(struct rcu_head *rcu)
{
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

	kvfree(map);
}

static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
				    struct kvm_vcpu *vcpu,
				    bool *xapic_id_mismatch)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 x2apic_id = kvm_x2apic_id(apic);
	u32 xapic_id = kvm_xapic_id(apic);
	u32 physical_id;

	/*
	 * For simplicity, KVM always allocates enough space for all possible
	 * xAPIC IDs.  Yell, but don't kill the VM, as KVM can continue on
	 * without the optimized map.
	 */
	if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
		return -EINVAL;

	/*
	 * Bail if a vCPU was added and/or enabled its APIC between allocating
	 * the map and doing the actual calculations for the map.  Note, KVM
	 * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
	 * the compiler decides to reload x2apic_id after this check.
	 */
	if (x2apic_id > new->max_apic_id)
		return -E2BIG;

	/*
	 * Deliberately truncate the vCPU ID when detecting a mismatched APIC
	 * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
	 * 32-bit value.  Any unwanted aliasing due to truncation results will
	 * be detected below.
	 */
	if (!apic_x2apic_mode(apic) && xapic_id != (u8)vcpu->vcpu_id)
		*xapic_id_mismatch = true;

	/*
	 * Apply KVM's hotplug hack if userspace has enabled 32-bit APIC IDs.
	 * Allow sending events to vCPUs by their x2APIC ID even if the target
	 * vCPU is in legacy xAPIC mode, and silently ignore aliased xAPIC IDs
	 * (the x2APIC ID is truncated to 8 bits, causing IDs > 0xff to wrap
	 * and collide).
	 *
	 * Honor the architectural (and KVM's non-optimized) behavior if
	 * userspace has not enabled 32-bit x2APIC IDs.  Each APIC is supposed
	 * to process messages independently.  If multiple vCPUs have the same
	 * effective APIC ID, e.g. due to the x2APIC wrap or because the guest
	 * manually modified its xAPIC IDs, events targeting that ID are
	 * supposed to be recognized by all vCPUs with said ID.
	 */
	if (vcpu->kvm->arch.x2apic_format) {
		/* See also kvm_apic_match_physical_addr(). */
		if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
			new->phys_map[x2apic_id] = apic;

		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
			new->phys_map[xapic_id] = apic;
	} else {
		/*
		 * Disable the optimized map if the physical APIC ID is already
		 * mapped, i.e. is aliased to multiple vCPUs.  The optimized
		 * map requires a strict 1:1 mapping between IDs and vCPUs.
		 */
		if (apic_x2apic_mode(apic))
			physical_id = x2apic_id;
		else
			physical_id = xapic_id;

		if (new->phys_map[physical_id])
			return -EINVAL;

		new->phys_map[physical_id] = apic;
	}

	return 0;
}
static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
					struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	enum kvm_apic_logical_mode logical_mode;
	struct kvm_lapic **cluster;
	u16 mask;
	u32 ldr;

	if (new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
		return;

	if (!kvm_apic_sw_enabled(apic))
		return;

	ldr = kvm_lapic_get_reg(apic, APIC_LDR);
	if (!ldr)
		return;

	if (apic_x2apic_mode(apic)) {
		logical_mode = KVM_APIC_MODE_X2APIC;
	} else {
		ldr = GET_APIC_LOGICAL_ID(ldr);
		if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
			logical_mode = KVM_APIC_MODE_XAPIC_FLAT;
		else
			logical_mode = KVM_APIC_MODE_XAPIC_CLUSTER;
	}

	/*
	 * To optimize logical mode delivery, all software-enabled APICs must
	 * be configured for the same mode.
	 */
	if (new->logical_mode == KVM_APIC_MODE_SW_DISABLED) {
		new->logical_mode = logical_mode;
	} else if (new->logical_mode != logical_mode) {
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
		return;
	}

	/*
	 * In x2APIC mode, the LDR is read-only and derived directly from the
	 * x2APIC ID, thus is guaranteed to be addressable.  KVM reuses
	 * kvm_apic_map.phys_map to optimize logical mode x2APIC interrupts by
	 * reversing the LDR calculation to get the cluster of APICs, i.e. no
	 * additional work is required.
	 */
	if (apic_x2apic_mode(apic)) {
		WARN_ON_ONCE(ldr != kvm_apic_calc_x2apic_ldr(kvm_x2apic_id(apic)));
		return;
	}

	if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr,
							&cluster, &mask))) {
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
		return;
	}

	if (!mask)
		return;

	ldr = ffs(mask) - 1;
	if (!is_power_of_2(mask) || cluster[ldr])
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
	else
		cluster[ldr] = apic;
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock_held.
 */
enum {
	CLEAN,
	UPDATE_IN_PROGRESS,
	DIRTY
};
void kvm_recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	u32 max_id = 255; /* enough space for any xAPIC ID */
	bool xapic_id_mismatch;
	int r;

	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
		return;

	WARN_ONCE(!irqchip_in_kernel(kvm),
		  "Dirty APIC map without an in-kernel local APIC");

	mutex_lock(&kvm->arch.apic_map_lock);

retry:
	/*
	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
	 * or the APIC registers (if dirty).  Note, on retry the map may have
	 * not yet been marked dirty by whatever task changed a vCPU's x2APIC
	 * ID, i.e. the map may still show up as in-progress.  In that case
	 * this task still needs to retry and complete its calculation.
	 */
	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
		/* Someone else has updated the map. */
		mutex_unlock(&kvm->arch.apic_map_lock);
		return;
	}

	/*
	 * Reset the mismatch flag between attempts so that KVM does the right
	 * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e.
	 * keep max_id strictly increasing.  Disallowing max_id from shrinking
	 * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU
	 * with the highest x2APIC ID is toggling its APIC on and off.
	 */
	xapic_id_mismatch = false;

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

	new = kvzalloc(sizeof(struct kvm_apic_map) +
	                   sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
			   GFP_KERNEL_ACCOUNT);

	if (!new)
		goto out;

	new->max_apic_id = max_id;
	new->logical_mode = KVM_APIC_MODE_SW_DISABLED;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch);
		if (r) {
			kvfree(new);
			new = NULL;
			if (r == -E2BIG) {
				cond_resched();
				goto retry;
			}

			goto out;
		}

		kvm_recalculate_logical_map(new, vcpu);
	}
out:
	/*
	 * The optimized map is effectively KVM's internal version of APICv,
	 * and all unwanted aliasing that results in disabling the optimized
	 * map also applies to APICv.
	 */
	if (!new)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);

	if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);

	if (xapic_id_mismatch)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);

	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	/*
	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
	 * If another update has come in, leave it DIRTY.
	 */
	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
			       UPDATE_IN_PROGRESS, CLEAN);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		call_rcu(&old->rcu, kvm_apic_map_free);

	kvm_make_scan_ioapic_request(kvm);
}
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled)
			static_branch_slow_dec_deferred(&apic_sw_disabled);
		else
			static_branch_inc(&apic_sw_disabled.key);

		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	}

	/* Check if there are APF page ready requests pending */
	if (enabled) {
		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
		kvm_xen_sw_enable_lapic(apic->vcpu);
	}
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
	kvm_lapic_set_reg(apic, APIC_DFR, val);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = kvm_apic_calc_x2apic_ldr(id);

	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
{
	return apic->nr_lvt_entries > lvt_index;
}

static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
{
	return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
}
void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 v;

	if (!lapic_in_kernel(vcpu))
		return;

	v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);

	/*
	 * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
	 * implementation), which doesn't have an EOI register.  Some buggy
	 * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast in
	 * the LAPIC without checking the IOAPIC version first, in which case
	 * level-triggered interrupts would never get EOIed in the IOAPIC.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}

void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
{
	int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
		return;

	/* Initialize/mask any "new" LVT entries. */
	for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);

	apic->nr_lvt_entries = nr_lvt_entries;

	/* The number of LVT entries is reflected in the version register. */
	kvm_apic_set_version(vcpu);
}

static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
	[LVT_TIMER] = LVT_MASK,	/* timer mode mask added at runtime */
	[LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
	[LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
	[LVT_LINT0] = LINT_MASK,
	[LVT_LINT1] = LINT_MASK,
	[LVT_ERROR] = LVT_MASK,
	[LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
};
static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return __fls(*reg) + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}

bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
	u32 i, vec;
	u32 pir_val, irr_val, prev_irr_val;
	int max_updated_irr;

	max_updated_irr = -1;
	*max_irr = -1;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);

		irr_val = *p_irr;
		pir_val = READ_ONCE(pir[i]);

		if (pir_val) {
			pir_val = xchg(&pir[i], 0);

			prev_irr_val = irr_val;
			do {
				irr_val = prev_irr_val | pir_val;
			} while (prev_irr_val != irr_val &&
				 !try_cmpxchg(p_irr, &prev_irr_val, irr_val));

			if (prev_irr_val != irr_val)
				max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
		}
		if (irr_val)
			*max_irr = __fls(irr_val) + vec;
	}

	return ((max_updated_irr != -1) &&
		(max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
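
/*
 * Worked example of the lock-free merge above: if PIR[0] is 0x5 while
 * IRR[31:0] is 0x4, xchg() claims the PIR chunk and the try_cmpxchg() loop
 * publishes IRR = 0x5.  max_updated_irr then reports vector 0 (the highest
 * newly set bit) while *max_irr reports vector 2 (the highest set bit).
 */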
bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);

	if (unlikely(!apic->apicv_active && irr_updated))
		apic->irr_pending = true;
	return irr_updated;
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will be always
	 * true with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	if (unlikely(apic->apicv_active)) {
		/* need to update RVI */
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
							    apic_find_highest_irr(apic));
	} else {
		apic->irr_pending = false;
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
	apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood.  Instead
	 * just set SVI.
	 */
	if (unlikely(apic->apicv_active))
		static_call_cond(kvm_x86_hwapic_isr_update)(vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * ISR (in service register) bit is set when injecting an interrupt.
		 * The highest vector is injected. Thus the latest bit set matches
		 * the highest bit in ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment.  In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(apic->apicv_active))
		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/*
	 * This may race with setting of irr in __apic_accept_irq() and
	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
	 * will cause vmexit immediately and the value will be recalculated
	 * on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}
static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
			 struct kvm_lapic_irq *irq, u32 min)
{
	int i, count = 0;
	struct kvm_vcpu *vcpu;

	if (min > map->max_apic_id)
		return 0;

	for_each_set_bit(i, ipi_bitmap,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, irq, NULL);
		}
	}

	return count;
}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{
	struct kvm_apic_map *map;
	struct kvm_lapic_irq irq = {0};
	int cluster_size = op_64_bit ? 64 : 32;
	int count;

	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
		return -KVM_EINVAL;

	irq.vector = icr & APIC_VECTOR_MASK;
	irq.delivery_mode = icr & APIC_MODE_MASK;
	irq.level = (icr & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr & APIC_INT_LEVELTRIG;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	count = -EOPNOTSUPP;
	if (likely(map)) {
		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
		min += cluster_size;
		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
	}

	rcu_read_unlock();
	return count;
}
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
		return;

	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0)
		return false;

	val &= KVM_PV_EOI_ENABLED;

	if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
		return false;

	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);

	return val;
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
	int highest_irr;

	if (kvm_x86_ops.sync_pir_to_irr)
		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
	else
		highest_irr = apic_find_highest_irr(apic);
	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
		return -1;
	return highest_irr;
}
static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	*new_ppr = ppr;
	if (old_ppr != ppr)
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

	return ppr < old_ppr;
}
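
/*
 * Worked example of the PPR rule above: with TPR = 0x30 and highest
 * in-service vector 0x51, the ISR class (0x50) exceeds the TPR class
 * (0x30), so PPR becomes 0x50; with TPR = 0x55 instead, PPR would be 0x55.
 */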
static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 ppr;

	if (__apic_update_ppr(apic, &ppr) &&
	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
	apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	return mda == (apic_x2apic_mode(apic) ?
			X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	/*
	 * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
	 * were in x2APIC mode if the target APIC ID can't be encoded as an
	 * xAPIC ID.  This allows unique addressing of hotplugged vCPUs (which
	 * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
	 * mode.  Match the x2APIC ID if and only if the target APIC ID can't
	 * be encoded in xAPIC to avoid spurious matches against a vCPU that
	 * changed its (addressable) xAPIC ID (which is writable).
	 */
	if (apic_x2apic_mode(apic) || mda > 0xff)
		return mda == kvm_x2apic_id(apic);

	return mda == kvm_xapic_id(apic);
}
static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);

	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		return false;
	}
}
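
/*
 * Worked example of the xAPIC matching above: in flat mode, logical ID
 * 0x04 matches MDA 0x0c (0x04 & 0x0c != 0); in cluster mode, logical ID
 * 0x14 matches MDA 0x1c (same cluster 0x1, overlapping low nibble).
 */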
/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
		struct kvm_lapic *source, struct kvm_lapic *target)
{
	bool ipi = source != NULL;

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
		return X2APIC_BROADCAST;

	return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int shorthand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(vcpu, dest, source, target);

	ASSERT(target);
	switch (shorthand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}
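
/*
 * Worked example of the hashing above: for vector 0x23 (35) and 3
 * destination vCPUs, mod = 35 % 3 = 2, so the loop returns the index of
 * the third set bit in @bitmap, spreading vectors across the candidates.
 */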
static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		pr_info("Disabled LAPIC found during irq injection\n");
	}
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
		if ((irq->dest_id == APIC_BROADCAST &&
		     map->logical_mode != KVM_APIC_MODE_X2APIC))
			return true;
		if (irq->dest_id == X2APIC_BROADCAST)
			return true;
	} else {
		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
		if (irq->dest_id == (x2apic_ipi ?
				     X2APIC_BROADCAST : APIC_BROADCAST))
			return true;
	}

	return false;
}

/*
 * Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
{
	int i, lowest;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
			*dst = &map->phys_map[dest_id];
			*bitmap = 1;
		}
		return true;
	}

	*bitmap = 0;
	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
				(u16 *)bitmap))
		return false;

	if (!kvm_lowest_prio_delivery(irq))
		return true;

	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						(*dst)[lowest]->vcpu) < 0)
				lowest = i;
		}
	} else {
		if (!*bitmap)
			return true;

		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
				bitmap, 16);

		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}

	*bitmap = (lowest >= 0) ? 1 << lowest : 0;

	return true;
}
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		if (KVM_BUG_ON(!src, kvm)) {
			*r = 0;
			return true;
		}
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret) {
		*r = 0;
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
			hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		fallthrough;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				kvm_lapic_set_vector(vector,
						     apic->regs + APIC_TMR);
			else
				kvm_lapic_clear_vector(vector,
						       apic->regs + APIC_TMR);
		}

		static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
						       trig_mode, vector);
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		if (!kvm_inject_smi(vcpu)) {
			kvm_vcpu_kick(vcpu);
			result = 1;
		}
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_STARTUP:
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}
/*
 * This routine identifies the destination vCPUs meant to receive the given
 * IOAPIC interrupt.  It either uses kvm_apic_map_get_dest_lapic() to find
 * the destination vCPU array and fill in the bitmap, or it traverses each
 * available vCPU to identify them.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
			      unsigned long *vcpu_bitmap)
{
	struct kvm_lapic **dest_vcpu = NULL;
	struct kvm_lapic *src = NULL;
	struct kvm_apic_map *map;
	struct kvm_vcpu *vcpu;
	unsigned long bitmap, i;
	int vcpu_idx;
	bool ret;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
					  &bitmap);
	if (ret) {
		for_each_set_bit(i, &bitmap, 16) {
			if (!dest_vcpu[i])
				continue;
			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
			__set_bit(vcpu_idx, vcpu_bitmap);
		}
	} else {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!kvm_apic_present(vcpu))
				continue;
			if (!kvm_apic_match_dest(vcpu, NULL,
						 irq->shorthand,
						 irq->dest_id,
						 irq->dest_mode))
				continue;
			__set_bit(i, vcpu_bitmap);
		}
	}
	rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every EOI write has a corresponding ISR bit; one example is
	 * when the kernel checks the timer in setup_IO_APIC().
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (kvm_hv_synic_has_vector(apic->vcpu, vector))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}
/*
 * This interface assumes a trap-like exit, which has already finished the
 * desired side effects, including vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
	struct kvm_lapic_irq irq;

	/* KVM has no delay and should always clear the BUSY/PENDING flag. */
	WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);

static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining, now;
	s64 ns;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	return div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->divide_count));
}
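
/*
 * Worked example of the count conversion above: each TMCCT tick spans
 * APIC_BUS_CYCLE_NS * divide_count nanoseconds, so with a divide count of
 * 16 (and APIC_BUS_CYCLE_NS == 1), 1600 ns of remaining time reads back as
 * TMCCT = 100.
 */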
static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ARBPRI:
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_lapic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		fallthrough;
	default:
		val = kvm_lapic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
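
/*
 * Worked example of the masks above: APIC_ID is offset 0x20, so
 * APIC_REG_MASK(APIC_ID) == BIT(2), and APIC_REGS_MASK(APIC_ISR, 8) sets
 * the eight consecutive bits covering ISR0..ISR7 (offsets 0x100-0x170).
 */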
u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
{
	/* Leave bits '0' for reserved and write-only registers. */
	u64 valid_reg_mask =
		APIC_REG_MASK(APIC_ID) |
		APIC_REG_MASK(APIC_LVR) |
		APIC_REG_MASK(APIC_TASKPRI) |
		APIC_REG_MASK(APIC_PROCPRI) |
		APIC_REG_MASK(APIC_LDR) |
		APIC_REG_MASK(APIC_SPIV) |
		APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
		APIC_REG_MASK(APIC_ESR) |
		APIC_REG_MASK(APIC_ICR) |
		APIC_REG_MASK(APIC_LVTT) |
		APIC_REG_MASK(APIC_LVTTHMR) |
		APIC_REG_MASK(APIC_LVTPC) |
		APIC_REG_MASK(APIC_LVT0) |
		APIC_REG_MASK(APIC_LVT1) |
		APIC_REG_MASK(APIC_LVTERR) |
		APIC_REG_MASK(APIC_TMICT) |
		APIC_REG_MASK(APIC_TMCCT) |
		APIC_REG_MASK(APIC_TDCR);

	if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
		valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);

	/* ARBPRI, DFR, and ICR2 are not valid in x2APIC mode. */
	if (!apic_x2apic_mode(apic))
		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
				  APIC_REG_MASK(APIC_DFR) |
				  APIC_REG_MASK(APIC_ICR2);

	return valid_reg_mask;
}
EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);
static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
			      void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;

	/*
	 * WARN if KVM reads ICR in x2APIC mode, as it's an 8-byte register in
	 * x2APIC and needs to be manually handled by the caller.
	 */
	WARN_ON_ONCE(apic_x2apic_mode(apic) && offset == APIC_ICR);

	if (alignment + len > 4)
		return 1;

	if (offset > 0x3f0 ||
	    !(kvm_lapic_readable_reg_mask(apic) & APIC_REG_MASK(offset)))
		return 1;

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1, 2, or 4 instead\n", len);
		break;
	}
	return 0;
}

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return addr >= apic->base_address &&
		addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		memset(data, 0xff, len);
		return 0;
	}

	kvm_lapic_reg_read(apic, offset, len, data);

	return 0;
}

static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);
}
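
/*
 * Worked example of the TDCR decode above, which folds bits 0, 1 and 3 of
 * the register: TDCR = 0x3 gives tmp2 = 4, i.e. divide by 16, while
 * TDCR = 0xb gives tmp2 = 8 and 0x1 << (8 & 7) = 1, i.e. the SDM's
 * "divide by 1" encoding of 0b1011.
 */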
static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
	/*
	 * Do not allow the guest to program periodic timers with small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (apic->lapic_timer.period < min_period) {
			pr_info_ratelimited(
			    "vcpu %i: requested %lld ns "
			    "lapic timer period limited to %lld ns\n",
			    apic->vcpu->vcpu_id,
			    apic->lapic_timer.period, min_period);
			apic->lapic_timer.period = min_period;
		}
	}
}

static void cancel_hv_timer(struct kvm_lapic *apic);

static void cancel_apic_timer(struct kvm_lapic *apic)
{
	hrtimer_cancel(&apic->lapic_timer.timer);
	preempt_disable();
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	preempt_enable();
	atomic_set(&apic->lapic_timer.pending, 0);
}

static void apic_update_lvtt(struct kvm_lapic *apic)
{
	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
			apic->lapic_timer.timer_mode_mask;

	if (apic->lapic_timer.timer_mode != timer_mode) {
		if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
				APIC_LVT_TIMER_TSCDEADLINE)) {
			cancel_apic_timer(apic);
			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
			apic->lapic_timer.period = 0;
			apic->lapic_timer.tscdeadline = 0;
		}
		apic->lapic_timer.timer_mode = timer_mode;
		limit_periodic_timer_frequency(apic);
	}
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */

static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

	if (kvm_apic_hw_enabled(apic)) {
		int vec = reg & APIC_VECTOR_MASK;
		void *bitmap = apic->regs + APIC_ISR;

		if (apic->apicv_active)
			bitmap = apic->regs + APIC_IRR;

		if (apic_test_vector(vec, bitmap))
			return true;
	}
	return false;
}

static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
{
	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;

	/*
	 * If the guest TSC is running at a different ratio than the host, then
	 * convert the delay to nanoseconds to achieve an accurate delay.  Note
	 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
	 * always for VMX enabled hardware.
	 */
	if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
		__delay(min(guest_cycles,
			nsec_to_cycles(vcpu, timer_advance_ns)));
	} else {
		u64 delay_ns = guest_cycles * 1000000ULL;

		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
		ndelay(min_t(u32, delay_ns, timer_advance_ns));
	}
}

static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
					      s64 advance_expire_delta)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
	u64 ns;

	/* Do not adjust for tiny fluctuations or large random spikes. */
	if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
	    abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
		return;

	/* too early */
	if (advance_expire_delta < 0) {
		ns = -advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	} else {
	/* too late */
		ns = advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	}

	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
	apic->lapic_timer.timer_advance_ns = timer_advance_ns;
}
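
/*
 * Worked example of the adjustment above: if the timer fired 800 TSC
 * cycles late on a guest with virtual_tsc_khz = 2000000 (2 GHz), then
 * ns = 800 * 1000000 / 2000000 = 400, and the advance grows by
 * 400 / LAPIC_TIMER_ADVANCE_ADJUST_STEP = 50 ns per iteration.
 */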
static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 guest_tsc, tsc_deadline;

	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
	apic->lapic_timer.expired_tscdeadline = 0;
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

	if (lapic_timer_advance_dynamic) {
		adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
		/*
		 * If the timer fired early, reread the TSC to account for the
		 * overhead of the above adjustment to avoid waiting longer
		 * than is necessary.
		 */
		if (guest_tsc < tsc_deadline)
			guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	}

	if (guest_tsc < tsc_deadline)
		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
}

void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu) &&
	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
	    lapic_timer_int_injected(vcpu))
		__kvm_wait_lapic_expire(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);

static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;

	kvm_apic_local_deliver(apic, APIC_LVTT);
	if (apic_lvtt_tscdeadline(apic)) {
		ktimer->tscdeadline = 0;
	} else if (apic_lvtt_oneshot(apic)) {
		ktimer->tscdeadline = 0;
		ktimer->target_expiration = 0;
	}
}

static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_timer *ktimer = &apic->lapic_timer;

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
		ktimer->expired_tscdeadline = ktimer->tscdeadline;

	if (!from_timer_fn && apic->apicv_active) {
		WARN_ON(kvm_get_running_vcpu() != vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
		/*
		 * Ensure the guest's timer has truly expired before posting an
		 * interrupt.  Open code the relevant checks to avoid querying
		 * lapic_timer_int_injected(), which will be false since the
		 * interrupt isn't yet injected.  Waiting until after injecting
		 * is not an option since that won't help a posted interrupt.
		 */
		if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
		    vcpu->arch.apic->lapic_timer.timer_advance_ns)
			__kvm_wait_lapic_expire(vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	atomic_inc(&apic->lapic_timer.pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	if (from_timer_fn)
		kvm_vcpu_kick(vcpu);
}
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;
	u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
	u64 ns = 0;
	ktime_t expire;
	struct kvm_vcpu *vcpu = apic->vcpu;
	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
	unsigned long flags;
	ktime_t now;

	if (unlikely(!tscdeadline || !this_tsc_khz))
		return;

	local_irq_save(flags);

	now = ktime_get();
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());

	ns = (tscdeadline - guest_tsc) * 1000000ULL;
	do_div(ns, this_tsc_khz);

	if (likely(tscdeadline > guest_tsc) &&
	    likely(ns > apic->lapic_timer.timer_advance_ns)) {
		expire = ktime_add_ns(now, ns);
		expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
	} else
		apic_timer_expired(apic, false);

	local_irq_restore(flags);
}

static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
{
	return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
}

static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
{
	ktime_t now, remaining;
	u64 ns_remaining_old, ns_remaining_new;

	apic->lapic_timer.period =
			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
	limit_periodic_timer_frequency(apic);

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns_remaining_old = ktime_to_ns(remaining);
	ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
	                                   apic->divide_count, old_divisor);

	apic->lapic_timer.tscdeadline +=
		nsec_to_cycles(apic->vcpu, ns_remaining_new) -
		nsec_to_cycles(apic->vcpu, ns_remaining_old);
	apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
}

static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
{
	ktime_t now;
	u64 tscl = rdtsc();
	s64 deadline;

	now = ktime_get();
	apic->lapic_timer.period =
			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));

	if (!apic->lapic_timer.period) {
		apic->lapic_timer.tscdeadline = 0;
		return false;
	}

	limit_periodic_timer_frequency(apic);
	deadline = apic->lapic_timer.period;

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
		if (unlikely(count_reg != APIC_TMICT)) {
			deadline = tmict_to_ns(apic,
				     kvm_lapic_get_reg(apic, count_reg));
			if (unlikely(deadline <= 0)) {
				if (apic_lvtt_period(apic))
					deadline = apic->lapic_timer.period;
				else
					deadline = 0;
			}
			else if (unlikely(deadline > apic->lapic_timer.period)) {
				pr_info_ratelimited(
				    "vcpu %i: requested lapic timer restore with "
				    "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
				    "Using initial count to start timer.\n",
				    apic->vcpu->vcpu_id,
				    count_reg,
				    kvm_lapic_get_reg(apic, count_reg),
				    deadline, apic->lapic_timer.period);
				kvm_lapic_set_reg(apic, count_reg, 0);
				deadline = apic->lapic_timer.period;
			}
		}
	}

	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
		nsec_to_cycles(apic->vcpu, deadline);
	apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);

	return true;
}
static void advance_periodic_target_expiration(struct kvm_lapic *apic)
{
	ktime_t now = ktime_get();
	u64 tscl = rdtsc();
	ktime_t delta;

	/*
	 * Synchronize both deadlines to the same time source or
	 * differences in the periods (caused by differences in the
	 * underlying clocks or numerical approximation errors) will
	 * cause the two to drift apart over time as the errors
	 * accumulate.
	 */
	apic->lapic_timer.target_expiration =
		ktime_add_ns(apic->lapic_timer.target_expiration,
				apic->lapic_timer.period);
	delta = ktime_sub(apic->lapic_timer.target_expiration, now);
	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
		nsec_to_cycles(apic->vcpu, delta);
}

static void start_sw_period(struct kvm_lapic *apic)
{
	if (!apic->lapic_timer.period)
		return;

	if (ktime_after(ktime_get(),
			apic->lapic_timer.target_expiration)) {
		apic_timer_expired(apic, false);

		if (apic_lvtt_oneshot(apic))
			return;

		advance_periodic_target_expiration(apic);
	}

	hrtimer_start(&apic->lapic_timer.timer,
		apic->lapic_timer.target_expiration,
		HRTIMER_MODE_ABS_HARD);
}

bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return false;

	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}

static void cancel_hv_timer(struct kvm_lapic *apic)
{
	WARN_ON(preemptible());
	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
	apic->lapic_timer.hv_timer_in_use = false;
}

static bool start_hv_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;
	struct kvm_vcpu *vcpu = apic->vcpu;
	bool expired;

	WARN_ON(preemptible());
	if (!kvm_can_use_hv_timer(vcpu))
		return false;

	if (!ktimer->tscdeadline)
		return false;

	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
		return false;

	ktimer->hv_timer_in_use = true;
	hrtimer_cancel(&ktimer->timer);

	/*
	 * To simplify handling the periodic timer, leave the hv timer running
	 * even if the deadline timer has expired, i.e. rely on the resulting
	 * VM-Exit to recompute the periodic timer's target expiration.
	 */
	if (!apic_lvtt_period(apic)) {
		/*
		 * Cancel the hv timer if the sw timer fired while the hv timer
		 * was being programmed, or if the hv timer itself expired.
		 */
		if (atomic_read(&ktimer->pending)) {
			cancel_hv_timer(apic);
		} else if (expired) {
			apic_timer_expired(apic, false);
			cancel_hv_timer(apic);
		}
	}

	trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);

	return true;
}
static void start_sw_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;

	WARN_ON(preemptible());
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
		return;

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
		start_sw_period(apic);
	else if (apic_lvtt_tscdeadline(apic))
		start_sw_tscdeadline(apic);
	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
}

static void restart_apic_timer(struct kvm_lapic *apic)
{
	preempt_disable();

	if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
		goto out;

	if (!start_hv_timer(apic))
		start_sw_timer(apic);
out:
	preempt_enable();
}

void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	preempt_disable();
	/* If the preempt notifier has already run, it also called apic_timer_expired */
	if (!apic->lapic_timer.hv_timer_in_use)
		goto out;
	WARN_ON(kvm_vcpu_is_blocking(vcpu));
	apic_timer_expired(apic, false);
	cancel_hv_timer(apic);

	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		advance_periodic_target_expiration(apic);
		restart_apic_timer(apic);
	}
out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
	restart_apic_timer(vcpu->arch.apic);
}

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	preempt_disable();
	/* Possibly the TSC deadline timer is not enabled yet */
	if (apic->lapic_timer.hv_timer_in_use)
		start_sw_timer(apic);
	preempt_enable();
}

void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	restart_apic_timer(apic);
}

static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
{
	atomic_set(&apic->lapic_timer.pending, 0);

	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
	    && !set_target_expiration(apic, count_reg))
		return;

	restart_apic_timer(apic);
}

static void start_apic_timer(struct kvm_lapic *apic)
{
	__start_apic_timer(apic, APIC_TMICT);
}

static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode) {
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		} else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	}
}

static int get_lvt_index(u32 reg)
{
	if (reg == APIC_LVTCMCI)
		return LVT_CMCI;
	if (reg < APIC_LVTT || reg > APIC_LVTERR)
		return -1;
	return array_index_nospec(
			(reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);
}
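
/*
 * Worked example of the index math above: APIC_LVTTHMR (0x330) maps to
 * (0x330 - APIC_LVTT) >> 4 = 1 == LVT_THERMAL_MONITOR, while the
 * out-of-band APIC_LVTCMCI (0x2f0) is special-cased to LVT_CMCI.
 */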
static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic)) {
			kvm_apic_set_xapic_id(apic, val >> 24);
		} else {
			ret = 1;
		}
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
		else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;

		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;

			for (i = 0; i < apic->nr_lvt_entries; i++) {
				kvm_lapic_set_reg(apic, APIC_LVTx(i),
					kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
			}
			apic_update_lvtt(apic);
			atomic_set(&apic->lapic_timer.pending, 0);
		}
		break;
	}
	case APIC_ICR:
		WARN_ON_ONCE(apic_x2apic_mode(apic));

		/* No delay here, so we always clear the pending bit */
		val &= ~APIC_ICR_BUSY;
		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
		kvm_lapic_set_reg(apic, APIC_ICR, val);
		break;
	case APIC_ICR2:
		if (apic_x2apic_mode(apic))
			ret = 1;
		else
			kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
		fallthrough;
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_LVTCMCI: {
		u32 index = get_lvt_index(reg);
		if (!kvm_lapic_lvt_supported(apic, index)) {
			ret = 1;
			break;
		}
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= apic_lvt_mask[index];
		kvm_lapic_set_reg(apic, reg, val);
		break;
	}

	case APIC_LVTT:
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		kvm_lapic_set_reg(apic, APIC_LVTT, val);
		apic_update_lvtt(apic);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		cancel_apic_timer(apic);
		kvm_lapic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR: {
		uint32_t old_divisor = apic->divide_count;

		kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
		update_divide_count(apic);
		if (apic->divide_count != old_divisor &&
				apic->lapic_timer.period) {
			hrtimer_cancel(&apic->lapic_timer.timer);
			update_target_expiration(apic, old_divisor);
			restart_apic_timer(apic);
		}
		break;
	}
	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0)
			ret = 1;
		break;

	case APIC_SELF_IPI:
		/*
		 * Self-IPI exists only when x2APIC is enabled.  Bits 7:0 hold
		 * the vector, everything else is reserved.
		 */
		if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
			ret = 1;
		else
			kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
		break;
	default:
		ret = 1;
		break;
	}

	/*
	 * Recalculate APIC maps if necessary, e.g. if the software enable bit
	 * was toggled, the APIC ID changed, etc...  The maps are marked dirty
	 * on relevant changes, i.e. this is a nop for most writes.
	 */
	kvm_recalculate_apic_map(apic->vcpu->kvm);

	return ret;
}
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		return 0;
	}

	/*
	 * APIC registers must be aligned on a 128-bit boundary, and
	 * 32/64/128-bit registers must be accessed through 32-bit reads and
	 * writes.  Refer to SDM 8.4.1.
	 */
	if (len != 4 || (offset & 0xf))
		return 0;

	val = *(u32 *)data;

	kvm_lapic_reg_write(apic, offset & 0xff0, val);

	return 0;
}
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	/*
	 * ICR is a single 64-bit register when x2APIC is enabled, all other
	 * registers hold 32-bit values.  For legacy xAPIC, ICR writes need to
	 * go down the common path to get the upper half from ICR2.
	 *
	 * Note, using the write helpers may incur an unnecessary write to the
	 * virtual APIC state, but KVM needs to conditionally modify the value
	 * in certain cases, e.g. to clear the ICR busy bit.  The cost of extra
	 * conditional branches is likely a wash relative to the cost of the
	 * maybe-unnecessary write, and both are in the noise anyways.
	 */
	if (apic_x2apic_mode(apic) && offset == APIC_ICR)
		kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
	else
		kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
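/*
 * Free the vCPU's in-kernel APIC.  apic_hw_disabled and apic_sw_disabled
 * count disabled APICs, so an APIC that is still disabled at teardown
 * must drop its reference on the corresponding deferred static key.
 */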
void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic)
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_branch_slow_dec_deferred(&apic_hw_disabled);

	if (!apic->sw_enabled)
		static_branch_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}
/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}
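/*
 * CR8 mirrors bits 7:4 of the TPR, so the two helpers below shift by
 * four in each direction.  For example, "mov $0x3, %cr8" results in
 * APIC_TASKPRI = 0x30, and reading CR8 back returns 0x3.
 */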
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	vcpu->arch.apic_base = value;

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		kvm_update_cpuid_runtime(vcpu);

	if (!apic)
		return;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_branch_slow_dec_deferred(&apic_hw_disabled);
			/* Check if there are APF page ready requests pending */
			kvm_make_request(KVM_REQ_APF_READY, vcpu);
		} else {
			static_branch_inc(&apic_hw_disabled.key);
			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
		}
	}

	if ((old_value ^ value) & X2APIC_ENABLE) {
		if (value & X2APIC_ENABLE)
			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
		else if (value & MSR_IA32_APICBASE_ENABLE)
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	}

	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
		static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE) {
		kvm_set_apicv_inhibit(apic->vcpu->kvm,
				      APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
	}
}
void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic->apicv_active) {
		/* irr_pending is always true when apicv is activated. */
		apic->irr_pending = true;
		apic->isr_count = 1;
	} else {
		/*
		 * Don't clear irr_pending, searching the IRR can race with
		 * updates from the CPU as APICv is still active from hardware's
		 * perspective.  The flag will be cleared as appropriate when
		 * KVM injects the interrupt.
		 */
		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
	}
	apic->highest_isr_cache = -1;
}
int kvm_alloc_apic_access_page(struct kvm *kvm)
{
	struct page *page;
	void __user *hva;
	int ret = 0;

	mutex_lock(&kvm->slots_lock);
	if (kvm->arch.apic_access_memslot_enabled ||
	    kvm->arch.apic_access_memslot_inhibited)
		goto out;

	hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				      APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
	if (IS_ERR(hva)) {
		ret = PTR_ERR(hva);
		goto out;
	}

	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (is_error_page(page)) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * Do not pin the page in memory, so that memory hot-unplug
	 * is able to migrate it.
	 */
	put_page(page);
	kvm->arch.apic_access_memslot_enabled = true;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);
void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	if (!kvm->arch.apic_access_memslot_enabled)
		return;

	kvm_vcpu_srcu_read_unlock(vcpu);

	mutex_lock(&kvm->slots_lock);

	if (kvm->arch.apic_access_memslot_enabled) {
		__x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
		/*
		 * Clear "enabled" after the memslot is deleted so that a
		 * different vCPU doesn't get a false negative when checking
		 * the flag out of slots_lock.  No additional memory barrier is
		 * needed as modifying memslots requires waiting for other vCPUs
		 * to drop SRCU (see above), and false positives are ok as the
		 * flag is rechecked after acquiring slots_lock.
		 */
		kvm->arch.apic_access_memslot_enabled = false;

		/*
		 * Mark the memslot as inhibited to prevent reallocating the
		 * memslot during vCPU creation, e.g. if a vCPU is hotplugged.
		 */
		kvm->arch.apic_access_memslot_inhibited = true;
	}

	mutex_unlock(&kvm->slots_lock);

	kvm_vcpu_srcu_read_lock(vcpu);
}
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 msr_val;
	int i;

	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);

	if (!init_event) {
		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(vcpu))
			msr_val |= MSR_IA32_APICBASE_BSP;
		kvm_lapic_set_base(vcpu, msr_val);
	}

	if (!apic)
		return;

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	/* The xAPIC ID is set at RESET even if the APIC was already enabled. */
	if (!init_event)
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < apic->nr_lvt_entries; i++)
		kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_vcpu_is_reset_bsp(vcpu) &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_apic_set_dfr(apic, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	if (!apic_x2apic_mode(apic)) {
		kvm_lapic_set_reg(apic, APIC_ICR, 0);
		kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	} else {
		kvm_lapic_set_reg64(apic, APIC_ICR, 0);
	}
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	kvm_apic_update_apicv(vcpu);
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);

	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
		static_call_cond(kvm_x86_hwapic_isr_update)(-1);
	}

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	kvm_recalculate_apic_map(vcpu->kvm);
}
/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */
static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;
	int r;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;

		r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
		if (r && lvt_type == APIC_LVTPC)
			kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
		return r;
	}
	return 0;
}
void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};
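/*
 * hrtimer callback for the emulated APIC timer.  In periodic mode the
 * expiry is pushed forward by one period and HRTIMER_RESTART keeps the
 * timer queued; one-shot and TSC-deadline timers simply expire.
 */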
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic, true);

	if (lapic_is_periodic(apic)) {
		advance_periodic_target_expiration(apic);
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}
int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	apic->lapic_timer.timer.function = apic_timer_fn;
	if (timer_advance_ns == -1) {
		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
		lapic_timer_advance_dynamic = true;
	} else {
		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
		lapic_timer_advance_dynamic = false;
	}

	/*
	 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
	 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
	vcpu->arch.apic = NULL;
nomem:
	return -ENOMEM;
}
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (!kvm_apic_present(vcpu))
		return -1;

	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
}
EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
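/*
 * A PIC-sourced ExtINT is accepted if the local APIC is hardware-disabled,
 * or if LVT0 is unmasked and programmed for EXTINT delivery mode (the
 * "virtual wire" configuration).
 */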
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		return 1;
	return 0;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_inject_pending_timer_irqs(apic);
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_clear_irr(vector, apic);
	if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt---except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
		u64 icr;

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/*
		 * In x2APIC mode, the LDR is fixed and based on the id.  And
		 * ICR is internally a single 64-bit register, but needs to be
		 * split to ICR+ICR2 in userspace for backwards compatibility.
		 */
		if (set) {
			*ldr = kvm_apic_calc_x2apic_ldr(*id);

			icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
			      (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
			__kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
		} else {
			icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
			__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
		}
	}

	return 0;
}
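/*
 * A minimal sketch of the ICR fixup above, using hypothetical local
 * variables icr_lo/icr2_hi purely for illustration: userspace always sees
 * the legacy split layout, so a set joins the halves and a get splits
 * them back apart:
 *
 *	u64 icr = (u64)icr2_hi << 32 | icr_lo;	// set: ICR2:ICR -> ICR
 *	u32 icr2_hi = (u32)(icr >> 32);		// get: ICR -> ICR2
 */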
int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	/*
	 * Get calculated timer current count for remaining timer period (if
	 * any) and store it in the returned register set.
	 */
	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
			    __apic_read(vcpu->arch.apic, APIC_TMCCT));

	return kvm_apic_state_fixup(vcpu, s, false);
}
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r) {
		kvm_recalculate_apic_map(vcpu->kvm);
		return r;
	}
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	kvm_recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	cancel_apic_timer(apic);
	apic->lapic_timer.expired_tscdeadline = 0;
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	__start_apic_timer(apic, APIC_TMCCT);
	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
	kvm_apic_update_apicv(vcpu);
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}
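/*
 * Re-arm the emulated timer's hrtimer on the current pCPU, e.g. after the
 * vCPU migrates, so that expirations fire locally.  This is skipped when
 * the timer interrupt can be posted directly (see
 * kvm_can_post_timer_interrupt()).
 */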
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu) ||
		kvm_can_post_timer_interrupt(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
}
/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether guest triggered PV EOI since the
 * last entry. If yes, set EOI on guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));

	if (pv_eoi_test_and_clr_pending(vcpu))
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * enable it.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}
int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
{
	data &= ~APIC_ICR_BUSY;

	kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
	kvm_lapic_set_reg64(apic, APIC_ICR, data);
	trace_kvm_apic_write(APIC_ICR, data);
	return 0;
}
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
{
	u32 low;

	if (reg == APIC_ICR) {
		*data = kvm_lapic_get_reg64(apic, APIC_ICR);
		return 0;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;

	*data = low;

	return 0;
}

static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
{
	/*
	 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
	 * can be written as such, all other registers remain accessible only
	 * through 32-bit reads/writes.
	 */
	if (reg == APIC_ICR)
		return kvm_x2apic_icr_write(apic, data);

	/* Bits 63:32 are reserved in all other registers. */
	if (data >> 32)
		return 1;

	return kvm_lapic_reg_write(apic, reg, (u32)data);
}
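/*
 * x2APIC MSRs map linearly onto the xAPIC register space:
 * reg = (msr - APIC_BASE_MSR) << 4.  For example, with APIC_BASE_MSR
 * being 0x800, the ICR MSR 0x830 yields (0x830 - 0x800) << 4 = 0x300,
 * i.e. APIC_ICR.
 */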
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	return kvm_lapic_msr_write(apic, reg, data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	return kvm_lapic_msr_read(apic, reg, data);
}
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
}
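/*
 * Handle a guest write to MSR_KVM_PV_EOI_EN: bit 0 (KVM_MSR_ENABLED)
 * turns PV EOI on, and the remaining bits hold the guest-physical address
 * of the PV EOI flag word, which must be 4-byte aligned.
 */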
int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;
	int ret;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	if (data & KVM_MSR_ENABLED) {
		if (addr == ghc->gpa && len <= ghc->len)
			new_len = ghc->len;
		else
			new_len = len;

		ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
		if (ret)
			return ret;
	}

	vcpu->arch.pv_eoi.msr_val = data;

	return 0;
}
int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	int r;

	if (!kvm_apic_has_pending_init_or_sipi(vcpu))
		return 0;

	if (is_guest_mode(vcpu)) {
		r = kvm_check_nested_events(vcpu);
		if (r < 0)
			return r == -EBUSY ? 0 : r;
		/*
		 * Continue processing INIT/SIPI even if a nested VM-Exit
		 * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
		 * are blocked as a result of transitioning to VMX root mode.
		 */
	}

	/*
	 * INITs are blocked while CPU is in specific states (SMM, VMX root
	 * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
	 * wait-for-SIPI (WFS).
	 */
	if (!kvm_apic_init_sipi_allowed(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return 0;
	}

	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			/* evaluate pending_events before reading the vector */
			smp_rmb();
			sipi_vector = apic->sipi_vector;
			static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		}
	}
	return 0;
}
void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
	static_key_deferred_flush(&apic_sw_disabled);
	WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
}