1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 * Yaniv Kamay <yaniv@qumranet.com>
12 * Avi Kivity <avi@qumranet.com>
15 #define pr_fmt(fmt) "SVM: " fmt
17 #include <linux/kvm_types.h>
18 #include <linux/hashtable.h>
19 #include <linux/amd-iommu.h>
20 #include <linux/kvm_host.h>
22 #include <asm/irq_remapping.h>
31 * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e. the vCPU ID,
32 * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
33 * if an interrupt can't be delivered, e.g. because the vCPU isn't running.
35 * For the vCPU ID, use however many bits are currently allowed for the max
36 * guest physical APIC ID (limited by the size of the physical ID table), and
37 * use whatever bits remain to assign arbitrary AVIC IDs to VMs. Note, the
38 * size of the GATag is defined by hardware (32 bits), but is an opaque value
39 * as far as hardware is concerned.
41 #define AVIC_VCPU_ID_MASK AVIC_PHYSICAL_MAX_INDEX_MASK
43 #define AVIC_VM_ID_SHIFT HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
44 #define AVIC_VM_ID_MASK (GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)
46 #define AVIC_GATAG(x, y) (((x & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
47 (y & AVIC_VCPU_ID_MASK))
48 #define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
49 #define AVIC_GATAG_TO_VCPUID(x) (x & AVIC_VCPU_ID_MASK)
51 static_assert(AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
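/*
 * For illustration, assuming the physical max index mask is 9 bits wide
 * (0x1ff, matching the 0x1ff index extraction used further down in this
 * file): AVIC_GATAG(0x5, 0x3) = (0x5 << 9) | 0x3 = 0xa03, and decoding it
 * gives AVIC_GATAG_TO_VMID(0xa03) = 0x5 and AVIC_GATAG_TO_VCPUID(0xa03) = 0x3.
 */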
53 static bool force_avic;
54 module_param_unsafe(force_avic, bool, 0444);
57 * This hash table is used to map VM_ID to a struct kvm_svm,
58 * when handling AMD IOMMU GALOG notification to schedule in a particular vCPU.
61 #define SVM_VM_DATA_HASH_BITS 8
62 static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
63 static u32 next_vm_id = 0;
64 static bool next_vm_id_wrapped = 0;
65 static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
66 enum avic_modes avic_mode;
69 * This is a wrapper of struct amd_iommu_ir_data.
71 struct amd_svm_iommu_ir {
72 struct list_head node; /* Used by SVM for per-vcpu ir_list */
73 void *data; /* Storing pointer to struct amd_ir_data */
76 static void avic_activate_vmcb(struct vcpu_svm *svm)
78 struct vmcb *vmcb = svm->vmcb01.ptr;
80 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
81 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
83 vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
86 * KVM can support hybrid-AVIC mode, where KVM emulates x2APIC
87 * MSR accesses, while interrupt injection to a running vCPU
88 * can be achieved using AVIC doorbell. The AVIC hardware still
89 * accelerates MMIO accesses, but this does not cause any harm
90 * as the guest is not supposed to access xAPIC MMIO when it uses x2APIC.
92 if (apic_x2apic_mode(svm->vcpu.arch.apic) &&
93 avic_mode == AVIC_MODE_X2) {
94 vmcb->control.int_ctl |= X2APIC_MODE_MASK;
95 vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
96 /* Disabling MSR intercept for x2APIC registers */
97 svm_set_x2apic_msr_interception(svm, false);
100 * Flush the TLB, the guest may have inserted a non-APIC
101 * mapping into the TLB while AVIC was disabled.
103 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);
105 /* For xAVIC and hybrid-xAVIC modes */
106 vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
107 /* Enabling MSR intercept for x2APIC registers */
108 svm_set_x2apic_msr_interception(svm, true);
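/*
 * To summarize the two configurations above: x2AVIC sets X2APIC_MODE_MASK,
 * uses the larger X2AVIC_MAX_PHYSICAL_ID limit and disables the x2APIC MSR
 * intercepts, while xAVIC/hybrid-xAVIC uses AVIC_MAX_PHYSICAL_ID and keeps
 * the x2APIC MSR intercepts enabled so that KVM emulates those accesses.
 */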
112 static void avic_deactivate_vmcb(struct vcpu_svm *svm)
114 struct vmcb *vmcb = svm->vmcb01.ptr;
116 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
117 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
120 * If running nested and the guest uses its own MSR bitmap, there
121 * is no need to update L0's msr bitmap
123 if (is_guest_mode(&svm->vcpu) &&
124 vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))
127 /* Enabling MSR intercept for x2APIC registers */
128 svm_set_x2apic_msr_interception(svm, true);
132 * This function is called from IOMMU driver to notify
133 * SVM to schedule in a particular vCPU of a particular VM.
135 int avic_ga_log_notifier(u32 ga_tag)
138 struct kvm_svm *kvm_svm;
139 struct kvm_vcpu *vcpu = NULL;
140 u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
141 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
143 pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
144 trace_kvm_avic_ga_log(vm_id, vcpu_id);
146 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
147 hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
148 if (kvm_svm->avic_vm_id != vm_id)
150 vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
153 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
156 * At this point, the IOMMU should have already set the pending
157 * bit in the vAPIC backing page. So, we just need to schedule in the vCPU.
161 kvm_vcpu_wake_up(vcpu);
166 void avic_vm_destroy(struct kvm *kvm)
169 struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
174 if (kvm_svm->avic_logical_id_table_page)
175 __free_page(kvm_svm->avic_logical_id_table_page);
176 if (kvm_svm->avic_physical_id_table_page)
177 __free_page(kvm_svm->avic_physical_id_table_page);
179 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
180 hash_del(&kvm_svm->hnode);
181 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
184 int avic_vm_init(struct kvm *kvm)
188 struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
197 /* Allocating physical APIC ID table (4KB) */
198 p_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
202 kvm_svm->avic_physical_id_table_page = p_page;
204 /* Allocating logical APIC ID table (4KB) */
205 l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
209 kvm_svm->avic_logical_id_table_page = l_page;
211 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
213 vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
214 if (vm_id == 0) { /* id is 1-based, zero is not okay */
215 next_vm_id_wrapped = 1;
218 /* Is it still in use? Only possible if wrapped at least once */
219 if (next_vm_id_wrapped) {
220 hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
221 if (k2->avic_vm_id == vm_id)
225 kvm_svm->avic_vm_id = vm_id;
226 hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
227 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
232 avic_vm_destroy(kvm);
236 void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
238 struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
239 phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
240 phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
241 phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
243 vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
244 vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
245 vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
246 vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK;
248 if (kvm_apicv_activated(svm->vcpu.kvm))
249 avic_activate_vmcb(svm);
251 avic_deactivate_vmcb(svm);
254 static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
257 u64 *avic_physical_id_table;
258 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
260 if ((avic_mode == AVIC_MODE_X1 && index > AVIC_MAX_PHYSICAL_ID) ||
261 (avic_mode == AVIC_MODE_X2 && index > X2AVIC_MAX_PHYSICAL_ID))
264 avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
266 return &avic_physical_id_table[index];
271 * AVIC hardware walks the nested page table to check permissions,
272 * but does not use the SPA address specified in the leaf page
273 * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
274 * field of the VMCB. Therefore, we set up the
275 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
277 static int avic_alloc_access_page(struct kvm *kvm)
282 mutex_lock(&kvm->slots_lock);
284 if (kvm->arch.apic_access_memslot_enabled)
287 ret = __x86_set_memory_region(kvm,
288 APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
289 APIC_DEFAULT_PHYS_BASE,
296 kvm->arch.apic_access_memslot_enabled = true;
298 mutex_unlock(&kvm->slots_lock);
302 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
304 u64 *entry, new_entry;
305 int id = vcpu->vcpu_id;
306 struct vcpu_svm *svm = to_svm(vcpu);
308 if ((avic_mode == AVIC_MODE_X1 && id > AVIC_MAX_PHYSICAL_ID) ||
309 (avic_mode == AVIC_MODE_X2 && id > X2AVIC_MAX_PHYSICAL_ID))
312 if (!vcpu->arch.apic->regs)
315 if (kvm_apicv_activated(vcpu->kvm)) {
318 ret = avic_alloc_access_page(vcpu->kvm);
323 svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);
325 /* Setting AVIC backing page address in the physical APIC ID table */
326 entry = avic_get_physical_id_entry(vcpu, id);
330 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
331 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
332 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
333 WRITE_ONCE(*entry, new_entry);
335 svm->avic_physical_id_cache = entry;
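/*
 * The cached entry pointer is what avic_vcpu_load(), avic_vcpu_put() and
 * svm_ir_list_add() below consult and update (host physical APIC ID and
 * the IsRunning bit) without re-walking the physical ID table.
 */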
340 void avic_ring_doorbell(struct kvm_vcpu *vcpu)
343 * Note, the vCPU could get migrated to a different pCPU at any point,
344 * which could result in signalling the wrong/previous pCPU. But if
345 * that happens the vCPU is guaranteed to do a VMRUN (after being
346 * migrated) and thus will process pending interrupts, i.e. a doorbell
347 * is not needed (and the spurious one is harmless).
349 int cpu = READ_ONCE(vcpu->cpu);
351 if (cpu != get_cpu()) {
352 wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
353 trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu));
359 * A fast-path version of avic_kick_target_vcpus(), which attempts to match
360 * destination APIC ID to vCPU without looping through all vCPUs.
362 static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source,
363 u32 icrl, u32 icrh, u32 index)
365 u32 l1_physical_id, dest;
366 struct kvm_vcpu *target_vcpu;
367 int dest_mode = icrl & APIC_DEST_MASK;
368 int shorthand = icrl & APIC_SHORT_MASK;
369 struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
371 if (shorthand != APIC_DEST_NOSHORT)
374 if (apic_x2apic_mode(source))
377 dest = GET_XAPIC_DEST_FIELD(icrh);
379 if (dest_mode == APIC_DEST_PHYSICAL) {
380 /* broadcast destination, use slow path */
381 if (apic_x2apic_mode(source) && dest == X2APIC_BROADCAST)
383 if (!apic_x2apic_mode(source) && dest == APIC_BROADCAST)
386 l1_physical_id = dest;
388 if (WARN_ON_ONCE(l1_physical_id != index))
395 if (apic_x2apic_mode(source)) {
396 /* 16 bit dest mask, 16 bit cluster id */
397 bitmap = dest & 0xFFFF0000;
398 cluster = (dest >> 16) << 4;
399 } else if (kvm_lapic_get_reg(source, APIC_DFR) == APIC_DFR_FLAT) {
404 /* 4 bit dest mask, 4 bit cluster id */
406 cluster = (dest >> 4) << 2;
409 if (unlikely(!bitmap))
410 /* guest bug: nobody to send the logical interrupt to */
413 if (!is_power_of_2(bitmap))
414 /* multiple logical destinations, use slow path */
417 logid_index = cluster + __ffs(bitmap);
419 if (!apic_x2apic_mode(source)) {
420 u32 *avic_logical_id_table =
421 page_address(kvm_svm->avic_logical_id_table_page);
423 u32 logid_entry = avic_logical_id_table[logid_index];
425 if (WARN_ON_ONCE(index != logid_index))
428 /* guest bug: non-existent/reserved logical destination */
429 if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK)))
432 l1_physical_id = logid_entry &
433 AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
436 * For x2APIC logical mode, cannot leverage the index.
437 * Instead, calculate physical ID from logical ID in ICRH.
439 int cluster = (icrh & 0xffff0000) >> 16;
440 int apic = ffs(icrh & 0xffff) - 1;
443 * If the x2APIC logical ID sub-field (i.e. icrh[15:0])
444 * contains anything but a single bit, we cannot use the
445 * fast path, because it is limited to a single vCPU.
447 if (apic < 0 || icrh != (1 << apic))
450 l1_physical_id = (cluster << 4) + apic;
454 target_vcpu = kvm_get_vcpu_by_id(kvm, l1_physical_id);
455 if (unlikely(!target_vcpu))
456 /* guest bug: non-existent vCPU is a target of this IPI */
459 target_vcpu->arch.apic->irr_pending = true;
460 svm_complete_interrupt_delivery(target_vcpu,
461 icrl & APIC_MODE_MASK,
462 icrl & APIC_INT_LEVELTRIG,
463 icrl & APIC_VECTOR_MASK);
467 static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
468 u32 icrl, u32 icrh, u32 index)
471 struct kvm_vcpu *vcpu;
473 if (!avic_kick_target_vcpus_fast(kvm, source, icrl, icrh, index))
476 trace_kvm_avic_kick_vcpu_slowpath(icrh, icrl, index);
479 * Wake any target vCPUs that are blocking, i.e. waiting for a wake
480 * event. There's no need to signal doorbells, as hardware has handled
481 * vCPUs that were in guest at the time of the IPI, and vCPUs that have
482 * since entered the guest will have processed pending IRQs at VMRUN.
484 kvm_for_each_vcpu(i, vcpu, kvm) {
487 if (apic_x2apic_mode(vcpu->arch.apic))
490 dest = GET_XAPIC_DEST_FIELD(icrh);
492 if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
493 dest, icrl & APIC_DEST_MASK)) {
494 vcpu->arch.apic->irr_pending = true;
495 svm_complete_interrupt_delivery(vcpu,
496 icrl & APIC_MODE_MASK,
497 icrl & APIC_INT_LEVELTRIG,
498 icrl & APIC_VECTOR_MASK);
503 int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
505 struct vcpu_svm *svm = to_svm(vcpu);
506 u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
507 u32 icrl = svm->vmcb->control.exit_info_1;
508 u32 id = svm->vmcb->control.exit_info_2 >> 32;
509 u32 index = svm->vmcb->control.exit_info_2 & 0x1FF;
510 struct kvm_lapic *apic = vcpu->arch.apic;
512 trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);
515 case AVIC_IPI_FAILURE_INVALID_TARGET:
516 case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
518 * Emulate IPIs that are not handled by AVIC hardware, which
519 * only virtualizes Fixed, Edge-Triggered INTRs, and falls over
520 * if _any_ targets are invalid, e.g. if the logical mode mask
521 * is a superset of running vCPUs.
523 * The exit is a trap, e.g. ICR holds the correct value and RIP
524 * has been advanced; KVM is responsible only for emulating the
525 * IPI. Sadly, hardware may sometimes leave the BUSY flag set,
526 * in which case KVM needs to emulate the ICR write as well in
527 * order to clear the BUSY flag.
529 if (icrl & APIC_ICR_BUSY)
530 kvm_apic_write_nodecode(vcpu, APIC_ICR);
532 kvm_apic_send_ipi(apic, icrl, icrh);
534 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING:
536 * At this point, we expect that the AVIC HW has already
537 * set the appropriate IRR bits on the valid target
538 * vcpus. So, we just need to kick the appropriate vcpu.
540 avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh, index);
542 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
543 WARN_ONCE(1, "Invalid backing page\n");
545 case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR:
546 /* Invalid IPI with vector < 16 */
549 vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n");
555 unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu)
557 if (is_guest_mode(vcpu))
558 return APICV_INHIBIT_REASON_NESTED;
562 static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
564 struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
566 u32 *logical_apic_id_table;
567 int dlid = GET_APIC_LOGICAL_ID(ldr);
572 if (flat) { /* flat */
573 index = ffs(dlid) - 1;
576 } else { /* cluster */
577 int cluster = (dlid & 0xf0) >> 4;
578 int apic = ffs(dlid & 0x0f) - 1;
580 if ((apic < 0) || (apic > 7) ||
583 index = (cluster << 2) + apic;
586 logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
588 return &logical_apic_id_table[index];
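/*
 * Worked example of the indexing above: in flat mode a logical ID of 0x20
 * yields index ffs(0x20) - 1 = 5, while in cluster mode a logical ID of
 * 0x31 (cluster 3, APIC bit 0) yields index (3 << 2) + 0 = 12.
 */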
591 static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
594 u32 *entry, new_entry;
596 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
597 entry = avic_get_logical_id_entry(vcpu, ldr, flat);
601 new_entry = READ_ONCE(*entry);
602 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
603 new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
604 new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
605 WRITE_ONCE(*entry, new_entry);
610 static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
612 struct vcpu_svm *svm = to_svm(vcpu);
613 bool flat = svm->dfr_reg == APIC_DFR_FLAT;
616 /* Note: x2AVIC does not use logical APIC ID table */
617 if (apic_x2apic_mode(vcpu->arch.apic))
620 entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
622 clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
625 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
628 struct vcpu_svm *svm = to_svm(vcpu);
629 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
630 u32 id = kvm_xapic_id(vcpu->arch.apic);
632 /* AVIC does not support LDR update for x2APIC */
633 if (apic_x2apic_mode(vcpu->arch.apic))
636 if (ldr == svm->ldr_reg)
639 avic_invalidate_logical_id_entry(vcpu);
642 ret = avic_ldr_write(vcpu, id, ldr);
650 static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
652 struct vcpu_svm *svm = to_svm(vcpu);
653 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
655 if (svm->dfr_reg == dfr)
658 avic_invalidate_logical_id_entry(vcpu);
662 static int avic_unaccel_trap_write(struct kvm_vcpu *vcpu)
664 u32 offset = to_svm(vcpu)->vmcb->control.exit_info_1 &
665 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
669 if (avic_handle_ldr_update(vcpu))
673 avic_handle_dfr_update(vcpu);
679 kvm_apic_write_nodecode(vcpu, offset);
683 static bool is_avic_unaccelerated_access_trap(u32 offset)
712 int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
714 struct vcpu_svm *svm = to_svm(vcpu);
716 u32 offset = svm->vmcb->control.exit_info_1 &
717 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
718 u32 vector = svm->vmcb->control.exit_info_2 &
719 AVIC_UNACCEL_ACCESS_VECTOR_MASK;
720 bool write = (svm->vmcb->control.exit_info_1 >> 32) &
721 AVIC_UNACCEL_ACCESS_WRITE_MASK;
722 bool trap = is_avic_unaccelerated_access_trap(offset);
724 trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
725 trap, write, vector);
728 WARN_ONCE(!write, "svm: Handling trap read.\n");
729 ret = avic_unaccel_trap_write(vcpu);
732 ret = kvm_emulate_instruction(vcpu, 0);
738 int avic_init_vcpu(struct vcpu_svm *svm)
741 struct kvm_vcpu *vcpu = &svm->vcpu;
743 if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
746 ret = avic_init_backing_page(vcpu);
750 INIT_LIST_HEAD(&svm->ir_list);
751 spin_lock_init(&svm->ir_list_lock);
752 svm->dfr_reg = APIC_DFR_FLAT;
757 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
759 avic_handle_dfr_update(vcpu);
760 avic_handle_ldr_update(vcpu);
763 static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
767 struct amd_svm_iommu_ir *ir;
768 struct vcpu_svm *svm = to_svm(vcpu);
770 if (!kvm_arch_has_assigned_device(vcpu->kvm))
774 * Here, we go through the per-vcpu ir_list to update all existing
775 * interrupt remapping table entries targeting this vCPU.
777 spin_lock_irqsave(&svm->ir_list_lock, flags);
779 if (list_empty(&svm->ir_list))
782 list_for_each_entry(ir, &svm->ir_list, node) {
784 ret = amd_iommu_activate_guest_mode(ir->data);
786 ret = amd_iommu_deactivate_guest_mode(ir->data);
791 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
795 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
798 struct amd_svm_iommu_ir *cur;
800 spin_lock_irqsave(&svm->ir_list_lock, flags);
801 list_for_each_entry(cur, &svm->ir_list, node) {
802 if (cur->data != pi->ir_data)
804 list_del(&cur->node);
808 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
811 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
815 struct amd_svm_iommu_ir *ir;
819 * In some cases, the existing irte is updated and re-set,
820 * so we need to check here if it's already been added to the ir_list.
823 if (pi->ir_data && (pi->prev_ga_tag != 0)) {
824 struct kvm *kvm = svm->vcpu.kvm;
825 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
826 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
827 struct vcpu_svm *prev_svm;
834 prev_svm = to_svm(prev_vcpu);
835 svm_ir_list_del(prev_svm, pi);
839 * Allocating new amd_iommu_pi_data, which will get
840 * added to the per-vcpu ir_list.
842 ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
847 ir->data = pi->ir_data;
849 spin_lock_irqsave(&svm->ir_list_lock, flags);
852 * Update the target pCPU for IOMMU doorbells if the vCPU is running.
853 * If the vCPU is NOT running, i.e. is blocking or scheduled out, KVM
854 * will update the pCPU info when the vCPU is awakened and/or scheduled in.
855 * See also avic_vcpu_load().
857 entry = READ_ONCE(*(svm->avic_physical_id_cache));
858 if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
859 amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
862 list_add(&ir->node, &svm->ir_list);
863 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
870 * The HW cannot support posting multicast/broadcast
871 * interrupts to a vCPU. So, we still use legacy interrupt
872 * remapping for these kinds of interrupts.
874 * For lowest-priority interrupts, we only support
875 * those with a single CPU as the destination, e.g. the user
876 * configures the interrupts via /proc/irq or uses
877 * irqbalance to make the interrupts single-CPU.
880 get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
881 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
883 struct kvm_lapic_irq irq;
884 struct kvm_vcpu *vcpu = NULL;
886 kvm_set_msi_irq(kvm, e, &irq);
888 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
889 !kvm_irq_is_postable(&irq)) {
890 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
891 __func__, irq.vector);
895 pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
898 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
899 vcpu_info->vector = irq.vector;
905 * avic_pi_update_irte - set IRTE for Posted-Interrupts
908 * @host_irq: host irq of the interrupt
909 * @guest_irq: gsi of the interrupt
910 * @set: set or unset PI
911 * returns 0 on success, < 0 on failure
913 int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
914 uint32_t guest_irq, bool set)
916 struct kvm_kernel_irq_routing_entry *e;
917 struct kvm_irq_routing_table *irq_rt;
920 if (!kvm_arch_has_assigned_device(kvm) ||
921 !irq_remapping_cap(IRQ_POSTING_CAP))
924 pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
925 __func__, host_irq, guest_irq, set);
927 idx = srcu_read_lock(&kvm->irq_srcu);
928 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
930 if (guest_irq >= irq_rt->nr_rt_entries ||
931 hlist_empty(&irq_rt->map[guest_irq])) {
932 pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
933 guest_irq, irq_rt->nr_rt_entries);
937 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
938 struct vcpu_data vcpu_info;
939 struct vcpu_svm *svm = NULL;
941 if (e->type != KVM_IRQ_ROUTING_MSI)
945 * Here, we fall back to legacy interrupt remapping in the following cases:
946 * 1. The interrupt cannot be targeted to a specific vCPU.
947 * 2. The posted interrupt is being unset.
948 * 3. APIC virtualization is disabled for the vCPU.
949 * 4. The IRQ has an incompatible delivery mode (SMI, INIT, etc.).
951 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
952 kvm_vcpu_apicv_active(&svm->vcpu)) {
953 struct amd_iommu_pi_data pi;
955 /* Try to enable guest_mode in IRTE */
956 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
958 pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
960 pi.is_guest_mode = true;
961 pi.vcpu_data = &vcpu_info;
962 ret = irq_set_vcpu_affinity(host_irq, &pi);
965 * Here, we have successfully set up vcpu affinity in
966 * IOMMU guest mode. Now, we need to store the posted
967 * interrupt information in a per-vcpu ir_list so that
968 * we can reference it directly when we update vcpu
969 * scheduling information in the IOMMU irte.
971 if (!ret && pi.is_guest_mode)
972 svm_ir_list_add(svm, &pi);
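/*
 * The entries stashed on ir_list here are what
 * avic_update_iommu_vcpu_affinity() walks on vCPU load/put to retarget
 * the IOMMU doorbells at the vCPU's current pCPU.
 */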
974 /* Use legacy mode in IRTE */
975 struct amd_iommu_pi_data pi;
978 * Here, pi is used to:
979 * - Tell IOMMU to use legacy mode for this interrupt.
980 * - Retrieve ga_tag of prior interrupt remapping data.
983 pi.is_guest_mode = false;
984 ret = irq_set_vcpu_affinity(host_irq, &pi);
987 * Check if the posted interrupt was previously
988 * set up with guest_mode by checking if the ga_tag
989 * was cached. If so, we need to clean up the per-vcpu
992 if (!ret && pi.prev_ga_tag) {
993 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
994 struct kvm_vcpu *vcpu;
996 vcpu = kvm_get_vcpu_by_id(kvm, id);
998 svm_ir_list_del(to_svm(vcpu), &pi);
1003 trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
1004 e->gsi, vcpu_info.vector,
1005 vcpu_info.pi_desc_addr, set);
1009 pr_err("%s: failed to update PI IRTE\n", __func__);
1016 srcu_read_unlock(&kvm->irq_srcu, idx);
1020 bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
1022 ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
1023 BIT(APICV_INHIBIT_REASON_ABSENT) |
1024 BIT(APICV_INHIBIT_REASON_HYPERV) |
1025 BIT(APICV_INHIBIT_REASON_NESTED) |
1026 BIT(APICV_INHIBIT_REASON_IRQWIN) |
1027 BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
1028 BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
1029 BIT(APICV_INHIBIT_REASON_SEV) |
1030 BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |
1031 BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
1033 return supported & BIT(reason);
1038 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
1041 struct amd_svm_iommu_ir *ir;
1042 struct vcpu_svm *svm = to_svm(vcpu);
1044 lockdep_assert_held(&svm->ir_list_lock);
1046 if (!kvm_arch_has_assigned_device(vcpu->kvm))
1050 * Here, we go through the per-vcpu ir_list to update all existing
1051 * interrupt remapping table entries targeting this vCPU.
1053 if (list_empty(&svm->ir_list))
1056 list_for_each_entry(ir, &svm->ir_list, node) {
1057 ret = amd_iommu_update_ga(cpu, r, ir->data);
1064 void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1067 int h_physical_id = kvm_cpu_get_apicid(cpu);
1068 struct vcpu_svm *svm = to_svm(vcpu);
1069 unsigned long flags;
1071 lockdep_assert_preemption_disabled();
1073 if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
1077 * No need to update anything if the vCPU is blocking, i.e. if the vCPU
1078 * is being scheduled in after being preempted. The CPU entries in the
1079 * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
1080 * If the vCPU was migrated, its new CPU value will be stuffed when the
1083 if (kvm_vcpu_is_blocking(vcpu))
1087 * Grab the per-vCPU interrupt remapping lock even if the VM doesn't
1088 * _currently_ have assigned devices, as that can change. Holding
1089 * ir_list_lock ensures that either svm_ir_list_add() will consume
1090 * up-to-date entry information, or that this task will wait until
1091 * svm_ir_list_add() completes to set the new target pCPU.
1093 spin_lock_irqsave(&svm->ir_list_lock, flags);
1095 entry = READ_ONCE(*(svm->avic_physical_id_cache));
1097 entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
1098 entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
1099 entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1101 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
1102 avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
1104 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1107 void avic_vcpu_put(struct kvm_vcpu *vcpu)
1110 struct vcpu_svm *svm = to_svm(vcpu);
1111 unsigned long flags;
1113 lockdep_assert_preemption_disabled();
1116 * Note, reading the Physical ID entry outside of ir_list_lock is safe
1117 * as only the pCPU that has loaded (or is loading) the vCPU is allowed
1118 * to modify the entry, and preemption is disabled. I.e. the vCPU
1119 * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
1122 entry = READ_ONCE(*(svm->avic_physical_id_cache));
1124 /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
1125 if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
1129 * Take and hold the per-vCPU interrupt remapping lock while updating
1130 * the Physical ID entry even though the lock doesn't protect against
1131 * multiple writers (see above). Holding ir_list_lock ensures that
1132 * either svm_ir_list_add() will consume up-to-date entry information,
1133 * or that this task will wait until svm_ir_list_add() completes to
1134 * mark the vCPU as not running.
1136 spin_lock_irqsave(&svm->ir_list_lock, flags);
1138 avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
1140 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1141 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
1143 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1147 void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
1149 struct vcpu_svm *svm = to_svm(vcpu);
1150 struct vmcb *vmcb = svm->vmcb01.ptr;
1152 if (!lapic_in_kernel(vcpu) || avic_mode == AVIC_MODE_NONE)
1158 if (kvm_vcpu_apicv_active(vcpu)) {
1160 * During AVIC temporary deactivation, the guest could update
1161 * APIC ID, DFR and LDR registers, which would not be trapped
1162 * by avic_unaccelerated_access_interception(). In this case,
1163 * we need to check and update the AVIC logical APIC ID table
1164 * accordingly before re-activating.
1166 avic_apicv_post_state_restore(vcpu);
1167 avic_activate_vmcb(svm);
1169 avic_deactivate_vmcb(svm);
1171 vmcb_mark_dirty(vmcb, VMCB_AVIC);
1174 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
1176 bool activated = kvm_vcpu_apicv_active(vcpu);
1181 avic_refresh_virtual_apic_mode(vcpu);
1184 avic_vcpu_load(vcpu, vcpu->cpu);
1186 avic_vcpu_put(vcpu);
1188 avic_set_pi_irte_mode(vcpu, activated);
1191 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
1193 if (!kvm_vcpu_apicv_active(vcpu))
1197 * Unload the AVIC when the vCPU is about to block, _before_
1198 * the vCPU actually blocks.
1200 * Any IRQs that arrive before IsRunning=0 will not cause an
1201 * incomplete IPI vmexit on the source, therefore vIRR will also
1202 * be checked by kvm_vcpu_check_block() before blocking. The
1203 * memory barrier implicit in set_current_state orders writing
1204 * IsRunning=0 before reading the vIRR. The processor needs a
1205 * matching memory barrier on interrupt delivery between writing
1206 * IRR and reading IsRunning; the lack of this barrier might be
1207 * the cause of errata #1235.
1209 avic_vcpu_put(vcpu);
1212 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
1214 if (!kvm_vcpu_apicv_active(vcpu))
1217 avic_vcpu_load(vcpu, vcpu->cpu);
1222 * - The module param avic enables both xAPIC and x2APIC modes.
1223 * - The hypervisor can support both xAVIC and x2AVIC in the same guest.
1224 * - The mode can be switched at run-time.
1226 bool avic_hardware_setup(struct kvm_x86_ops *x86_ops)
1231 if (boot_cpu_has(X86_FEATURE_AVIC)) {
1232 avic_mode = AVIC_MODE_X1;
1233 pr_info("AVIC enabled\n");
1234 } else if (force_avic) {
1236 * Some older systems do not advertise AVIC support.
1237 * See the Revision Guide for the specific AMD processor for more details.
1239 avic_mode = AVIC_MODE_X1;
1240 pr_warn("AVIC is not supported in CPUID but force enabled");
1241 pr_warn("Your system might crash and burn");
1244 /* AVIC is a prerequisite for x2AVIC. */
1245 if (boot_cpu_has(X86_FEATURE_X2AVIC)) {
1246 if (avic_mode == AVIC_MODE_X1) {
1247 avic_mode = AVIC_MODE_X2;
1248 pr_info("x2AVIC enabled\n");
1250 pr_warn(FW_BUG "Cannot support x2AVIC because AVIC is disabled");
1251 pr_warn(FW_BUG "Try enabling AVIC using the force_avic option");
1255 if (avic_mode != AVIC_MODE_NONE)
1256 amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
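/*
 * Net result of the setup above: avic_mode is AVIC_MODE_NONE (no
 * acceleration), AVIC_MODE_X1 (xAVIC only), or AVIC_MODE_X2 (xAVIC plus
 * x2AVIC), and the GA log notifier is registered whenever any AVIC mode
 * is enabled.
 */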