// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"
#include "book3s_hv.h"

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;
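
/* Parse the "kvm_cma_resv_ratio=" kernel command-line option. */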
static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
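
/*
 * Allocate nr_pages physically contiguous pages from the KVM CMA area
 * for a guest hash page table. The request must cover at least one CMA
 * chunk (1 << KVM_CMA_CHUNK_ORDER bytes), as the VM_BUG_ON asserts.
 */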
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
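
/* Return hash page table pages previously allocated from the CMA area. */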
void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator has been
 * activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	phys_addr_t selected_size;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
	if (selected_size) {
		pr_info("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with cpus_read_lock/unlock().
 */
static atomic_t hv_vm_count;
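
/*
 * Update the VM count under cpus_read_lock() so that the count cannot
 * change while a secondary-thread online operation is examining it.
 */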
void kvm_hv_vm_activated(void)
{
	cpus_read_lock();
	atomic_inc(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	cpus_read_lock();
	atomic_dec(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];
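
/*
 * hcall_real_table lives in book3s_hv_rmhandlers.S and is indexed by
 * hcall number divided by 4; a zero entry means there is no real-mode
 * handler for that hcall.
 */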
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return ppc_md.get_random_seed != NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
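
/*
 * Real-mode H_RANDOM handler: on success the random value is returned
 * to the guest in GPR4.
 */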
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
	unsigned long rand;

	if (ppc_md.get_random_seed &&
	    ppc_md.get_random_seed(&rand)) {
		kvmppc_set_gpr(vcpu, 4, rand);
		return H_SUCCESS;
	}

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}
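
/*
 * Request that all threads of this virtual core (and, in split mode,
 * of the other subcores on the physical core) exit the guest.
 * Only the first thread to set its exit bit sends the IPIs.
 */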
void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
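/*
 * Map a hardware IRQ source number (XISR) to its passthrough IRQ map
 * entry; returns NULL if this interrupt is not from a mapped
 * passthrough device.
 */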
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi);
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!xics_phys)
		rc = opal_int_get_xirr(&xirr, false);
	else
		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed.
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi);
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}
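
/*
 * Bring the vcpu out of cede: clear the ceded flag and cancel the
 * hrtimer that would otherwise wake it from the cede sleep.
 */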
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}
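
/*
 * Set the guest MSR, enforcing invariants that HV KVM requires:
 * the guest always runs with ME set and HV clear, and never in an
 * illegal transactional state.
 */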
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/* Guest must always run with ME enabled, HV disabled. */
	msr = (msr | MSR_ME) & ~MSR_HV;

	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	__kvmppc_set_msr_hv(vcpu, msr);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
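
/*
 * Deliver an interrupt to the guest: save the current PC/MSR into
 * SRR0/SRR1, then redirect the PC to the (possibly AIL-relocated)
 * vector with the guest's interrupt MSR.
 */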
static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	/*
	 * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
	 * applicable. AIL=2 is not supported.
	 *
	 * AIL does not apply to SRESET, MCE, or HMI (which is never
	 * delivered to the guest), and does not apply if IR=0 or DR=0.
	 */
	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	__kvmppc_set_msr_hv(vcpu, new_msr);
}
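
/* As inject_interrupt(), but also wake the vcpu if it has ceded. */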
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}
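
/*
 * Flush this core's guest TLB, one tlbiel per TLB set, then order the
 * flushes with a ptesync.
 */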
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	for (set = 0; set < kvm->arch.tlb_sets; ++set) {
		/* R=0 PRS=0 RIC=0 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (0), "i" (0), "i" (0),
			       "r" (0) : "memory");
		rb += PPC_BIT(51);	/* increment set number */
	}
	asm volatile("ptesync": : :"memory");
}
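
/*
 * If this physical CPU is marked as needing a TLB flush for this
 * guest, flush the guest TLB before entering the guest on it.
 */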
void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu)
{
	if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);