/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
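
/*
 * Allocate a naturally aligned chunk for a hashed page table from the
 * CMA region reserved at boot by kvm_cma_reserve() below.
 */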
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
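
/* Table of real-mode hcall handlers, defined in book3s_hv_rmhandlers.S */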
extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	/* hcall numbers are multiples of 4 */
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
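
/*
 * Real-mode H_RANDOM implementation: return a random number from the
 * platform RNG in GPR4 of the calling vcpu.
 */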
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xive_enabled()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}
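
/*
 * Called when a thread takes an interrupt or exit while in the guest:
 * record the exit in the vcore's entry/exit map and, if we are the first
 * thread out, kick the other threads of this vcore (and any other
 * subcores sharing the core) out of the guest too.
 */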
void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i, t;
	int cpu0;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}

	/*
	 * On POWER9 when running a HPT guest on a radix host (sip != NULL),
	 * we have to interrupt inactive CPU threads to get them to
	 * restore the host LPCR value.
	 */
	if (sip->lpcr_req) {
		if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
			vc = local_paca->kvm_hstate.kvm_vcore;
			cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
			for (t = 1; t < threads_per_core; ++t) {
				if (sip->napped[t])
					kvmhv_rm_send_ipi(cpu0 + t);
			}
		}
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);
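
/*
 * Top-level real-mode poll for pending external interrupts.  If XIVE is
 * in use we simply tell the caller to let the host handle it; otherwise
 * we keep calling kvmppc_read_one_intr() until it reports nothing more
 * is pending, combining the result codes (a larger code takes precedence).
 */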
long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);

	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!xics_phys)
		rc = opal_int_get_xirr(&xirr, false);
	else
		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR has completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}
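
/*
 * Real-mode XICS hypercall handlers.  Each one dispatches to the
 * real-mode XIVE code, to the virtual-mode XIVE code (through the
 * function pointers the XIVE module fills in at load time), or to the
 * real-mode emulated-XICS code, depending on the interrupt controller
 * in use and on whether we are currently in real mode.
 */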
#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */
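
/*
 * Called for an interrupt or exception taken while in the KVM real-mode
 * entry/exit code: system resets (0x100) and machine checks (0x200) are
 * passed to the kernel's handlers, anything else gets die(); either way
 * we cannot safely continue, so we panic.
 */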
void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * 100 could happen at any time, 200 can happen due to invalid real
	 * address access for example (or any time due to a hardware problem).
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}

/*
 * Functions used to switch LPCR HR and UPRT bits on all threads
 * when entering and exiting HPT guests on a radix host.
 */

#define PHASE_REALMODE		1	/* in real mode */
#define PHASE_SET_LPCR		2	/* have set LPCR */
#define PHASE_OUT_OF_GUEST	4	/* have finished executing in guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */

#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))

static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
	int thr = local_paca->kvm_hstate.tid;

	sip->lpcr_sync.phase[thr] |= phase;
	phase = ALL(phase);
	while ((sip->lpcr_sync.allphases & phase) != phase) {
		HMT_low();
		barrier();
	}
	HMT_medium();
}
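
/*
 * Switch this thread's LPCR and LPIDR to the values requested in the
 * split-mode structure, with all threads of the core synchronizing
 * before and after, and with thread 0 invalidating the TLB for the
 * new LPID.
 */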
void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
	unsigned long rb, set;

	/* wait for every other thread to get to real mode */
	wait_for_sync(sip, PHASE_REALMODE);

	/* Set LPCR and LPIDR */
	mtspr(SPRN_LPCR, sip->lpcr_req);
	mtspr(SPRN_LPID, sip->lpidr_req);
	isync();

	/* Invalidate the TLB on thread 0 */
	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_set = 0;
		asm volatile("ptesync" : : : "memory");
		for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
			rb = TLBIEL_INVAL_SET_LPID +
				(set << TLBIEL_INVAL_SET_SHIFT);
			asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
				     "r" (rb), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}

	/* indicate that we have done so and wait for others */
	wait_for_sync(sip, PHASE_SET_LPCR);
	/* order read of sip->lpcr_sync.allphases vs. sip->do_set */
	smp_rmb();
}

/*
 * Called when a thread that has been in the guest needs
 * to reload the host LPCR value - but only on POWER9 when
 * running a HPT guest on a radix host.
 */
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
	/* we're out of the guest... */
	wait_for_sync(sip, PHASE_OUT_OF_GUEST);

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_LPCR, sip->host_lpcr);
	isync();

	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_restore = 0;
		smp_wmb();	/* order store of do_restore vs. phase */
	}

	wait_for_sync(sip, PHASE_RESET_LPCR);
	smp_mb();
	local_paca->kvm_hstate.kvm_split_mode = NULL;
}