/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
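
/*
 * One of these is registered by the HV / PR implementation module when
 * it loads; kvm_arch_init_vm() below picks one per VM and caches it in
 * kvm->arch.kvm_ops, which the rest of this file dispatches through.
 */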

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
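
/*
 * The shared (magic) page is kept in guest endianness; when the guest
 * flips endianness (MSR_LE in intr_msr), kvmppc_kvm_pv() below swaps the
 * whole structure so both sides keep reading the same values.
 */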
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
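
/*
 * ePAPR-style paravirt hypercall dispatch: the guest passes the hcall
 * number in r11 and up to four arguments in r3-r6 (masked to 32 bits
 * when the guest runs with MSR_SF clear).  The second return value is
 * placed in guest r4 here; the status code is returned to the caller,
 * which is expected to hand it back in the guest's r3.
 */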
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif
		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
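
/*
 * kvmppc_st()/kvmppc_ld() translate a guest effective address and copy
 * data to or from the guest.  Accesses that hit the magic page are
 * satisfied from the kernel-side copy (vcpu->arch.shared) instead of
 * guest memory; anything the translation cannot reach falls back to
 * MMIO emulation via EMULATE_DO_MMIO.
 */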
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}
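
/*
 * VM creation: type 0 (the default) prefers HV when both implementations
 * are loaded; KVM_VM_PPC_HV and KVM_VM_PPC_PR request a flavour
 * explicitly and fail if it is not available.
 */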
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
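
/*
 * Illustrative userspace probe (a sketch, not part of this file):
 * capabilities are queried with the generic KVM_CHECK_EXTENSION ioctl,
 * e.g.
 *
 *	int threads = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *
 * where a positive return value carries the number computed above.
 */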

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}
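
/*
 * MMIO the kernel cannot handle is bounced to userspace with
 * KVM_EXIT_MMIO; userspace performs the access and, for loads, leaves
 * the bytes in run->mmio.data.  On the next KVM_RUN the completion
 * below byte-swaps as needed, optionally sign-extends, and writes the
 * result into the register recorded in vcpu->arch.io_gpr.
 */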
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
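
/*
 * ONE_REG accessors: the register id encodes the register's size, which
 * one_reg_size() extracts, so a single pair of ioctls can move GPR-sized
 * values, vector registers and friends.  The generic
 * kvmppc_get/set_one_reg() handles most ids; the Altivec registers that
 * need a host feature check are picked up here.
 */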
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
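
/*
 * Illustrative userspace call (a sketch, not part of this file):
 * per-vcpu capabilities are switched on with KVM_ENABLE_CAP on the
 * vcpu fd, e.g.
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */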

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
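
/*
 * Userspace retrieves this sequence with the KVM_PPC_GET_PVINFO vm ioctl
 * (see kvm_arch_vm_ioctl() below) so it can tell the guest, e.g. via the
 * device tree, which instruction sequence to use for hypercalls.
 */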

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}
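
/*
 * Global LPID (logical partition ID) allocator: a simple bitmap of ids
 * in use.  test_and_set_bit() and clear_bit() are atomic, so no extra
 * locking is needed even though VMs come and go concurrently.
 */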
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);