/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits", VCPU_STAT(sum_exits) },
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "queue_intr", VCPU_STAT(queue_intr) },
	{ "halt_poll_success_ns", VCPU_STAT(halt_poll_success_ns) },
	{ "halt_poll_fail_ns", VCPU_STAT(halt_poll_fail_ns) },
	{ "halt_wait_ns", VCPU_STAT(halt_wait_ns) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_successful_wait", VCPU_STAT(halt_successful_wait) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage", VCPU_STAT(pf_storage) },
	{ "sp_storage", VCPU_STAT(sp_storage) },
	{ "pf_instruc", VCPU_STAT(pf_instruc) },
	{ "sp_instruc", VCPU_STAT(sp_instruc) },
	{ "ld", VCPU_STAT(ld) },
	{ "ld_slow", VCPU_STAT(ld_slow) },
	{ "st", VCPU_STAT(st) },
	{ "st_slow", VCPU_STAT(st_slow) },
	{ "pthru_all", VCPU_STAT(pthru_all) },
	{ "pthru_host", VCPU_STAT(pthru_host) },
	{ "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
	{ NULL }
};

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		ulong lr = kvmppc_get_lr(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

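/*
 * The guest signals a critical section by making the shared-page
 * "critical" field equal to r1 while in supervisor mode; interrupt
 * delivery is suppressed for the duration.
 */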
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

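/*
 * Map an interrupt vector offset to the internal delivery priority used
 * in the pending_exceptions bitmap.
 */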
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break;
	default: prio = BOOK3S_IRQPRIO_MAX; break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);

#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

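/*
 * Queue an external interrupt. KVM_INTERRUPT_SET_LEVEL interrupts stay
 * pending until userspace explicitly dequeues them; edge interrupts are
 * cleared once delivered.
 */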
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);	/* used by kvm_hv */

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}

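/*
 * Try to deliver the interrupt for one pending priority. Maskable
 * interrupts (decrementer, external) are held back while MSR_EE is clear
 * or the guest is in a critical section. Returns non-zero if the
 * interrupt was injected.
 */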
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

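/*
 * Deliver pending exceptions in priority order before entering the guest
 * and tell the guest (via the shared page) whether anything is still
 * pending.
 */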
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

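/*
 * Translate a guest physical address to a host pfn, taking the magic
 * (paravirt shared) page into account: accesses to the magic page are
 * redirected to the host page backing vcpu->arch.shared.
 */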
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

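/*
 * Translate an effective address either through the guest MMU (when the
 * relevant MSR relocation bit is set) or via the real-mode identity
 * mapping, undoing the split real mode offset where it applies.
 */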
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

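/*
 * Fetch the instruction at the current (or, for system calls, the
 * preceding) guest PC so it can be emulated.
 */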
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

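/*
 * ONE_REG get: let the HV/PR backend handle the register first and fall
 * back to the common Book3S registers here if it returns -EINVAL.
 */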
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

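/*
 * ONE_REG set: mirror image of kvmppc_get_one_reg() for writing the same
 * set of registers.
 */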
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

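/*
 * The remaining vcpu/VM operations are thin wrappers that dispatch to the
 * HV or PR implementation through kvm->arch.kvm_ops.
 */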
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      const struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

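/*
 * H_LOGICAL_CI_LOAD hypercall: read size bytes (r4 = size, r5 = address)
 * from a cache-inhibited logical address via the MMIO bus and return the
 * value in r4.
 */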
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;
	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;
	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;
	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;
	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

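/*
 * H_LOGICAL_CI_STORE hypercall: write the value in r6 (r4 = size,
 * r5 = address) to a cache-inhibited logical address via the MMIO bus.
 */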
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;
	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;
	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;
	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;
	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * module.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xive_enabled())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}
#endif /* CONFIG_KVM_XICS */

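/*
 * Module init: register with the generic KVM core, set up the PR backend
 * when it is built in, and register the in-kernel XICS/XIVE interrupt
 * controller device ops.
 */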
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xive_enabled()) {
		kvmppc_xive_init_module();
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
	if (xive_enabled())
		kvmppc_xive_exit_module();
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif