// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2019, IBM Corporation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/irqdomain.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	/*
	 * The KVM XIVE native device does not use the XIVE_ESB_SET_PQ_10
	 * load operation, so there is no need to enforce load-after-store
	 * ordering.
	 */

	val = in_be64(xd->eoi_mmio + offset);
	return (u8)val;
}

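/*
 * For illustration (not part of the upstream flow): an MMIO load at
 * the XIVE_ESB_SET_PQ_01 offset of a source's ESB page atomically
 * sets the PQ bits to '01' (masked) and returns the previous PQ
 * state, which is how this file masks a source before reconfiguring
 * it:
 *
 *	u8 old_pq = xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
 */
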
static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];

	xive_native_disable_queue(xc->vp_id, q, prio);
	if (q->qpage) {
		put_page(virt_to_page(q->qpage));
		q->qpage = NULL;
	}
}

static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
					      u8 prio, __be32 *qpage,
					      u32 order, bool can_escalate)
{
	int rc;
	__be32 *qpage_prev = q->qpage;

	rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
					 can_escalate);
	if (rc)
		return rc;

	if (qpage_prev)
		put_page(virt_to_page(qpage_prev));

	return rc;
}

void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	int i;

	if (!kvmppc_xive_enabled(vcpu))
		return;

	if (!xc)
		return;

	pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Free escalations */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		/* Free the escalation irq */
		if (xc->esc_virq[i]) {
			if (kvmppc_xive_has_single_escalation(xc->xive))
				xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]);
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
			xc->esc_virq[i] = 0;
		}
	}

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Clear the cam word so guest entry won't try to push context */
	vcpu->arch.xive_cam_word = 0;

	/* Free the queues */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		kvmppc_xive_native_cleanup_queue(vcpu, i);
	}

	/* Free the VP */
	kfree(xc);

	/* Cleanup the vcpu */
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
	vcpu->arch.xive_vcpu = NULL;
}

int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 server_num)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc = NULL;
	int rc;
	u32 vp_id;

	pr_devel("native_connect_vcpu(server=%d)\n", server_num);

	if (dev->ops != &kvm_xive_native_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	mutex_lock(&xive->lock);

	rc = kvmppc_xive_compute_vp_id(xive, server_num, &vp_id);
	if (rc)
		goto bail;

	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc) {
		rc = -ENOMEM;
		goto bail;
	}

	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = server_num;

	xc->vp_id = vp_id;
	xc->valid = true;
	vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;

	rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (rc) {
		pr_err("Failed to get VP info from OPAL: %d\n", rc);
		goto bail;
	}

	if (!kvmppc_xive_check_save_restore(vcpu)) {
		pr_err("inconsistent save-restore setup for VCPU %d\n", server_num);
		rc = -EIO;
		goto bail;
	}

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupts numbering
	 */
	rc = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
	if (rc) {
		pr_err("Failed to enable VP in OPAL: %d\n", rc);
		goto bail;
	}

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* TODO: reset all queues to a clean state ? */
bail:
	mutex_unlock(&xive->lock);
	if (rc)
		kvmppc_xive_native_cleanup_vcpu(vcpu);

	return rc;
}

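/*
 * Userspace reaches kvmppc_xive_native_connect_vcpu() through the
 * KVM_ENABLE_CAP vcpu ioctl. A minimal sketch, assuming 'xive_fd' is
 * the fd returned by KVM_CREATE_DEVICE and 'server' is the chosen
 * vCPU server number:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_IRQ_XIVE,
 *		.args = { xive_fd, server },
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */
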
/*
 * Device passthrough support
 */
static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	pgoff_t esb_pgoff = KVM_XIVE_ESB_PAGE_OFFSET + irq * 2;

	if (irq >= KVMPPC_XIVE_NR_IRQS)
		return -EINVAL;

	/*
	 * Clear the ESB pages of the IRQ number being mapped (or
	 * unmapped) into the guest and let the VM fault handler
	 * repopulate with the appropriate ESB pages (device or IC)
	 */
	pr_debug("clearing esb pages for girq 0x%lx\n", irq);
	mutex_lock(&xive->mapping_lock);
	if (xive->mapping)
		unmap_mapping_range(xive->mapping,
				    esb_pgoff << PAGE_SHIFT,
				    2ull << PAGE_SHIFT, 1);
	mutex_unlock(&xive->mapping_lock);

	return 0;
}

static struct kvmppc_xive_ops kvmppc_xive_native_ops = {
	.reset_mapped = kvmppc_xive_native_reset_mapped,
};

static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct kvm_device *dev = vma->vm_file->private_data;
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct xive_irq_data *xd;
	u32 hw_num;
	u16 src;
	u64 page;
	unsigned long irq;
	u64 page_offset;

	/*
	 * Linux/KVM uses a two-page ESB setting, one for trigger and
	 * one for EOI
	 */
	page_offset = vmf->pgoff - vma->vm_pgoff;
	irq = page_offset / 2;

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel("%s: source %lx not found !\n", __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	state = &sb->irq_state[src];

	/* Some sanity checking */
	if (!state->valid) {
		pr_devel("%s: source %lx invalid !\n", __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	kvmppc_xive_select_irq(state, &hw_num, &xd);

	arch_spin_lock(&sb->lock);

	/*
	 * first/even page is for trigger
	 * second/odd page is for EOI and management.
	 */
	page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
	arch_spin_unlock(&sb->lock);

	if (WARN_ON(!page)) {
		pr_err("%s: accessing invalid ESB page for source %lx !\n",
		       __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct xive_native_esb_vmops = {
	.fault = xive_native_esb_fault,
};

static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	switch (vmf->pgoff - vma->vm_pgoff) {
	case 0: /* HW - forbid access */
	case 1: /* HV - forbid access */
		return VM_FAULT_SIGBUS;
	case 2: /* OS */
		vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
		return VM_FAULT_NOPAGE;
	case 3: /* USER - TODO */
	default:
		return VM_FAULT_SIGBUS;
	}
}

static const struct vm_operations_struct xive_native_tima_vmops = {
	.fault = xive_native_tima_fault,
};

static int kvmppc_xive_native_mmap(struct kvm_device *dev,
				   struct vm_area_struct *vma)
{
	struct kvmppc_xive *xive = dev->private;

	/* We only allow mappings at fixed offset for now */
	if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
		if (vma_pages(vma) > 4)
			return -EINVAL;
		vma->vm_ops = &xive_native_tima_vmops;
	} else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
		if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
			return -EINVAL;
		vma->vm_ops = &xive_native_esb_vmops;
	} else {
		return -EINVAL;
	}

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	/*
	 * Grab the KVM device file address_space to be able to clear
	 * the ESB pages mapping when a device is passed-through into
	 * the guest.
	 */
	xive->mapping = vma->vm_file->f_mapping;
	return 0;
}

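/*
 * A hedged sketch of how userspace consumes these mappings: the TIMA
 * and ESB pages are mmap'ed from the device fd at the fixed page
 * offsets checked above (lengths here are illustrative assumptions):
 *
 *	void *tima = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, xive_fd,
 *			  KVM_XIVE_TIMA_PAGE_OFFSET * page_size);
 *	void *esb = mmap(NULL, nr_irqs * 2 * page_size,
 *			 PROT_READ | PROT_WRITE, MAP_SHARED, xive_fd,
 *			 KVM_XIVE_ESB_PAGE_OFFSET * page_size);
 */
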
static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
					 u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val;
	u16 idx;
	int rc;

	pr_devel("%s irq=0x%lx\n", __func__, irq);

	if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS)
		return -E2BIG;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_debug("No source, creating source block...\n");
		sb = kvmppc_xive_create_src_block(xive, irq);
		if (!sb) {
			pr_err("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	if (get_user(val, ubufp)) {
		pr_err("fault getting user info !\n");
		return -EFAULT;
	}

	arch_spin_lock(&sb->lock);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_err("Failed to allocate IRQ !\n");
			rc = -ENXIO;
			goto unlock;
		}
		xive_native_populate_irq_data(state->ipi_number,
					      &state->ipi_data);
		pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__,
			 state->ipi_number, irq);
	}

	/* Restore LSI state */
	if (val & KVM_XIVE_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XIVE_LEVEL_ASSERTED)
			state->asserted = true;
		pr_devel("  LSI ! Asserted=%d\n", state->asserted);
	}

	/* Mask IRQ to start with */
	state->act_server = 0;
	state->act_priority = MASKED;
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	rc = 0;

unlock:
	arch_spin_unlock(&sb->lock);

	return rc;
}

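/*
 * Sketch of the matching userspace call (illustrative only): a source
 * is created with KVM_SET_DEVICE_ATTR, the guest interrupt number in
 * 'attr' and a pointer to the 64-bit source state word in 'addr':
 *
 *	__u64 state_word = KVM_XIVE_LEVEL_SENSITIVE;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XIVE_GRP_SOURCE,
 *		.attr = girq,
 *		.addr = (__u64)&state_word,
 *	};
 *	ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
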
static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
					struct kvmppc_xive_src_block *sb,
					struct kvmppc_xive_irq_state *state,
					u32 server, u8 priority, bool masked,
					u32 eisn)
{
	struct kvm *kvm = xive->kvm;
	u32 hw_num;
	int rc = 0;

	arch_spin_lock(&sb->lock);

	if (state->act_server == server && state->act_priority == priority &&
	    state->eisn == eisn)
		goto unlock;

	pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
		 priority, server, masked, state->act_server,
		 state->act_priority);

	kvmppc_xive_select_irq(state, &hw_num, NULL);

	if (priority != MASKED && !masked) {
		rc = kvmppc_xive_select_target(kvm, &server, priority);
		if (rc)
			goto unlock;

		state->act_priority = priority;
		state->act_server = server;
		state->eisn = eisn;

		rc = xive_native_configure_irq(hw_num,
					       kvmppc_xive_vp(xive, server),
					       priority, eisn);
	} else {
		state->act_priority = MASKED;
		state->act_server = 0;
		state->eisn = 0;

		rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
	}

unlock:
	arch_spin_unlock(&sb->lock);
	return rc;
}

static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive,
						long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 src;
	u64 kvm_cfg;
	u32 server;
	u8 priority;
	bool masked;
	u32 eisn;

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[src];

	if (!state->valid)
		return -EINVAL;

	if (get_user(kvm_cfg, ubufp))
		return -EFAULT;

	pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg);

	priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >>
		KVM_XIVE_SOURCE_PRIORITY_SHIFT;
	server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >>
		KVM_XIVE_SOURCE_SERVER_SHIFT;
	masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >>
		KVM_XIVE_SOURCE_MASKED_SHIFT;
	eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >>
		KVM_XIVE_SOURCE_EISN_SHIFT;

	if (priority != xive_prio_from_guest(priority)) {
		pr_err("invalid priority for queue %d for VCPU %d\n",
		       priority, server);
		return -EINVAL;
	}

	return kvmppc_xive_native_update_source_config(xive, sb, state, server,
						       priority, masked, eisn);
}

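/*
 * For reference, a sketch of how userspace packs the 64-bit
 * configuration word decoded above, using the uapi masks and shifts
 * (variable names are illustrative):
 *
 *	__u64 kvm_cfg = ((__u64)priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT) |
 *			((__u64)server << KVM_XIVE_SOURCE_SERVER_SHIFT) |
 *			((__u64)masked << KVM_XIVE_SOURCE_MASKED_SHIFT) |
 *			((__u64)eisn << KVM_XIVE_SOURCE_EISN_SHIFT);
 */
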
static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
					  long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct xive_irq_data *xd;
	u32 hw_num;
	u16 src;
	int rc = 0;

	pr_devel("%s irq=0x%lx", __func__, irq);

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[src];

	rc = -EINVAL;

	arch_spin_lock(&sb->lock);

	if (state->valid) {
		kvmppc_xive_select_irq(state, &hw_num, &xd);
		xive_native_sync_source(hw_num);
		rc = 0;
	}

	arch_spin_unlock(&sb->lock);
	return rc;
}

static int xive_native_validate_queue_size(u32 qshift)
{
	/*
	 * We only support 64K pages for the moment. This is also
	 * advertised in the DT property "ibm,xive-eq-sizes"
	 */
	switch (qshift) {
	case 0: /* EQ reset */
	case 16:
		return 0;
	default:
		return -EINVAL;
	}
}

static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
					       long eq_idx, u64 addr)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	void __user *ubufp = (void __user *) addr;
	u32 server;
	u8 priority;
	struct kvm_ppc_xive_eq kvm_eq;
	int rc;
	__be32 *qaddr = NULL;
	struct page *page;
	struct xive_q *q;
	gfn_t gfn;
	unsigned long page_size;
	int srcu_idx;

	/*
	 * Demangle priority/server tuple from the EQ identifier
	 */
	priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
		KVM_XIVE_EQ_PRIORITY_SHIFT;
	server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
		KVM_XIVE_EQ_SERVER_SHIFT;

	if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
		return -EFAULT;

	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_err("Can't find server %d\n", server);
		return -ENOENT;
	}
	xc = vcpu->arch.xive_vcpu;

	if (priority != xive_prio_from_guest(priority)) {
		pr_err("Trying to restore invalid queue %d for VCPU %d\n",
		       priority, server);
		return -EINVAL;
	}
	q = &xc->queues[priority];

	pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
		 __func__, server, priority, kvm_eq.flags,
		 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

	/* reset queue and disable queueing */
	if (!kvm_eq.qshift) {
		q->guest_qaddr = 0;
		q->guest_qshift = 0;

		rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
							NULL, 0, true);
		if (rc) {
			pr_err("Failed to reset queue %d for VCPU %d: %d\n",
			       priority, xc->server_num, rc);
			return rc;
		}

		return 0;
	}

	/*
	 * sPAPR specifies an "Unconditional Notify (n)" flag for the
	 * H_INT_SET_QUEUE_CONFIG hcall which forces notification
	 * without using the coalescing mechanisms provided by the
	 * XIVE END ESBs. This is required on KVM as notification
	 * using the END ESBs is not supported.
	 */
	if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
		pr_err("invalid flags %d\n", kvm_eq.flags);
		return -EINVAL;
	}

	rc = xive_native_validate_queue_size(kvm_eq.qshift);
	if (rc) {
		pr_err("invalid queue size %d\n", kvm_eq.qshift);
		return rc;
	}

	if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
		pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
		       1ull << kvm_eq.qshift);
		return -EINVAL;
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	gfn = gpa_to_gfn(kvm_eq.qaddr);

	page_size = kvm_host_page_size(vcpu, gfn);
	if (1ull << kvm_eq.qshift > page_size) {
		srcu_read_unlock(&kvm->srcu, srcu_idx);
		pr_warn("Incompatible host page size %lx!\n", page_size);
		return -EINVAL;
	}

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		srcu_read_unlock(&kvm->srcu, srcu_idx);
		pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
		return -EINVAL;
	}

	qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	/*
	 * Back up the queue page guest address so that the EQ page
	 * can be marked dirty for migration.
	 */
	q->guest_qaddr = kvm_eq.qaddr;
	q->guest_qshift = kvm_eq.qshift;

	/*
	 * Unconditional Notification is forced by default at the
	 * OPAL level because the use of END ESBs is not supported by
	 * Linux.
	 */
	rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
					(__be32 *) qaddr, kvm_eq.qshift, true);
	if (rc) {
		pr_err("Failed to configure queue %d for VCPU %d: %d\n",
		       priority, xc->server_num, rc);
		put_page(page);
		return rc;
	}

	/*
	 * Only restore the queue state when needed. When doing the
	 * H_INT_SET_SOURCE_CONFIG hcall, it should not.
	 */
	if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) {
		rc = xive_native_set_queue_state(xc->vp_id, priority,
						 kvm_eq.qtoggle,
						 kvm_eq.qindex);
		if (rc)
			goto error;
	}

	rc = kvmppc_xive_attach_escalation(vcpu, priority,
					   kvmppc_xive_has_single_escalation(xive));
error:
	if (rc)
		kvmppc_xive_native_cleanup_queue(vcpu, priority);
	return rc;
}

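/*
 * Illustrative userspace sketch (assuming a 64K EQ page backing
 * priority 'prio' of vCPU 'server'): the EQ identifier packs the
 * server/priority tuple demangled above and 'addr' points to a
 * struct kvm_ppc_xive_eq:
 *
 *	struct kvm_ppc_xive_eq kvm_eq = {
 *		.flags = KVM_XIVE_EQ_ALWAYS_NOTIFY,
 *		.qshift = 16,
 *		.qaddr = eq_gpa,	// 64K-aligned guest address
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XIVE_GRP_EQ_CONFIG,
 *		.attr = (server << KVM_XIVE_EQ_SERVER_SHIFT) |
 *			(prio << KVM_XIVE_EQ_PRIORITY_SHIFT),
 *		.addr = (__u64)&kvm_eq,
 *	};
 *	ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
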
static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive,
					       long eq_idx, u64 addr)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;
	void __user *ubufp = (u64 __user *) addr;
	u32 server;
	u8 priority;
	struct kvm_ppc_xive_eq kvm_eq;
	u64 qaddr;
	u64 qshift;
	u64 qeoi_page;
	u32 escalate_irq;
	u64 qflags;
	int rc;

	/*
	 * Demangle priority/server tuple from the EQ identifier
	 */
	priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
		KVM_XIVE_EQ_PRIORITY_SHIFT;
	server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
		KVM_XIVE_EQ_SERVER_SHIFT;

	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_err("Can't find server %d\n", server);
		return -ENOENT;
	}
	xc = vcpu->arch.xive_vcpu;

	if (priority != xive_prio_from_guest(priority)) {
		pr_err("invalid priority for queue %d for VCPU %d\n",
		       priority, server);
		return -EINVAL;
	}
	q = &xc->queues[priority];

	memset(&kvm_eq, 0, sizeof(kvm_eq));

	if (!q->qpage)
		return 0;

	rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift,
					&qeoi_page, &escalate_irq, &qflags);
	if (rc)
		return rc;

	kvm_eq.flags = 0;
	if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
		kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;

	kvm_eq.qshift = q->guest_qshift;
	kvm_eq.qaddr = q->guest_qaddr;

	rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle,
					 &kvm_eq.qindex);
	if (rc)
		return rc;

	pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
		 __func__, server, priority, kvm_eq.flags,
		 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

	if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq)))
		return -EFAULT;

	return 0;
}

static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		if (state->act_priority == MASKED)
			continue;

		state->eisn = 0;
		state->act_server = 0;
		state->act_priority = MASKED;
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
		xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
		if (state->pt_number) {
			xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->pt_number,
						  0, MASKED, 0);
		}
	}
}

static int kvmppc_xive_reset(struct kvmppc_xive *xive)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	pr_devel("%s\n", __func__);

	mutex_lock(&xive->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int prio;

		if (!xc)
			continue;

		kvmppc_xive_disable_vcpu_interrupts(vcpu);

		for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {

			/* Single escalation, no queue 7 */
			if (prio == 7 && kvmppc_xive_has_single_escalation(xive))
				break;

			if (xc->esc_virq[prio]) {
				free_irq(xc->esc_virq[prio], vcpu);
				irq_dispose_mapping(xc->esc_virq[prio]);
				kfree(xc->esc_virq_names[prio]);
				xc->esc_virq[prio] = 0;
			}

			kvmppc_xive_native_cleanup_queue(vcpu, prio);
		}
	}

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_reset_sources(sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	mutex_unlock(&xive->lock);

	return 0;
}

static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block *sb)
{
	int j;

	for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
		struct xive_irq_data *xd;
		u32 hw_num;

		if (!state->valid)
			continue;

		/*
		 * The struct kvmppc_xive_irq_state reflects the state
		 * of the EAS configuration and not the state of the
		 * source. The source is masked by setting the PQ bits
		 * to '-Q', which is what is done before calling the
		 * KVM_DEV_XIVE_EQ_SYNC control.
		 *
		 * If a source EAS is configured, OPAL syncs the XIVE
		 * IC of the source and the XIVE IC of the previous
		 * target if any.
		 *
		 * So it should be fine ignoring MASKED sources as
		 * they have been synced already.
		 */
		if (state->act_priority == MASKED)
			continue;

		kvmppc_xive_select_irq(state, &hw_num, &xd);
		xive_native_sync_source(hw_num);
		xive_native_sync_queue(hw_num);
	}
}

static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	unsigned int prio;
	int srcu_idx;

	if (!xc)
		return -ENOENT;

	for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];

		if (!q->qpage)
			continue;

		/* Mark EQ page dirty for migration */
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	}
	return 0;
}

static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	pr_devel("%s\n", __func__);

	mutex_lock(&xive->lock);
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_native_sync_sources(sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvmppc_xive_native_vcpu_eq_sync(vcpu);
	}
	mutex_unlock(&xive->lock);

	return 0;
}

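/*
 * A sketch of the migration-time trigger for the sync above:
 * userspace kicks it with a parameterless control of the CTRL group
 * (illustrative, 'xive_fd' being the device fd):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XIVE_GRP_CTRL,
 *		.attr = KVM_DEV_XIVE_EQ_SYNC,
 *	};
 *	ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
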
static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
				       struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	switch (attr->group) {
	case KVM_DEV_XIVE_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XIVE_RESET:
			return kvmppc_xive_reset(xive);
		case KVM_DEV_XIVE_EQ_SYNC:
			return kvmppc_xive_native_eq_sync(xive);
		case KVM_DEV_XIVE_NR_SERVERS:
			return kvmppc_xive_set_nr_servers(xive, attr->addr);
		}
		break;
	case KVM_DEV_XIVE_GRP_SOURCE:
		return kvmppc_xive_native_set_source(xive, attr->attr,
						     attr->addr);
	case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
		return kvmppc_xive_native_set_source_config(xive, attr->attr,
							    attr->addr);
	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
		return kvmppc_xive_native_set_queue_config(xive, attr->attr,
							   attr->addr);
	case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
		return kvmppc_xive_native_sync_source(xive, attr->attr,
						      attr->addr);
	}
	return -ENXIO;
}

static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
				       struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	switch (attr->group) {
	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
		return kvmppc_xive_native_get_queue_config(xive, attr->attr,
							   attr->addr);
	}
	return -ENXIO;
}

static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
				       struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XIVE_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XIVE_RESET:
		case KVM_DEV_XIVE_EQ_SYNC:
		case KVM_DEV_XIVE_NR_SERVERS:
			return 0;
		}
		break;
	case KVM_DEV_XIVE_GRP_SOURCE:
	case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
	case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
		if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ &&
		    attr->attr < KVMPPC_XIVE_NR_IRQS)
			return 0;
		break;
	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
		return 0;
	}
	return -ENXIO;
}

/*
 * Called when the device fd is closed. kvm->lock is held.
 */
static void kvmppc_xive_native_release(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	pr_devel("Releasing xive native device\n");

	/*
	 * Clear the KVM device file address_space which is used to
	 * unmap the ESB pages when a device is passed-through.
	 */
	mutex_lock(&xive->mapping_lock);
	xive->mapping = NULL;
	mutex_unlock(&xive->mapping_lock);

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd or mmap referring to
	 * the device. Therefore there cannot be any of the device
	 * attribute set/get, mmap, or page fault functions being
	 * executed concurrently, and similarly, the connect_vcpu
	 * and set/clr_mapped functions cannot be executing either.
	 */

	debugfs_remove(xive->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xive_native_[gs]et_vp) can be in progress.
		 * Holding the vcpu->mutex also means that the vcpu cannot
		 * be executing the KVM_RUN ioctl, and therefore it cannot
		 * be executing the XIVE push or pull code or accessing
		 * the XIVE MMIO regions.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	/*
	 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
	 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
	 * against xive code getting called during vcpu execution or
	 * set/get one_reg operations.
	 */
	kvm->arch.xive = NULL;

	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	/*
	 * A reference to the kvmppc_xive pointer is now kept under
	 * the xive_devices struct of the machine for reuse. It is
	 * freed when the VM is destroyed, for now, until we fix all
	 * the execution paths.
	 */
	kfree(dev);
}

/*
 * Create a XIVE device. kvm->lock is held.
 */
static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xive native device\n");

	if (kvm->arch.xive)
		return -EEXIST;

	xive = kvmppc_xive_get_device(kvm, type);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;
	mutex_init(&xive->mapping_lock);
	mutex_init(&xive->lock);

	/* VP allocation is delayed to the first call to connect_vcpu */
	xive->vp_base = XIVE_INVALID_VP;
	/* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
	 * on a POWER9 system.
	 */
	xive->nr_servers = KVM_MAX_VCPUS;

	if (xive_native_has_single_escalation())
		xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;

	if (xive_native_has_save_restore())
		xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE;

	xive->ops = &kvmppc_xive_native_ops;

	kvm->arch.xive = xive;
	return 0;
}

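/*
 * Sketch of device creation from userspace (illustrative): the device
 * is instantiated with the KVM_CREATE_DEVICE vm ioctl, and the fd it
 * returns is the one used for the attribute and mmap operations
 * above:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XIVE };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	int xive_fd = cd.fd;
 */
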
/*
 * Interrupt Pending Buffer (IPB) offset
 */
#define TM_IPB_SHIFT 40
#define TM_IPB_MASK  (((u64) 0xFF) << TM_IPB_SHIFT)

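/*
 * For example, the IPB byte backed up by OPAL in the NVT would be
 * extracted from the 64-bit thread context word as
 *
 *	u8 ipb = (opal_state & TM_IPB_MASK) >> TM_IPB_SHIFT;
 *
 * although kvmppc_xive_native_get_vp() below merges the whole masked
 * field into the saved w01 instead.
 */
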
int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u64 opal_state;
	int rc;

	if (!kvmppc_xive_enabled(vcpu))
		return -EPERM;

	if (!xc)
		return -ENOENT;

	/* Thread context registers. We only care about IPB and CPPR */
	val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01;

	/* Get the VP state from OPAL */
	rc = xive_native_get_vp_state(xc->vp_id, &opal_state);
	if (rc)
		return rc;

	/*
	 * Capture the backup of the IPB register in the NVT structure
	 * and merge it in our KVM VP state.
	 */
	val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK);

	pr_devel("%s NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n",
		 __func__,
		 vcpu->arch.xive_saved_state.nsr,
		 vcpu->arch.xive_saved_state.cppr,
		 vcpu->arch.xive_saved_state.ipb,
		 vcpu->arch.xive_saved_state.pipr,
		 vcpu->arch.xive_saved_state.w01,
		 (u32) vcpu->arch.xive_cam_word, opal_state);

	return 0;
}

int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;

	pr_devel("%s w01=%016llx vp=%016llx\n", __func__,
		 val->xive_timaval[0], val->xive_timaval[1]);

	if (!kvmppc_xive_enabled(vcpu))
		return -EPERM;

	if (!xc || !xive)
		return -ENOENT;

	/* We can't update the state of a "pushed" VCPU */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EBUSY;

	/*
	 * Restore the thread context registers. IPB and CPPR should
	 * be the only ones that matter.
	 */
	vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0];

	/*
	 * There is no need to restore the XIVE internal state (IPB
	 * stored in the NVT) as the IPB register was merged in KVM VP
	 * state when captured.
	 */
	return 0;
}

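/*
 * Both accessors above are reached through the one_reg API. A minimal
 * sketch, assuming the KVM_REG_PPC_VP_STATE pseudo-register and a
 * two-u64 buffer matching union kvmppc_one_reg:
 *
 *	__u64 timaval[2];
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_PPC_VP_STATE,
 *		.addr = (__u64)timaval,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// capture
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// restore
 */
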
bool kvmppc_xive_native_supported(void)
{
	return xive_native_has_queue_state_support();
}

static int xive_native_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	if (!kvm)
		return 0;

	seq_puts(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		seq_printf(m, "VCPU %d: VP=%#x/%02x\n"
			   "    NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x\n",
			   xc->server_num, xc->vp_id, xc->vp_chip_id,
			   vcpu->arch.xive_saved_state.nsr,
			   vcpu->arch.xive_saved_state.cppr,
			   vcpu->arch.xive_saved_state.ipb,
			   vcpu->arch.xive_saved_state.pipr,
			   be64_to_cpu(vcpu->arch.xive_saved_state.w01),
			   be32_to_cpu(vcpu->arch.xive_cam_word));

		kvmppc_xive_debug_show_queues(m, vcpu);
	}

	seq_puts(m, "=========\nSources\n=========\n");

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_debug_show_sources(m, sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xive_native_debug);

static void xive_native_debugfs_init(struct kvmppc_xive *xive)
{
	xive->dentry = debugfs_create_file("xive", 0444, xive->kvm->debugfs_dentry,
					   xive, &xive_native_debug_fops);

	pr_debug("%s: created\n", __func__);
}

static void kvmppc_xive_native_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;

	/* Register some debug interfaces */
	xive_native_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_native_ops = {
	.name = "kvm-xive-native",
	.create = kvmppc_xive_native_create,
	.init = kvmppc_xive_native_init,
	.release = kvmppc_xive_native_release,
	.set_attr = kvmppc_xive_native_set_attr,
	.get_attr = kvmppc_xive_native_get_attr,
	.has_attr = kvmppc_xive_native_has_attr,
	.mmap = kvmppc_xive_native_mmap,
};