/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"
/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed".
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima xive_tima
#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
#define __x_writeb __raw_writeb
#define __x_readw __raw_readw
#define __x_readq __raw_readq
#define __x_writeq __raw_writeq

#include "book3s_xive_template.c"
/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page.
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
static irqreturn_t xive_esc_irq(int irq, void *data)
	struct kvm_vcpu *vcpu = data;
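
	/*
	 * Flag a pending exit and kick the vCPU out of the guest so the
	 * escalation is noticed on the next entry.
	 */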
	vcpu->arch.irq_pending = 1;
	kvmppc_fast_vcpu_kick(vcpu);

	/* Since we have the no-EOI flag, the interrupt is effectively
	 * disabled now. Clearing xive_esc_on means we won't bother
	 * doing so on the next entry.
	 *
	 * This also allows the entry code to know that if a PQ combination
	 * of 10 is observed while xive_esc_on is true, it means the queue
	 * contains an unprocessed escalation interrupt. We don't make use of
	 * that knowledge today but might (see comment in book3s_hv_rmhandler.S).
	 */
	vcpu->arch.xive_esc_on = false;
static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);

	if (xc->xive->single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);

	xc->esc_virq_names[prio] = name;

	/* In single escalation mode, we grab the ESB MMIO of the
	 * interrupt and mask it. Also populate the VCPU v/raddr
	 * of the ESB page for use by asm entry/exit code. Finally
	 * set the XIVE_IRQ_NO_EOI flag which will prevent the
	 * core code from performing an EOI on the escalation
	 * interrupt, thus leaving it effectively masked after
	 * it fires once.
	 */
	if (xc->xive->single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_NO_EOI;

	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve info on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries.
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
/* Called with kvm_lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;

	lockdep_assert_held(&kvm->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			xive_attach_escalation(vcpu, prio);
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
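
/*
 * Account for an interrupt that may still be sitting in the old
 * target's queue after a retarget. The pending count is consumed
 * later, when that queue is observed to be empty.
 */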
static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
	max = (q->msk + 1) - XIVE_Q_GAP;
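	/*
	 * atomic_add_unless() fails once the count reaches "max", which
	 * keeps the XIVE_Q_GAP entries free for the IPI and the safety
	 * guard mentioned above.
	 */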
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
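
/*
 * Pick a target queue for an interrupt: try the requested server
 * first, then fall back to scanning all vCPUs for one with room in
 * its queue at that priority.
 */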
static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
	struct kvm_vcpu *vcpu;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	/* No available target ! */
	return -EBUSY;
static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
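
/*
 * Mask a source at the HW level and return the previous guest
 * priority. Returns with the source block lock held.
 */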
static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
	struct xive_irq_data *xd;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * If the interrupt is marked as needing masking via
	 * firmware, we do it here. Firmware masking however
	 * is "lossy", it won't return the old p and q bits
	 * and won't set the interrupt to a state where it will
	 * record queued ones. If this is an issue we should do
	 * lazy masking instead.
	 *
	 * For now, we work around this in unmask by forcing
	 * an interrupt whenever we unmask a non-LSI via FW
	 * (if ever).
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive_vp(xive, state->act_server),
					  MASKED, state->number);
		/* set old_p so we can track if an H_EOI was done */
		state->old_p = true;
		state->old_q = false;
	} else {
		/* Set PQ to 10, return old P and old Q and remember them */
		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
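		/* Bit 1 of the returned ESB value is P, bit 0 is Q */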
		state->old_p = !!(val & 2);
		state->old_q = !!(val & 1);
	}

	/*
	 * Synchronize hardware to ensure the queues are updated
	 * when masking
	 */
	xive_native_sync_source(hw_num);

	return old_prio;
static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			return;
		arch_spin_unlock(&sb->lock);
	}
static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
	struct xive_irq_data *xd;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * See comment in xive_lock_and_mask() concerning masking
	 * via firmware.
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive_vp(xive, state->act_server),
					  state->act_priority, state->number);
		/* If an EOI is needed, do it here */
		if (state->old_p)
			xive_vm_source_eoi(hw_num, xd);
		/* If this is not an LSI, force a trigger */
		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
			xive_irq_trigger(xd);

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI,
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
/*
 * Target an interrupt to a given server/prio, this will fallback
 * to another server if necessary and perform the HW targeting
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
	struct kvmppc_xive *xive = kvm->arch.xive;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);

	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 xive_vp(xive, server),
					 prio, state->number);
/*
 * Targeting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targeted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targeting
 *   from masking, we only handle accounting during (re)targeting,
 *   this also allows us to let an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts out of remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
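 *
 * In short (P = presented, Q = queued):
 *
 *   mask:    PQ <- 10, save previous P/Q
 *   unmask:  saved Q set   -> PQ <- 11
 *            saved P clear -> effective EOI (re-fires if Q was set)
 *            saved P set   -> leave PQ alone, guest H_EOI still expected
 */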
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED)
		rc = xive_check_provisioning(xive->kvm,
					     xive_prio_from_guest(priority));
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs, we'll handle
	 * targeting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask will not actually unmask, this will
	 * be done later by xive_finish_unmask() once the targeting
	 * has been done, so we don't try to unmask an interrupt
	 * that hasn't yet been targeted.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	/*
	 * Then we handle targeting.
	 *
	 * First calculate a new "actual priority"
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything,
	 *
	 * The condition for re-targeting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 * also ff, we don't do anything and leave the interrupt
	 * untargeted. An attempt of doing an int_on on an
	 * untargeted interrupt will fail. If that is a problem
	 * we could initialize interrupts with valid default
	 * priorities.
	 */
	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally, update saved_priority to match. Only int_on/off
	 * set this field to a different value.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	state = &sb->irq_state[idx];

	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);
int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targeted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargeted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);
707 int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
709 struct kvmppc_xive *xive = kvm->arch.xive;
710 struct kvmppc_xive_src_block *sb;
711 struct kvmppc_xive_irq_state *state;
717 sb = kvmppc_xive_find_source(xive, irq, &idx);
720 state = &sb->irq_state[idx];
722 pr_devel("int_off(irq=0x%x)\n", irq);
727 state->saved_priority = xive_lock_and_mask(xive, sb, state);
728 arch_spin_unlock(&sb->lock);
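
/*
 * Used on migration restore: re-fire the IPI behind a source that
 * was found pending in the saved XIRR, so it gets presented again.
 */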
static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	state = &sb->irq_state[idx];

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough.
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EBUSY;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If it's not 0xff, we mark the VCPU as
	 * having a pending MFRR change, which will re-evaluate the
	 * target. The VCPU will thus potentially get a spurious
	 * interrupt but that's not a big deal.
	 */
	xc->mfrr = mfrr;
	xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle that. For anything else we need
	 * to force a resend of the source.
	 * However the source may not have been setup yet. If that's the
	 * case, we keep that info and increment a counter in the xive to
	 * tell subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel(" xisr restore delayed\n");
	}
int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU,
	 * this will prevent further EOIs and similar operations
	 * from the XIVE code. It will also mask the interrupt
	 * to either PQ=10 or 11 state, the latter if the interrupt
	 * is pending. This will allow us to unmask or retrigger it
	 * after routing it to the guest with a simple EOI.
	 *
	 * The "state" argument is a "token", all it needs is to be
	 * non-NULL to switch to passed-through or NULL for the
	 * other way around. We may not yet have an actual VCPU
	 * target here and we don't really care.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targeted. Otherwise this will
	 * mask the interrupt in a lossy way (act_priority is 0xff)
	 * which is fine for a never started interrupt.
	 */
	xive_native_configure_irq(hw_irq,
				  xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue thus we have to wait for a guest
	 * originated EOI.
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear old_p/old_q as they are no longer relevant */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If old_p is set, the interrupt is pending, we switch it to
	 * PQ=11. This will force a resend in the host so the interrupt
	 * isn't lost to whatever host driver may pick it up.
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
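
/*
 * Walk all sources and mask any interrupt still routed to this vCPU
 * so that nothing new lands in its queues while it is torn down.
 */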
static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Free escalations */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		if (xc->esc_virq[i]) {
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
	}

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Free the queues */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		xive_native_disable_queue(xc->vp_id, q, i);
		free_pages((unsigned long)q->qpage,
			   xive->q_page_order);
	}

	xive_cleanup_irq_data(&xc->vp_ipi_data);
	xive_native_free_irq(xc->vp_ipi);
int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;
	if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}
	if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}
	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc)
		return -ENOMEM;

	/* We need to synchronize with queue provisioning */
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.xive_vcpu = xc;
	xc->server_num = cpu;
	xc->vp_id = xive_vp(xive, cpu);

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		pr_err("Failed to allocate xive irq for VCPU IPI\n");
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupts numbering
	 */
	r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
	if (r) {
		pr_err("Failed to enable VP in OPAL, err %d\n", r);

	/*
	 * Initialize queues. Initially we set them all for no queueing
	 * and we enable escalation for queue 0 only which we'll use for
	 * our mfrr change notifications. If the VCPU is hot-plugged, we
	 * do handle provisioning however based on the existing "map"
	 * of enabled queues.
	 */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Single escalation, no queue 7 */
		if (i == 7 && xive->single_escalation)
			break;

		/* Is queue already enabled ? Provision it */
		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0 && !xive->single_escalation)
				xive_attach_escalation(vcpu, i);
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r)
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
		}
	}

	/* If not done above, attach priority 0 escalation */
	r = xive_attach_escalation(vcpu, 0);

	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

	mutex_unlock(&vcpu->kvm->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	state = &sb->irq_state[idx];

	/* Some sanity checking */
	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	/*
	 * If the interrupt is in a queue it should have P set.
	 * We warn so that it gets reported. A backtrace isn't useful
	 * so no need to use a WARN_ON.
	 */
	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	state->in_queue = true;
static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	/* Mask and save state, this will also sync HW queues */
	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	/* Transfer P and Q */
	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	arch_spin_unlock(&sb->lock);
static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	/*
	 * Lock / exclude EOI (not technically necessary if the
	 * guest isn't running concurrently). If this becomes a
	 * performance issue we can probably remove the lock.
	 */
	xive_lock_for_unmask(sb, state);

	/* Restore mask/prio if it wasn't masked */
	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	arch_spin_unlock(&sb->lock);
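
/*
 * Walk one queue page and flag every interrupt number still present
 * in it as queued.
 */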
static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
	u32 toggle = q->toggle;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
static void xive_pre_save_scan(struct kvmppc_xive *xive)
	struct kvm_vcpu *vcpu = NULL;

	/*
	 * See comment in xive_get_source() about how this
	 * works. Collect a stable state for all interrupts.
	 */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	/* Then scan the queues and update the "in_queue" flag */
	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	/* Finally restore interrupt states */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
static void xive_post_save_scan(struct kvmppc_xive *xive)
	/* Clear all the in_queue flags */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	/* Next get_source() will do a new scan */
	xive->saved_src_count = 0;
/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	state = &sb->irq_state[idx];

	pr_devel("get_source(%ld)...\n", irq);

	/*
	 * So to properly save the state into something that looks like a
	 * XICS migration stream we cannot treat interrupts individually.
	 *
	 * We need, instead, mask them all (& save their previous PQ state)
	 * to get a stable state in the HW, then sync them to ensure that
	 * any interrupt that had already fired hits its queue, and finally
	 * scan all the queues to collect which interrupts are still present
	 * in the queues, so we can set the "pending" flag on them and
	 * they can be resent on restore.
	 *
	 * So we do it all when the "first" interrupt gets saved, all the
	 * state is collected at that point, the rest of xive_get_source()
	 * will merely collect and convert that state to the expected
	 * userspace bit mask.
	 */
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	/* Convert saved state into something compatible with xics */
	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;
		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		/*
		 * We mark it pending (which will attempt a re-delivery)
		 * if we are in a queue *or* we were masked and had
		 * Q set which is equivalent to the XICS "masked pending"
		 * state
		 */
		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	/*
	 * If that was the last interrupt saved, reset the
	 * in_queue flags
	 */
	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	/* Copy the result to userspace */
	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
							    int irq)
	struct kvm *kvm = xive->kvm;
	struct kvmppc_xive_src_block *sb;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
		goto out;

	/* Create the ICS */
	sb = kzalloc(sizeof(*sb), GFP_KERNEL);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&kvm->lock);
	return xive->src_blocks[bid];
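
/*
 * Check whether any vCPU stashed this interrupt as "delayed" during
 * an ICP restore; if so, clear the record so it can be made pending.
 */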
static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u8 act_prio, guest_prio;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	/* Find the source */
	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	/* Read user passed data */
	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't we
	 * leave the interrupt untargeted. It means that an interrupt
	 * can become "untargeted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargeted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertised to a running guest yet.
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->kvm->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->kvm->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targeting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel(" Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was unmasked, update guest priority and
	 * perform the appropriate state transition and do a
	 * re-trigger if necessary.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel(" masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;
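
/*
 * Interrupt delivery entry point (typically reached via kvm_set_irq()
 * for irqfd / KVM_IRQ_LINE): track LSI assertion state and fire the
 * backing IPI so the interrupt is presented to the guest.
 */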
int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;

	sb = kvmppc_xive_find_source(xive, irq, &idx);

	/* Perform locklessly .... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);
static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);

static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_cleanup_irq_data(&state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too but keep IRQ hw data */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
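
/*
 * Device destroy path: tear down debugfs, detach from the VM, then
 * free every source block and the VP block.
 */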
static void kvmppc_xive_free(struct kvm_device *dev)
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;

	debugfs_remove(xive->dentry);

	kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xive for partition\n");

	xive = kzalloc(sizeof(*xive), GFP_KERNEL);

	dev->private = xive;

	/* Already there ? */
	if (kvm->arch.xive)
		ret = -EEXIST;
	else
		kvm->arch.xive = xive;

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* Allocate a bunch of VPs */
	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
	pr_devel("VP_Base=%x\n", xive->vp_base);

	if (xive->vp_base == XIVE_INVALID_VP)
		ret = -ENXIO;

	xive->single_escalation = xive_native_has_single_escalation();
static int xive_debug_show(struct seq_file *m, void *private)
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;

	seq_printf(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
		for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
			struct xive_q *q = &xc->queues[i];

			if (!q->qpage && !xc->esc_virq[i])
				continue;

			seq_printf(m, " [q%d]: ", i);

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1);
			}
			if (xc->esc_virq[i]) {
				struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
				struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
				u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

				seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
					   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
					   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
					   xc->esc_virq[i], pq, xd->eoi_page);
				seq_printf(m, "\n");
			}
		}

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_printf(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
static int xive_debug_open(struct inode *inode, struct file *file)
	return single_open(file, xive_debug_show, inode->i_private);

static const struct file_operations xive_debug_fops = {
	.open = xive_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xive_debugfs_init(struct kvmppc_xive *xive)
	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
static void kvmppc_xive_init(struct kvm_device *dev)
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);

struct kvm_device_ops kvm_xive_ops = {
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.destroy = kvmppc_xive_free,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};
void kvmppc_xive_init_module(void)
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;

void kvmppc_xive_exit_module(void)
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;