/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
/* File to be included by other .c files */
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
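/*
 * GLUE() pastes X_PFX onto each name so this template can be compiled
 * more than once (e.g. once for the real-mode and once for the
 * virtual-mode XICS-on-XIVE handlers) with different prefixes.
 */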
/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
#define XICS_DUMMY	1
static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
	u8 cppr;
	u16 ack;

	/*
	 * Ensure any previous store to CPPR is ordered vs.
	 * the subsequent loads from PIPR or ACK.
	 */
	eieio();

	/* Perform the acknowledge OS to register cycle. */
	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();
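	/*
	 * As used below, the ack word carries the NSR byte (with the
	 * exception bit) in its top 8 bits and the acknowledged CPPR
	 * in its low 8 bits.
	 */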
	/* XXX Check grouping level */

	/* Anything pending ? */
	if (!((ack >> 8) & TM_QW1_NSR_EO))
		return;

	/* Grab CPPR of the most favored pending interrupt */
	cppr = ack & 0xff;
	if (cppr < 8)
		xc->pending |= 1 << cppr;
#ifdef XIVE_RUNTIME_CHECKS
	/* Check consistency */
	if (cppr >= xc->hw_cppr)
		pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
			smp_processor_id(), cppr, xc->hw_cppr);
#endif
	/*
	 * Update our image of the HW CPPR. We don't yet modify
	 * xc->cppr, this will be done as we scan for interrupts
	 * in the queues.
	 */
	xc->hw_cppr = cppr;
}
static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__
	val >>= 64-8;
#endif
	return (u8)val;
}
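/*
 * Background on the ESB "PQ" state (not spelled out in this file): each
 * source has two state bits, P ("pending", an event was forwarded and is
 * awaiting EOI) and Q ("queued", a further event arrived while P was set).
 * The XIVE_ESB_SET_PQ_* offsets atomically set that state and return the
 * previous one, which is what esb_load() above is used for.
 */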
static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
		opal_int_eoi(hw_irq);
	else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		/*
		 * For LSIs the HW EOI cycle is used rather than PQ bits,
		 * as they are automatically re-triggered in HW when still
		 * pending.
		 */
		__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
	} else {
		uint64_t eoi_val;
		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 */
		eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

		/* Re-trigger if needed */
		if ((eoi_val & 1) && __x_trig_page(xd))
			__x_writeq(0, __x_trig_page(xd));
	}
}
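/*
 * scan_interrupts() below is used in three modes, selected by scan_type:
 * scan_fetch actually consumes an interrupt from a queue, scan_poll only
 * peeks, and scan_eoi merely refreshes the pending bits after an EOI.
 */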
enum {
	scan_fetch,
	scan_poll,
	scan_eoi,
};

static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
				       u8 pending, int scan_type)
{
	u32 hirq = 0;
	u8 prio = 0xff;

	/* Find highest pending priority */
	while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
		struct xive_q *q;
		u32 idx, toggle;
		__be32 *qpage;

		/*
		 * If pending is 0 this will return 0xff which is what
		 * we want.
		 */
		prio = ffs(pending) - 1;
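		/*
		 * Reminder: in XIVE (as in XICS), a numerically lower
		 * priority is more favored, so the lowest set bit found
		 * by ffs() is the most favored pending priority.
		 */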
		/*
		 * If the most favoured prio we found pending is less
		 * favored than (or equal to) a pending IPI, we return
		 * the IPI instead.
		 *
		 * Note: If pending was 0 and mfrr is 0xff, we will
		 * not spuriously take an IPI because mfrr cannot
		 * then be smaller than cppr.
		 */
		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
			prio = xc->mfrr;
			hirq = XICS_IPI;
			break;
		}

		/* Don't scan past the guest cppr */
		if (prio >= xc->cppr || prio > 7)
			break;
		/* Grab queue and pointers */
		q = &xc->queues[prio];
		idx = q->idx;
		toggle = q->toggle;

		/*
		 * Snapshot the queue page. The test further down for EOI
		 * must use the same "copy" that was used by __xive_read_eq
		 * since qpage can be set concurrently and we don't want
		 * to miss an EOI.
		 */
		qpage = READ_ONCE(q->qpage);
skip_ipi:
		/*
		 * Try to fetch from the queue. Will return 0 for a
		 * non-queueing priority (ie, qpage = 0).
		 */
		hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
		/*
		 * If this was a signal for an MFRR change done by
		 * H_IPI we skip it. Additionally, if we were fetching
		 * we EOI it now, thus re-enabling reception of a new
		 * such signal.
		 *
		 * We also need to do that if prio is 0 and we had no
		 * page for the queue. In this case, we have a non-queued
		 * IPI that needs to be EOId.
		 *
		 * This is safe because if we have another pending MFRR
		 * change that wasn't observed above, the Q bit will have
		 * been set and another occurrence of the IPI will trigger.
		 */
		if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
			if (scan_type == scan_fetch)
				GLUE(X_PFX,source_eoi)(xc->vp_ipi,
						       &xc->vp_ipi_data);
			/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
			WARN_ON(hirq && hirq != XICS_IPI);
#endif
			if (hirq)
				goto skip_ipi;
		}
		/* If it's the dummy interrupt, continue searching */
		if (hirq == XICS_DUMMY)
			goto skip_ipi;

		/* If fetching, update queue pointers */
		if (scan_type == scan_fetch) {
			q->idx = idx;
			q->toggle = toggle;
		}

		/* Something found, stop searching */
		if (hirq)
			break;

		/* Clear the pending bit on the now empty queue */
		pending &= ~(1 << prio);
		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away.
		 */
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);

			if (p) {
#ifdef XIVE_RUNTIME_CHECKS
				WARN_ON(p > atomic_read(&q->count));
#endif
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If we are just taking a "peek", do nothing else */
	if (scan_type == scan_poll)
		return hirq;

	/* Update the pending bits */
	xc->pending = pending;
	/*
	 * If this is an EOI that's it, no CPPR adjustment done here,
	 * all we needed was to clean up the stale pending bits and
	 * check if there's anything left.
	 */
	if (scan_type == scan_eoi)
		return hirq;
	/*
	 * If we found an interrupt, adjust what the guest CPPR should
	 * be as if we had just fetched that interrupt from HW.
	 *
	 * Note: This can only make xc->cppr smaller as the previous
	 * loop will only exit with hirq != 0 if prio is lower than
	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
	 * for pending IPIs.
	 */
	if (hirq)
		xc->cppr = prio;
	/*
	 * If it was an IPI the HW CPPR might have been lowered too much
	 * as the HW interrupt we use for IPIs is routed to priority 0.
	 *
	 * We re-sync it here.
	 */
	if (xc->cppr != xc->hw_cppr) {
		xc->hw_cppr = xc->cppr;
		__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
	}

	return hirq;
}
X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 old_cppr;
	u32 hirq;

	pr_devel("H_XIRR\n");

	xc->GLUE(X_STAT_PFX,h_xirr)++;

	/* First collect pending bits from HW */
	GLUE(X_PFX,ack_pending)(xc);
	/*
	 * Cleanup the old-style bits if needed (they may have been
	 * set by a pull or an escalation interrupt).
	 */
	if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
		clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
			  &vcpu->arch.pending_exceptions);

	pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
		 xc->pending, xc->hw_cppr, xc->cppr);
	/* Grab previous CPPR and reverse map it */
	old_cppr = xive_prio_to_guest(xc->cppr);

	/* Scan for actual interrupts */
	hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

	pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
		 hirq, xc->hw_cppr, xc->cppr);
#ifdef XIVE_RUNTIME_CHECKS
	/* That should never hit */
	if (hirq & 0xff000000)
		pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif
	/*
	 * XXX We could check if the interrupt is masked here and
	 * filter it. If we chose to do so, we would need to re-check
	 * the mask under the source lock and, when masked, record the
	 * event (set old_q) and return 0 instead of the interrupt.
	 */
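	/*
	 * XICS XIRR layout, as reconstructed below: the 24-bit interrupt
	 * source number in the low bits, with the previous CPPR in the
	 * top 8 bits.
	 */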
	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);

	return H_SUCCESS;
}
X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 pending = xc->pending;
	u32 hirq;

	pr_devel("H_IPOLL(server=%ld)\n", server);

	xc->GLUE(X_STAT_PFX,h_ipoll)++;
	/* Grab the target VCPU if not the current one */
	if (xc->server_num != server) {
		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
		if (!vcpu)
			return H_PARAMETER;
		xc = vcpu->arch.xive_vcpu;
		/* Scan all priorities */
		pending = 0xff;
	} else {
		/* Grab pending interrupt if any */
		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
		u8 pipr = be64_to_cpu(qw1) & 0xff;

		if (pipr < 8)
			pending |= 1 << pipr;
	}
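	/*
	 * A poll only peeks: scan_interrupts() in scan_poll mode leaves
	 * the queue indexes and the CPPR untouched.
	 */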
	hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);

	return H_SUCCESS;
}
static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
	u8 pending, prio;

	pending = xc->pending;
	if (xc->mfrr != 0xff) {
		if (xc->mfrr < 8)
			pending |= 1 << xc->mfrr;
		else
			pending |= 0x80;
	}
	if (!pending)
		return;
	prio = ffs(pending) - 1;

	__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}
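/*
 * Note: the store above uses the TIMA "set OS pending" special offset,
 * which tells the HW to consider the given priority pending for the OS
 * and raise an interrupt to the vCPU if appropriate.
 */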
static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
					       struct kvmppc_xive_vcpu *xc)
{
	unsigned int prio;

	/* For each priority that is now masked */
	for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];
		struct kvmppc_xive_irq_state *state;
		struct kvmppc_xive_src_block *sb;
		u32 idx, toggle, entry, irq, hw_num;
		struct xive_irq_data *xd;
		__be32 *qpage;
		u16 src;

		idx = q->idx;
		toggle = q->toggle;
		qpage = READ_ONCE(q->qpage);
		if (!qpage)
			continue;
		/* For each interrupt in the queue */
		for (;;) {
			entry = be32_to_cpup(qpage + idx);

			/* No more ? */
			if ((entry >> 31) == toggle)
				break;
			irq = entry & 0x7fffffff;
			/* Skip dummies and IPIs */
			if (irq == XICS_DUMMY || irq == XICS_IPI)
				goto next;
			sb = kvmppc_xive_find_source(xive, irq, &src);
			if (!sb)
				goto next;
			state = &sb->irq_state[src];

			/* Has it been rerouted ? */
			if (xc->server_num == state->act_server)
				goto next;
			/*
			 * All right, it *has* been re-routed, kill it from
			 * the queue.
			 */
			qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);

			/* Find the HW interrupt */
			kvmppc_xive_select_irq(state, &hw_num, &xd);

			/* If it's not an LSI, set PQ to 11; the EOI will force a resend */
			if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
				GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);

			/* EOI the source */
			GLUE(X_PFX,source_eoi)(hw_num, xd);

next:
			idx = (idx + 1) & q->msk;
		}
	}
}
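/*
 * Why PQ=11 then EOI forces a resend: source_eoi() clears PQ to 00 and
 * looks at the old Q bit; since we just set Q, the EOI path sees it and
 * writes the trigger page, re-sending the interrupt to its new target.
 */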
X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 old_cppr;

	pr_devel("H_CPPR(cppr=%ld)\n", cppr);

	xc->GLUE(X_STAT_PFX,h_cppr)++;

	/* Map CPPR */
	cppr = xive_prio_from_guest(cppr);
	/* Remember old and update SW state */
	old_cppr = xc->cppr;
	xc->cppr = cppr;

	/*
	 * Order the above update of xc->cppr with the subsequent
	 * read of xc->mfrr inside push_pending_to_hw()
	 */
	smp_mb();
	if (cppr > old_cppr) {
		/*
		 * We are masking less, we need to look for pending things
		 * to deliver and set VP pending bits accordingly to trigger
		 * a new interrupt, otherwise we might miss MFRR changes for
		 * which we have optimized out sending an IPI signal.
		 */
		GLUE(X_PFX,push_pending_to_hw)(xc);
	} else {
		/*
		 * We are masking more, we need to check the queue for any
		 * interrupt that has been routed to another CPU, take
		 * it out (replace it with the dummy) and retrigger it.
		 *
		 * This is necessary since those interrupts may otherwise
		 * never be processed, at least not until this CPU restores
		 * a lower CPPR.
		 *
		 * This is in theory racy vs. HW adding new interrupts to
		 * the queue. In practice this works because the interesting
		 * cases are when the guest has done a set_xive() to move the
		 * interrupt away, which flushes the xive, followed by the
		 * target CPU doing a H_CPPR. So any new interrupt coming into
		 * the queue must still be routed to us and isn't a source
		 * of concern.
		 */
		GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
	}
	/* Apply new CPPR */
	xc->hw_cppr = cppr;
	__x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return H_SUCCESS;
}
X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_irq_data *xd;
	u8 new_cppr = xirr >> 24;
	u32 irq = xirr & 0x00ffffff, hw_num;
	u16 src;
	int rc = 0;

	pr_devel("H_EOI(xirr=%08lx)\n", xirr);

	xc->GLUE(X_STAT_PFX,h_eoi)++;

	xc->cppr = xive_prio_from_guest(new_cppr);
	/*
	 * IPIs are synthesized from MFRR and thus don't need
	 * any special EOI handling. The underlying interrupt
	 * used to signal MFRR changes is EOId when fetched from
	 * the queue.
	 */
	if (irq == XICS_IPI || irq == 0) {
		/*
		 * This barrier orders the setting of xc->cppr vs.
		 * the subsequent test of xc->mfrr done inside
		 * scan_interrupts and push_pending_to_hw
		 */
		smp_mb();
		goto bail;
	}
	/* Find interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel(" source not found !\n");
		rc = H_PARAMETER;
		/* Same barrier as above */
		smp_mb();
		goto bail;
	}
	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);
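	/*
	 * in_eoi flags the source as having an EOI in progress, so that
	 * code masking or re-configuring the interrupt can wait for it
	 * to finish before touching the source.
	 */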
	state->in_eoi = true;
	/*
	 * This barrier orders both the setting of in_eoi above vs.
	 * the subsequent test of guest_priority, and the setting
	 * of xc->cppr vs. the subsequent test of xc->mfrr done inside
	 * scan_interrupts and push_pending_to_hw
	 */
	smp_mb();
again:
	if (state->guest_priority == MASKED) {
		arch_spin_lock(&sb->lock);
		if (state->guest_priority != MASKED) {
			arch_spin_unlock(&sb->lock);
			goto again;
		}
		pr_devel(" EOI on saved P...\n");

		/* Clear old_p, that will cause unmask to perform an EOI */
		state->old_p = false;

		arch_spin_unlock(&sb->lock);
	} else {
		pr_devel(" EOI on source...\n");

		/* Perform EOI on the source */
		GLUE(X_PFX,source_eoi)(hw_num, xd);

		/* If it's an emulated LSI, check level and resend */
		if (state->lsi && state->asserted)
			__x_writeq(0, __x_trig_page(xd));
	}
	/*
	 * This barrier orders the above guest_priority check
	 * and spin_lock/unlock with clearing in_eoi below.
	 *
	 * It also has to be a full mb() as it must ensure
	 * the MMIOs done in source_eoi() are completed before
	 * state->in_eoi is visible.
	 */
	mb();
	state->in_eoi = false;

bail:
	/* Re-evaluate pending IRQs and update HW */
	GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
	GLUE(X_PFX,push_pending_to_hw)(xc);
	pr_devel(" after scan pending=%02x\n", xc->pending);
	/* Apply new CPPR */
	xc->hw_cppr = xc->cppr;
	__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return rc;
}
X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			       unsigned long mfrr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

	xc->GLUE(X_STAT_PFX,h_ipi)++;

	/* Find target */
	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
	if (!vcpu)
		return H_PARAMETER;
	xc = vcpu->arch.xive_vcpu;
	/* Locklessly write over MFRR */
	xc->mfrr = mfrr;
	/*
	 * The load of xc->cppr below and the subsequent MMIO store
	 * to the IPI must happen after the above mfrr update is
	 * globally visible so that:
	 *
	 * - We synchronize with another CPU doing an H_EOI or a H_CPPR
	 *   updating xc->cppr then reading xc->mfrr.
	 *
	 * - The target of the IPI sees the xc->mfrr update.
	 */
	mb();
	/* Shoot the IPI if more favored than the target CPPR */
	if (mfrr < xc->cppr)
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

	return H_SUCCESS;
}