1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016 IBM Corporation.
 */
5 #include <linux/types.h>
6 #include <linux/kernel.h>
9 #include <linux/interrupt.h>
10 #include <linux/irqdomain.h>
11 #include <linux/cpu.h>
16 #include <asm/errno.h>
20 #include <asm/kvm_ppc.h>
/*
 * Per-CPU teardown hook for the OPAL ICP backend.
 *
 * Writes 0xff to this hardware thread's MFRR via OPAL, which (per the
 * inline comment below) clears any IPI still latched for this CPU.
 *
 * NOTE(review): interior lines (function braces) are elided from this
 * view of the file; code below kept byte-identical, comments only.
 */
static void icp_opal_teardown_cpu(void)
	int hw_cpu = hard_smp_processor_id();

	/* Clear any pending IPI */
	opal_int_set_mfrr(hw_cpu, 0xff);
/*
 * EOI a taken IPI without dropping this CPU's priority.
 *
 * opal_int_eoi() returning > 0 signals more interrupts are pending;
 * force_external_irq_replay() is called so they are not lost.
 */
static void icp_opal_flush_ipi(void)
	/*
	 * We take the ipi irq but and never return so we need to EOI the IPI,
	 * but want to leave our priority 0.
	 *
	 * Should we check all the other interrupts too?
	 * Should we be flagging idle loop instead?
	 * Or creating some task to be scheduled?
	 */
	/* XIRR = (CPPR 0x00 in the top byte) | IPI vector */
	if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
		force_external_irq_replay();
/*
 * Fetch the pending XIRR value for this CPU.
 *
 * Checks the KVM-latched interrupt first (kvmppc_get_xics_latch()),
 * then asks OPAL for the hardware XIRR and byte-swaps it from
 * big-endian.
 *
 * NOTE(review): lines are elided from this view — declarations of
 * hw_xirr/rc, and presumably an early return when kvm_xirr is non-zero
 * and an error check on rc, are not visible; confirm against the full
 * file. Code kept byte-identical.
 */
static unsigned int icp_opal_get_xirr(void)
	unsigned int kvm_xirr;

	/* Handle an interrupt latched by KVM first */
	kvm_xirr = kvmppc_get_xics_latch();

	/* Ask OPAL for the hardware XIRR (returned big-endian) */
	rc = opal_int_get_xirr(&hw_xirr, false);
	return be32_to_cpu(hw_xirr);
/*
 * get_irq callback: translate the pending XIRR vector into a Linux irq
 * number via the xics_host irq domain.
 *
 * A vector with no Linux mapping is masked and then EOI'd (we may
 * learn about it later); an EOI result > 0 forces an interrupt replay.
 *
 * NOTE(review): lines are elided from this view — local declarations,
 * the body of the XICS_IRQ_SPURIOUS branch, and the found-mapping
 * return path are not visible; confirm against the full file.
 */
static unsigned int icp_opal_get_irq(void)
	xirr = icp_opal_get_xirr();
	vec = xirr & 0x00ffffff;	/* low 24 bits hold the vector */
	if (vec == XICS_IRQ_SPURIOUS)
	irq = irq_find_mapping(xics_host, vec);

	/* We don't have a linux mapping, so have rtas mask it. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	if (opal_int_eoi(xirr) > 0)
		force_external_irq_replay();
/*
 * set_priority callback: set this CPU's interrupt priority (CPPR),
 * mirroring it in the XICS base state and in OPAL.
 *
 * Any priority at or above DEFAULT_PRIORITY is clamped to
 * LOWEST_PRIORITY — see the dragon warning below for why.
 */
static void icp_opal_set_cpu_priority(unsigned char cppr)
	/*
	 * Here be dragons. The caller has asked to allow only IPI's and not
	 * external interrupts. But OPAL XIVE doesn't support that. So instead
	 * of allowing no interrupts allow all. That's still not right, but
	 * currently the only caller who does this is xics_migrate_irqs_away()
	 * and it works in that case.
	 */
	if (cppr >= DEFAULT_PRIORITY)
		cppr = LOWEST_PRIORITY;

	xics_set_base_cppr(cppr);
	opal_int_set_cppr(cppr);
/*
 * eoi callback: signal end-of-interrupt for @d's hardware irq,
 * restoring the previous priority (xics_pop_cppr() in the XIRR top
 * byte) in the same OPAL call.
 *
 * NOTE(review): lines are elided from this view — the rc declaration
 * and the `if (rc > 0)` guard in front of the replay call are not
 * visible; confirm against the full file.
 */
static void icp_opal_eoi(struct irq_data *d)
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	rc = opal_int_eoi((xics_pop_cppr() << 24) | hw_irq);

	/*
	 * EOI tells us whether there are more interrupts to fetch.
	 *
	 * Some HW implementations might not be able to send us another
	 * external interrupt in that case, so we force a replay.
	 */
		force_external_irq_replay();
/*
 * cause_ipi callback: send an IPI to @cpu.
 *
 * Sets the KVM host-IPI flag for the target first, then writes
 * IPI_PRIORITY into the target hardware thread's MFRR via OPAL to
 * raise the interrupt.
 */
static void icp_opal_cause_ipi(int cpu)
	int hw_cpu = get_hard_smp_processor_id(cpu);

	kvmppc_set_host_ipi(cpu);
	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
/*
 * IPI irq handler: acknowledge the IPI on this CPU, then demultiplex
 * the pending IPI message(s).
 *
 * Clears the KVM host-IPI flag and writes 0xff (no pending IPI) back
 * to this hardware thread's MFRR before handing off to
 * smp_ipi_demux().
 */
static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
	int cpu = smp_processor_id();

	kvmppc_clear_host_ipi(cpu);
	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);

	return smp_ipi_demux();
/*
 * Called when an interrupt is received on an off-line CPU to
 * clear the interrupt, so that the CPU can go back to nap mode.
 *
 * Loops EOI-ing while opal_int_eoi() reports more interrupts pending:
 * pending IPIs are cleared via MFRR, anything else is an error (a hw
 * interrupt routed to an offline CPU) and gets masked.
 *
 * NOTE(review): lines are elided from this view — local declarations,
 * the `do {`, loop-exit statement after the spurious check, the else
 * branch structure, and the pr_err continuation string are not
 * visible; confirm against the full file. Code kept byte-identical.
 */
void icp_opal_flush_interrupt(void)
		xirr = icp_opal_get_xirr();
		vec = xirr & 0x00ffffff;
		if (vec == XICS_IRQ_SPURIOUS)
		if (vec == XICS_IPI) {
			/* Clear pending IPI */
			int cpu = smp_processor_id();
			kvmppc_clear_host_ipi(cpu);
			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
			/* string continuation line elided in this view */
			pr_err("XICS: hw interrupt 0x%x to offline cpu, "
			xics_mask_unknown_vec(vec);

		/* EOI the interrupt */
	} while (opal_int_eoi(xirr) > 0);
173 #endif /* CONFIG_SMP */
/*
 * ICP callback table wiring the OPAL implementations into the generic
 * XICS layer (registered by icp_opal_init() below).
 *
 * NOTE(review): some initializer lines (and the closing brace) are
 * elided from this view; entries kept byte-identical.
 */
static const struct icp_ops icp_opal_ops = {
	.get_irq = icp_opal_get_irq,
	.set_priority = icp_opal_set_cpu_priority,
	.teardown_cpu = icp_opal_teardown_cpu,
	.flush_ipi = icp_opal_flush_ipi,
	.ipi_action = icp_opal_ipi_action,
	.cause_ipi = icp_opal_cause_ipi,
/*
 * Boot-time init: look for the "ibm,opal-intc" device-tree node and,
 * when found, install the OPAL ICP callback table as the active ICP.
 *
 * NOTE(review): the function continues past the end of this view —
 * the !np error path and the return value are not visible; confirm
 * against the full file.
 */
int __init icp_opal_init(void)
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");

	icp_ops = &icp_opal_ops;

	printk("XICS: Using OPAL ICP fallbacks\n");