/*
 * Copyright 2011 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
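/*
 * Per-CPU ICP presentation register block. The accessors below only
 * touch the XIRR and QIRR words; the xirr_poll and dummy fields are
 * included here only as a sketch of the usual XICS ICP MMIO layout and
 * should be treated as an assumption, not a definitive description.
 */
struct icp_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
};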
static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
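/*
 * The XIRR is a 32-bit register: the top byte holds the CPPR (current
 * processor priority) and the low 24 bits hold the interrupt source
 * number. Reading the XIRR accepts the most favored pending interrupt;
 * writing it back signals EOI and restores the CPPR.
 */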
static inline unsigned int icp_native_get_xirr(void)
{
	int cpu = smp_processor_id();
	unsigned int xirr;

	/* Handled an interrupt latched by KVM */
	xirr = kvmppc_get_xics_latch();
	if (xirr)
		return xirr;
	return in_be32(&icp_native_regs[cpu]->xirr.word);
}
static inline void icp_native_set_xirr(unsigned int value)
{
	int cpu = smp_processor_id();

	out_be32(&icp_native_regs[cpu]->xirr.word, value);
}
static inline void icp_native_set_cppr(u8 value)
{
	int cpu = smp_processor_id();

	out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
}
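/*
 * The QIRR (MFRR) holds the priority of a queued inter-processor
 * interrupt: writing a value more favored than the CPPR raises an IPI
 * on that CPU, writing 0xff (least favored) clears any pending IPI.
 */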
static inline void icp_native_set_qirr(int n_cpu, u8 value)
{
	out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value);
}
static void icp_native_set_cpu_priority(unsigned char cppr)
{
	xics_set_base_cppr(cppr);
	icp_native_set_cppr(cppr);
	iosync();
}
void icp_native_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	iosync();
	icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}
static void icp_native_teardown_cpu(void)
{
	int cpu = smp_processor_id();

	/* Clear any pending IPI */
	icp_native_set_qirr(cpu, 0xff);
}
static void icp_native_flush_ipi(void)
{
	/* We take the IPI irq but never return, so we need to EOI the IPI,
	 * but want to leave our priority 0.
	 *
	 * Should we check all the other interrupts too?
	 * Should we be flagging the idle loop instead?
	 * Or creating some task to be scheduled?
	 */
	icp_native_set_xirr((0x00 << 24) | XICS_IPI);
}
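/*
 * Fetch the next pending interrupt from the XIRR and translate the
 * hardware source number into a Linux interrupt number via the XICS
 * irq domain.
 */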
static unsigned int icp_native_get_irq(void)
{
	unsigned int xirr = icp_native_get_xirr();
	unsigned int vec = xirr & 0x00ffffff;
	unsigned int irq;

	if (vec == XICS_IRQ_SPURIOUS)
		return 0;
	irq = irq_find_mapping(xics_host, vec);
	if (likely(irq)) {
		xics_push_cppr(vec);
		return irq;
	}
	/* We don't have a linux mapping, so have rtas mask it. */
	xics_mask_unknown_vec(vec);
	/* We might learn about it later, so EOI it */
	icp_native_set_xirr(xirr);
	return 0;
}
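/*
 * SMP support: inter-processor interrupts are raised by writing
 * IPI_PRIORITY into the destination CPU's QIRR. The host-IPI flag is
 * set so KVM real-mode code knows the IPI is destined for the host.
 */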
#ifdef CONFIG_SMP
static void icp_native_cause_ipi(int cpu)
{
	kvmppc_set_host_ipi(cpu, 1);
	icp_native_set_qirr(cpu, IPI_PRIORITY);
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void icp_native_cause_ipi_rm(int cpu)
{
	/*
	 * Currently not used to send IPIs to another CPU
	 * on the same core. Only caller is KVM real mode.
	 * Need the physical address of the XICS to be
	 * previously saved in kvm_hstate in the paca.
	 */
	void __iomem *xics_phys;

	/*
	 * Just like the cause_ipi functions, it is required to
	 * include a full barrier before causing the IPI.
	 */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	mb();
	__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
}
#endif
/*
 * Called when an interrupt is received on an off-line CPU to
 * clear the interrupt, so that the CPU can go back to nap mode.
 */
void icp_native_flush_interrupt(void)
{
	unsigned int xirr = icp_native_get_xirr();
	unsigned int vec = xirr & 0x00ffffff;

	if (vec == XICS_IRQ_SPURIOUS)
		return;
	if (vec == XICS_IPI) {
		/* Clear pending IPI */
		int cpu = smp_processor_id();
		kvmppc_set_host_ipi(cpu, 0);
		icp_native_set_qirr(cpu, 0xff);
	} else {
		pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
		       vec);
		xics_mask_unknown_vec(vec);
	}

	/* EOI the interrupt */
	icp_native_set_xirr(xirr);
}
void xics_wake_cpu(int cpu)
{
	icp_native_set_qirr(cpu, IPI_PRIORITY);
}
EXPORT_SYMBOL_GPL(xics_wake_cpu);
static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	kvmppc_set_host_ipi(cpu, 0);
	icp_native_set_qirr(cpu, 0xff);

	return smp_ipi_demux();
}
#endif /* CONFIG_SMP */
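/*
 * Map one ICP presentation area: find the Linux CPU whose hardware
 * interrupt-server number matches hw_id, then reserve and ioremap its
 * per-CPU MMIO register block.
 */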
static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
					 unsigned long size)
{
	char *rname;
	int i, cpu = -1;

	/* This may look gross but it's good enough for now, we don't quite
	 * have a hard -> linux processor id matching.
	 */
	for_each_possible_cpu(i) {
		if (!cpu_present(i))
			continue;
		if (hw_id == get_hard_smp_processor_id(i)) {
			cpu = i;
			break;
		}
	}
	/* Fail, skip that CPU. Don't print, it's normal, some XICS come up
	 * with way more entries in there than you have CPUs
	 */
	if (cpu == -1)
		return 0;

	rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
			  cpu, hw_id);
	if (!request_mem_region(addr, size, rname)) {
		pr_warning("icp_native: Could not reserve ICP MMIO"
			   " for CPU %d, interrupt server #0x%x\n",
			   cpu, hw_id);
		return -EBUSY;
	}

	icp_native_regs[cpu] = ioremap(addr, size);
	kvmppc_set_xics_phys(cpu, addr);
	if (!icp_native_regs[cpu]) {
		pr_warning("icp_native: Failed ioremap for CPU %d, "
			   "interrupt server #0x%x, addr %#lx\n",
			   cpu, hw_id, addr);
		release_mem_region(addr, size);
		return -ENOMEM;
	}
	return 0;
}
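/*
 * Parse one device-tree ICP node: read the starting interrupt-server
 * number from "ibm,interrupt-server-ranges", then map the per-CPU
 * register area described by each "reg" tuple.
 */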
static int __init icp_native_init_one_node(struct device_node *np,
					   unsigned int *indx)
{
	int ilen;
	const __be32 *ireg;
	int i, reg_tuple_size;
	int num_servers = 0;

	/* This code makes the theoretically broken assumption that the
	 * interrupt server numbers are the same as the hard CPU numbers.
	 * This happens to be the case so far but we are playing with fire...
	 * should be fixed one of these days. -BenH.
	 */
	ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);

	/* Does that ever happen? We'll know soon enough... but even good old
	 * f80 does have that property ..
	 */
	WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32)));

	if (ireg) {
		*indx = of_read_number(ireg, 1);
		if (ilen >= 2*sizeof(u32))
			num_servers = of_read_number(ireg + 1, 1);
	}

	ireg = of_get_property(np, "reg", &ilen);
	if (!ireg) {
		pr_err("icp_native: Can't find interrupt reg property");
		return -1;
	}

	reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4;
	if (((ilen % reg_tuple_size) != 0)
	    || (num_servers && (num_servers != (ilen / reg_tuple_size)))) {
		pr_err("icp_native: ICP reg len (%d) != num servers (%d)",
		       ilen / reg_tuple_size, num_servers);
		return -1;
	}

	for (i = 0; i < (ilen / reg_tuple_size); i++) {
		struct resource r;
		int err;

		err = of_address_to_resource(np, i, &r);
		if (err) {
			pr_err("icp_native: Could not translate ICP MMIO"
			       " for interrupt server 0x%x (%d)\n", *indx, err);
			return -1;
		}

		if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r)))
			return -1;

		(*indx)++;
	}
	return 0;
}
static const struct icp_ops icp_native_ops = {
	.get_irq = icp_native_get_irq,
	.eoi = icp_native_eoi,
	.set_priority = icp_native_set_cpu_priority,
	.teardown_cpu = icp_native_teardown_cpu,
	.flush_ipi = icp_native_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action = icp_native_ipi_action,
	.cause_ipi = icp_native_cause_ipi,
#endif
};
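/*
 * Probe the device tree for ICP presentation nodes and, if any are
 * found, install the native ICP backend as the active icp_ops.
 */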
int __init icp_native_init(void)
{
	struct device_node *np;
	u32 indx = 0;
	int found = 0;

	for_each_compatible_node(np, NULL, "ibm,ppc-xicp")
		if (icp_native_init_one_node(np, &indx) == 0)
			found = 1;
	if (!found) {
		for_each_node_by_type(np,
			"PowerPC-External-Interrupt-Presentation") {
			if (icp_native_init_one_node(np, &indx) == 0)
				found = 1;
		}
	}

	if (found == 0)
		return -ENODEV;

	icp_ops = &icp_native_ops;

	return 0;
}