// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson Extend I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>

#define EIOINTC_REG_NODEMAP	0x14a0
#define EIOINTC_REG_IPMAP	0x14c0
#define EIOINTC_REG_ENABLE	0x1600
#define EIOINTC_REG_BOUNCE	0x1680
#define EIOINTC_REG_ISR		0x1800
#define EIOINTC_REG_ROUTE	0x1c00

#define VEC_REG_COUNT		4
#define VEC_COUNT_PER_REG	64
#define VEC_COUNT		(VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE	0xffffffff

#define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)

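/*
 * Each EIOINTC instance handles up to 256 vectors (VEC_COUNT), tracked in
 * four 64-bit IOCSR registers per bank. The enable, bounce and status (ISR)
 * banks are bitmaps indexed by vector; the route bank holds one byte per
 * vector (see eiointc_set_irq_route() below).
 */
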
static int nr_pics;

struct eiointc_priv {
	u32			node;
	u32			vec_count;
	nodemask_t		node_map;
	cpumask_t		cpuspan_map;
	struct fwnode_handle	*domain_handle;
	struct irq_domain	*eiointc_domain;
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

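/* Set the extended I/O interrupt enable bit in the per-core misc-function IOCSR */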
static void eiointc_enable(void)
{
	uint64_t misc;

	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}

static int cpu_to_eio_node(int cpu)
{
	return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}

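/*
 * The route bank holds one byte per vector: the low 4 bits select a core
 * within the target node (as a bitmap) and the high 4 bits select the node.
 * Four vectors share each 32-bit register, so the update is done with a
 * per-byte mask. csr_any_send() performs the IOCSR write on behalf of the
 * target core.
 */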
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

static DEFINE_RAW_SPINLOCK(affinity_lock);

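/*
 * Changing affinity masks the vector, rewrites its route, then unmasks it,
 * so an interrupt cannot be delivered with a half-updated route. The lock
 * serializes concurrent affinity updates against each other.
 */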
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct cpumask intersect_affinity;
	struct eiointc_priv *priv = d->domain->host_data;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
	cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);

	if (cpumask_empty(&intersect_affinity)) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}
	cpu = cpumask_first(&intersect_affinity);

	vector = d->hwirq;
	regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);

	/* Mask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
			0x0, priv->node * CORES_PER_EIO_NODE);

	/* Set route for target vector */
	eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);

	/* Unmask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
			0x0, priv->node * CORES_PER_EIO_NODE);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}

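/*
 * Per-CPU init path: runs at probe time, from the CPU hotplug "starting"
 * callback and on resume. The controller registers are shared per EIO
 * node, so only the first core of each node programs them.
 */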
static int eiointc_router_init(unsigned int cpu)
{
	int i, bit;
	uint32_t data;
	uint32_t node = cpu_to_eio_node(cpu);
	int index = eiointc_index(node);

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -1;
	}

	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
		eiointc_enable();

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
			bit = BIT(1 + index); /* Route to IP[1 + index] */
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = EIOINTC_ALL_ENABLE;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}

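/*
 * Chained handler for the cascade interrupt: scan each 64-bit ISR word,
 * acknowledge the pending bits by writing them back, then hand every set
 * bit to the matching Linux IRQ in the eiointc domain.
 */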
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u64 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);

	for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));

		/* Skip handling if pending bitmap is zero */
		if (!pending)
			continue;

		/* Clear the IRQs */
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;

			generic_handle_domain_irq(priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}

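/*
 * The ISR is already cleared in the dispatch loop (write-1-to-clear) and
 * vectors stay routed and enabled across handling, so the per-IRQ ack,
 * mask and unmask callbacks have nothing to do.
 */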
static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
	.name			= "EIOINTC",
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
	.irq_set_affinity	= eiointc_set_irq_affinity,
};

static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
					priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};

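/*
 * pch_group and msi_group are arch-provided tables that associate each
 * node with the parent irq_domain its PCH PIC/MSI controllers should use.
 */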
static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node) {
			vec_group[i].parent = parent;
			return;
		}
	}
}

static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node)
			return vec_group[i].parent;
	}

	return NULL;
}

static int eiointc_suspend(void)
{
	return 0;
}

static void eiointc_resume(void)
{
	eiointc_router_init(0);
}

static struct syscore_ops eiointc_syscore_ops = {
	.suspend	= eiointc_suspend,
	.resume		= eiointc_resume,
};

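/*
 * MADT walkers for the downstream controllers: each PCH PIC/MSI entry is
 * attached to the eiointc domain previously recorded for its node. For the
 * PCH PIC, the node is taken from bits 47:44 of the register base address.
 */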
static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
					const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);

	return 0;
}

static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
					const unsigned long end)
{
	struct irq_domain *parent;
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	int node;

	if (cpu_has_flatmode)
		node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
	else
		node = eiointc_priv[nr_pics - 1]->node;

	parent = acpi_get_vec_parent(node, msi_group);

	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);

	return 0;
}

static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
	if (r < 0)
		return r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
	if (r < 0)
		return r;

	return 0;
}

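/*
 * Common init for the ACPI and DT paths. A node_map of 0 means "all
 * nodes": it is widened to -1ULL so every possible CPU whose EIO node is
 * covered ends up in node_map/cpuspan_map.
 */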
static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
			       u64 node_map)
{
	int i;

	node_map = node_map ? node_map : -1ULL;
	for_each_possible_cpu(i) {
		if (node_map & (1ULL << (cpu_to_eio_node(i)))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map,
				   cpumask_of(i));
		}
	}

	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle,
							priv->vec_count,
							&eiointc_domain_ops,
							priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-extioi: cannot add IRQ domain\n");
		return -ENOMEM;
	}

	eiointc_priv[nr_pics++] = priv;
	eiointc_router_init(0);
	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

	if (nr_pics == 1) {
		register_syscore_ops(&eiointc_syscore_ops);
		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
					  "irqchip/loongarch/intc:starting",
					  eiointc_router_init, NULL);
	}

	return 0;
}

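/*
 * ACPI probe path: the MADT EIO PIC entry supplies the node, the cascade
 * vector on the CPU interrupt controller and the node_map of EIO nodes
 * this instance serves.
 */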
int __init eiointc_acpi_init(struct irq_domain *parent,
			     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;
	int node;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
							       acpi_eiointc->node);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->vec_count = VEC_COUNT;
	priv->node = acpi_eiointc->node;

	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);

	ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
	if (ret < 0)
		goto out_free_handle;

	if (cpu_has_flatmode)
		node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
	else
		node = acpi_eiointc->node;
	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);

	ret = acpi_cascade_irqdomain_init();
	if (ret < 0)
		goto out_free_handle;

	return ret;

out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	return -ENOMEM;
}

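/*
 * DT probe path, used by the LS2K0500 and LS2K2000 via the
 * IRQCHIP_DECLARE() entries below.
 */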
static int __init eiointc_of_init(struct device_node *of_node,
				  struct device_node *parent)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	parent_irq = irq_of_parse_and_map(of_node, 0);
	if (parent_irq <= 0) {
		ret = -ENODEV;
		goto out_free_priv;
	}

	ret = irq_set_handler_data(parent_irq, priv);
	if (ret < 0)
		goto out_free_priv;

	/*
	 * The LS2K0500 extended I/O interrupt controller supports only
	 * 128 interrupt vectors; other chips use the full 256 (VEC_COUNT).
	 */
	if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc"))
		priv->vec_count = 128;
	else
		priv->vec_count = VEC_COUNT;

	priv->node = 0;
	priv->domain_handle = of_node_to_fwnode(of_node);

	ret = eiointc_init(priv, parent_irq, 0);
	if (ret < 0)
		goto out_free_priv;

	return 0;

out_free_priv:
	kfree(priv);
	return ret;
}

IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init);
IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init);

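/*
 * Sketch of a matching devicetree node (illustrative only: the unit
 * address, reg window and cascade vector below are assumptions, not taken
 * from this driver; check the loongson,eiointc binding for the actual
 * values on a given chip):
 *
 *	eiointc: interrupt-controller@1fe11600 {
 *		compatible = "loongson,ls2k0500-eiointc";
 *		reg = <0x0 0x1fe11600 0x0 0xea00>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupt-parent = <&cpuintc>;
 *		interrupts = <3>;
 *	};
 */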