/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "irq-mips-gic: " fmt

#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#define GIC_MAX_INTRS 256
#define GIC_MAX_LONGS BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET 2

/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET 1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE 0
#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
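
/*
 * Local interrupts occupy hwirqs [0, GIC_NUM_LOCAL_INTRS) and shared
 * interrupts follow immediately after them, giving the irq domain a single
 * linear hwirq space; the conversion macros above encode that split.
 */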

void __iomem *mips_gic_base;

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_RAW_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

#ifdef CONFIG_GENERIC_IRQ_IPI
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
#endif /* CONFIG_GENERIC_IRQ_IPI */

static struct gic_all_vpes_chip_data {
	u32	map;
	bool	mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
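
/*
 * One entry per local interrupt handled by the "all VPEs" chip below: the
 * cached pin mapping and mask state let gic_all_vpes_irq_cpu_online()
 * replay the configuration on CPUs that come online later.
 */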

static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}

static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}

static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
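
/*
 * Note that the pending bitmap is ANDed with this CPU's pcpu_masks entry
 * above, so each CPU only services shared interrupts currently routed to
 * it; those bits are maintained by the mask/unmask and affinity callbacks.
 */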

static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_rmask(intr);
	gic_clear_pcpu_masks(intr);
}

static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(intr);

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_wedge(irq);
}
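
/*
 * Acking an edge interrupt writes its number to the WEDGE register without
 * GIC_WEDGE_RW set, clearing the latched edge; the same register, with
 * GIC_WEDGE_RW, is used to raise IPIs in gic_send_ipi().
 */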

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq, pol, trig, dual;
	unsigned long flags;

	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	raw_spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		pol = GIC_POL_FALLING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		pol = GIC_POL_RISING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		pol = 0; /* Doesn't matter */
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_DUAL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		pol = GIC_POL_ACTIVE_LOW;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		pol = GIC_POL_ACTIVE_HIGH;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	}

	change_gic_pol(irq, pol);
	change_gic_trig(irq, trig);
	change_gic_dual(irq, dual);

	if (trig == GIC_TRIG_EDGE)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	raw_spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	raw_spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	raw_spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name = "MIPS GIC",
	.irq_mask = gic_mask_irq,
	.irq_unmask = gic_unmask_irq,
	.irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name = "MIPS GIC",
	.irq_ack = gic_ack_irq,
	.irq_mask = gic_mask_irq,
	.irq_unmask = gic_unmask_irq,
	.irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = gic_set_affinity,
#endif
	.ipi_send_single = gic_send_ipi,
};
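
/*
 * The edge chip differs from the level chip in needing an ack to clear the
 * latched edge and in providing ipi_send_single(), since IPIs are delivered
 * as edge-triggered shared interrupts.
 */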

static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_local_irq_controller = {
	.name = "MIPS GIC Local",
	.irq_mask = gic_mask_local_irq,
	.irq_unmask = gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = false;

	raw_spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_rmask(BIT(intr));
	}
	raw_spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = true;

	raw_spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_smask(BIT(intr));
	}
	raw_spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_all_vpes_irq_cpu_online(void)
{
	static const unsigned int local_intrs[] = {
		GIC_LOCAL_INT_TIMER,
		GIC_LOCAL_INT_PERFCTR,
		GIC_LOCAL_INT_FDC,
	};
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
		unsigned int intr = local_intrs[i];
		struct gic_all_vpes_chip_data *cd;

		if (!gic_local_irq_is_routable(intr))
			continue;
		cd = &gic_all_vpes_chip_data[intr];
		write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
		if (cd->mask)
			write_gic_vl_smask(BIT(intr));
	}
	raw_spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name = "MIPS GIC Local",
	.irq_mask = gic_mask_local_irq_all_vpes,
	.irq_unmask = gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
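
/*
 * __gic_irq_dispatch() is the vectored entry point installed via
 * set_vi_handler() in EIC mode, while gic_irq_dispatch() is the chained
 * handler hooked onto a CPU interrupt line in gic_of_init(); both poll
 * local interrupts first, then shared ones.
 */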

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);

	raw_spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	raw_spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
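
/*
 * Device trees describe GIC interrupts with three cells, for example
 * (illustrative only): interrupts = <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>;
 * the first cell selects the local or shared hwirq space translated above.
 */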

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	unsigned int intr;
	int err, cpu;
	u32 map;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
#ifdef CONFIG_GENERIC_IRQ_IPI
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;
#endif /* CONFIG_GENERIC_IRQ_IPI */

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;

	/*
	 * If adding support for more per-cpu interrupts, keep the
	 * array in gic_all_vpes_irq_cpu_online() in sync.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
		fallthrough;
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		cd = &gic_all_vpes_chip_data[intr];
		cd->map = map;
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    cd);
		if (err)
			return err;
		irq_set_handler(virq, handle_percpu_irq);
		break;
	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;
		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	raw_spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
	}
	raw_spin_unlock_irqrestore(&gic_lock, flags);
	return 0;
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

#ifdef CONFIG_GENERIC_IRQ_IPI

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
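
/*
 * IPI hwirqs are not encoded in the firmware specifier; they are carved
 * out of the ipi_available bitmap during allocation below, so translation
 * only needs to fix the trigger type.
 */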

static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;
		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;
		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;
		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
				enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

static int gic_register_ipi_domain(struct device_node *node)
{
	struct irq_domain *gic_ipi_domain;
	unsigned int v[2], num_ipis;

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add IPI domain");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/*
		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
		 * meeting the requirements of arch/mips SMP.
		 */
		num_ipis = 2 * num_possible_cpus();
		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	return 0;
}

#else /* !CONFIG_GENERIC_IRQ_IPI */

static inline int gic_register_ipi_domain(struct device_node *node)
{
	return 0;
}

#endif /* !CONFIG_GENERIC_IRQ_IPI */
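
/*
 * Without an "mti,reserved-ipi-vectors" property, the top of the shared
 * interrupt range is reserved for IPIs: with, say, 4 possible VPs (an
 * illustrative figure), the last 8 shared vectors are set aside, two per VP.
 */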

static int gic_cpu_startup(unsigned int cpu)
{
	/* Enable or disable EIC */
	change_gic_vl_ctl(GIC_VX_CTL_EIC,
			  cpu_has_veic ? GIC_VX_CTL_EIC : 0);

	/* Clear all local IRQ masks (ie. disable all local interrupts) */
	write_gic_vl_rmask(~0);

	/* Enable desired interrupts */
	gic_all_vpes_irq_cpu_online();

	return 0;
}
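
/*
 * gic_cpu_startup() runs on each CPU as it is brought online via the
 * CPUHP_AP_IRQ_MIPS_GIC_STARTING state registered at the end of
 * gic_of_init(), so hotplugged CPUs receive the same local GIC setup.
 */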

static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, gicconfig;
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;
	int ret;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
			pr_warn("Using inherited base address %pa\n",
				&gic_base);
		} else {
			pr_err("Failed to get memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap(gic_base, gic_len);
	if (!mips_gic_base) {
		pr_err("Failed to ioremap gic_base\n");
		return -ENOMEM;
	}

	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add IRQ domain");
		return -ENXIO;
	}

	ret = gic_register_ipi_domain(node);
	if (ret)
		return ret;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(i);
	}

	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
				 "irqchip/mips/gic:starting",
				 gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);