/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>
#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* An interrupt mapped to pin X makes the GIC generate vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)
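
/*
 * Example of the hwirq layout: with GIC_NUM_LOCAL_INTRS == 7 (as defined in
 * asm/mips-gic.h), local interrupt 2 (the VPE timer) becomes hwirq 2, while
 * shared interrupt 3 becomes hwirq 7 + 3 = 10, and GIC_HWIRQ_TO_SHARED(10)
 * recovers 3. Local and shared interrupts thus occupy one linear hwirq
 * space, with all locals below all shareds.
 */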
void __iomem *mips_gic_base;

DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
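
/*
 * ipi_resrv marks the shared interrupts set aside for use as IPIs;
 * ipi_available starts out as a copy of it and has bits cleared as the IPI
 * domain hands hwirqs out, so a bit that is set in ipi_resrv but clear in
 * ipi_available is reserved *and* currently allocated.
 */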
static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}

static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}
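
/*
 * IPIs are ordinary shared interrupts driven from software via the WEDGE
 * register: writing a hwirq number with GIC_WEDGE_RW set asserts that edge
 * interrupt, and writing it with GIC_WEDGE_RW clear deasserts it again,
 * which is what gic_ack_irq() below relies on.
 */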
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
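
/*
 * The three accessors above let the architecture timer, perf and FDC code
 * obtain a Linux IRQ number without caring whether the source is routed
 * through the GIC: when a local interrupt is not GIC-routable they fall
 * back to the raw CP0 interrupt lines (MIPS_CPU_IRQ_BASE + cp0_*_irq).
 */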
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
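
/*
 * Dispatch works by snapshotting the shared pending bitmap and ANDing it
 * with this CPU's pcpu_mask, so each CPU only services interrupts that are
 * both unmasked and routed to it. The chained flag selects between
 * generic_handle_irq() (called from a chained handler, with IRQ entry
 * already accounted for) and do_IRQ() (called from the EIC vector, which
 * must set up the IRQ context itself).
 */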
static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_rmask(intr);
	gic_clear_pcpu_masks(intr);
}

static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(intr);

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_wedge(irq);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		change_gic_pol(irq, GIC_POL_FALLING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		change_gic_pol(irq, GIC_POL_RISING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_DUAL);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		change_gic_pol(irq, GIC_POL_ACTIVE_LOW);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		change_gic_pol(irq, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
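
/*
 * The POL/TRIG/DUAL programming above reduces to:
 *
 *	IRQ type	POL		TRIG	DUAL
 *	edge falling	FALLING_EDGE	EDGE	SINGLE
 *	edge rising	RISING_EDGE	EDGE	SINGLE
 *	edge both	(unchanged)	EDGE	DUAL
 *	level low	ACTIVE_LOW	LEVEL	SINGLE
 *	level high	ACTIVE_HIGH	LEVEL	SINGLE
 *
 * Edge interrupts also switch the flow handler and irq_chip so that they
 * get the ack (WEDGE clear) that level interrupts don't need.
 */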
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption: cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif
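
/*
 * Although the GIC hardware can route a shared interrupt to several VPs at
 * once, the driver always targets exactly one online CPU: the first one in
 * the requested mask. The effective affinity reported back to the core
 * reflects that single target, which is why gic_irq_domain_map() marks
 * shared interrupts with irqd_set_single_target().
 */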
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
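
/*
 * Two entry points into the dispatch logic: __gic_irq_dispatch() is
 * installed as the vectored interrupt handler in EIC mode (via
 * set_vi_handler() in gic_of_init()) and so passes chained=false, while
 * gic_irq_dispatch() is the chained handler hung off the CPU interrupt
 * line in non-EIC mode and passes chained=true.
 */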
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	unsigned long flags;
	u32 val;
	int i;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	if (intr > GIC_LOCAL_INT_FDC) {
		pr_err("Invalid local IRQ %d\n", intr);
		return -EINVAL;
	}

	if (intr == GIC_LOCAL_INT_TIMER) {
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
	} else {
		val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_map(intr, val);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
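
/*
 * The three-cell specifier decoded above is the one documented in the
 * mti,gic devicetree binding. An illustrative consumer node (the device
 * and numbers here are made up):
 *
 *	uart@10000 {
 *		interrupt-parent = <&gic>;
 *		interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */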
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;
		irq_set_handler(virq, handle_percpu_irq);
		break;
	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;
		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	/* no teardown required */
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated
	 * and the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;

error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}
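
/*
 * IPI allocation thus grabs a run of nr_irqs consecutive shared hwirqs
 * from ipi_available and maps one to each CPU in ipimask, installing the
 * mapping in both the IPI domain and its parent GIC domain and routing
 * each vector to its target CPU. On any failure the whole run is returned
 * to ipi_available.
 */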
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, j, gicconfig, cpu, v[2];
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}
	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap_nocache(gic_base, gic_len);
	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	gic_vpes = gicconfig & GIC_CONFIG_PVPS;
	gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
	gic_vpes = gic_vpes + 1;
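
	/*
	 * Worked example of the decoding above: a NUMINTERRUPTS field of 31
	 * means (31 + 1) * 8 = 256 shared interrupts, i.e. GIC_MAX_INTRS,
	 * and a PVPS field of 3 means 4 virtual processors. Both fields
	 * encode "count minus one", NUMINTERRUPTS in units of 8 interrupts.
	 */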
	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			write_gic_vl_other(mips_cm_vp_id(cpu));
			write_gic_vo_ctl(GIC_VX_CTL_EIC);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}
	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add GIC IRQ domain");
		return -ENXIO;
	}

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add GIC IPI domain");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
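
	/*
	 * Reserving 2 * gic_vpes vectors matches the two IPIs (reschedule
	 * and function call) that the MIPS SMP code allocates per CPU;
	 * boards can override the placement with the
	 * "mti,reserved-ipi-vectors" property handled above.
	 */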
	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults: level triggered, active high, masked. */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(i);
	}
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			write_gic_vo_rmask(BIT(j));
		}
	}

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);