1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/sysdev/ipic.c
 *
 * IPIC routines implementations.
 *
 * Copyright 2005 Freescale Semiconductor, Inc.
 */
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/errno.h>
12 #include <linux/reboot.h>
13 #include <linux/slab.h>
14 #include <linux/stddef.h>
15 #include <linux/sched.h>
16 #include <linux/signal.h>
17 #include <linux/syscore_ops.h>
18 #include <linux/device.h>
19 #include <linux/spinlock.h>
20 #include <linux/fsl_devices.h>
21 #include <linux/irqdomain.h>
22 #include <linux/of_address.h>
29 static struct ipic * primary_ipic;
30 static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
31 static DEFINE_RAW_SPINLOCK(ipic_lock);
/*
 * Per-source register routing table: ipic_info[src] names the mask
 * (IPIC_SIMSR_H/L), priority (IPIC_SIPRR_* / IPIC_SMPRR_*) and force
 * (IPIC_SIFCR_H/L) registers that hardware interrupt source `src` is wired
 * through; the mask/ack helpers below index it as ipic_info[src].mask etc.
 *
 * NOTE(review): this copy of the file appears mangled -- the designated
 * entry indices ("[N] = {"), the per-entry braces, the closing "};", and
 * the .ack/.bit fields referenced by ipic_ack_irq()/ipic_unmask_irq() are
 * missing, and each line carries a stray line-number prefix.  Restore this
 * table from the upstream arch/powerpc/sysdev/ipic.c before building.
 */
33 static struct ipic_info ipic_info[] = {
37 .force = IPIC_SIFCR_H,
44 .force = IPIC_SIFCR_H,
51 .force = IPIC_SIFCR_H,
58 .force = IPIC_SIFCR_H,
65 .force = IPIC_SIFCR_H,
72 .force = IPIC_SIFCR_H,
79 .force = IPIC_SIFCR_H,
86 .force = IPIC_SIFCR_H,
93 .force = IPIC_SIFCR_H,
100 .force = IPIC_SIFCR_H,
105 .mask = IPIC_SIMSR_H,
106 .prio = IPIC_SIPRR_D,
107 .force = IPIC_SIFCR_H,
112 .mask = IPIC_SIMSR_H,
113 .prio = IPIC_SIPRR_D,
114 .force = IPIC_SIFCR_H,
119 .mask = IPIC_SIMSR_H,
120 .prio = IPIC_SIPRR_D,
121 .force = IPIC_SIFCR_H,
126 .mask = IPIC_SIMSR_H,
127 .prio = IPIC_SIPRR_D,
128 .force = IPIC_SIFCR_H,
133 .mask = IPIC_SIMSR_H,
134 .prio = IPIC_SIPRR_D,
135 .force = IPIC_SIFCR_H,
140 .mask = IPIC_SIMSR_H,
141 .prio = IPIC_SIPRR_D,
142 .force = IPIC_SIFCR_H,
149 .prio = IPIC_SMPRR_A,
157 .prio = IPIC_SMPRR_A,
165 .prio = IPIC_SMPRR_A,
173 .prio = IPIC_SMPRR_B,
181 .prio = IPIC_SMPRR_B,
189 .prio = IPIC_SMPRR_B,
197 .prio = IPIC_SMPRR_B,
203 .mask = IPIC_SIMSR_H,
204 .prio = IPIC_SIPRR_A,
205 .force = IPIC_SIFCR_H,
210 .mask = IPIC_SIMSR_H,
211 .prio = IPIC_SIPRR_A,
212 .force = IPIC_SIFCR_H,
217 .mask = IPIC_SIMSR_H,
218 .prio = IPIC_SIPRR_A,
219 .force = IPIC_SIFCR_H,
224 .mask = IPIC_SIMSR_H,
225 .prio = IPIC_SIPRR_A,
226 .force = IPIC_SIFCR_H,
231 .mask = IPIC_SIMSR_H,
232 .prio = IPIC_SIPRR_A,
233 .force = IPIC_SIFCR_H,
238 .mask = IPIC_SIMSR_H,
239 .prio = IPIC_SIPRR_A,
240 .force = IPIC_SIFCR_H,
245 .mask = IPIC_SIMSR_H,
246 .prio = IPIC_SIPRR_A,
247 .force = IPIC_SIFCR_H,
252 .mask = IPIC_SIMSR_H,
253 .prio = IPIC_SIPRR_A,
254 .force = IPIC_SIFCR_H,
259 .mask = IPIC_SIMSR_H,
260 .prio = IPIC_SIPRR_B,
261 .force = IPIC_SIFCR_H,
266 .mask = IPIC_SIMSR_H,
267 .prio = IPIC_SIPRR_B,
268 .force = IPIC_SIFCR_H,
273 .mask = IPIC_SIMSR_H,
274 .prio = IPIC_SIPRR_B,
275 .force = IPIC_SIFCR_H,
280 .mask = IPIC_SIMSR_H,
281 .prio = IPIC_SIPRR_B,
282 .force = IPIC_SIFCR_H,
287 .mask = IPIC_SIMSR_H,
288 .prio = IPIC_SIPRR_B,
289 .force = IPIC_SIFCR_H,
294 .mask = IPIC_SIMSR_H,
295 .prio = IPIC_SIPRR_B,
296 .force = IPIC_SIFCR_H,
301 .mask = IPIC_SIMSR_H,
302 .prio = IPIC_SIPRR_B,
303 .force = IPIC_SIFCR_H,
308 .mask = IPIC_SIMSR_H,
309 .prio = IPIC_SIPRR_B,
310 .force = IPIC_SIFCR_H,
317 .prio = IPIC_SMPRR_A,
323 .mask = IPIC_SIMSR_L,
324 .prio = IPIC_SMPRR_A,
325 .force = IPIC_SIFCR_L,
330 .mask = IPIC_SIMSR_L,
331 .prio = IPIC_SMPRR_A,
332 .force = IPIC_SIFCR_L,
337 .mask = IPIC_SIMSR_L,
338 .prio = IPIC_SMPRR_A,
339 .force = IPIC_SIFCR_L,
344 .mask = IPIC_SIMSR_L,
345 .prio = IPIC_SMPRR_A,
346 .force = IPIC_SIFCR_L,
351 .mask = IPIC_SIMSR_L,
352 .prio = IPIC_SMPRR_B,
353 .force = IPIC_SIFCR_L,
358 .mask = IPIC_SIMSR_L,
359 .prio = IPIC_SMPRR_B,
360 .force = IPIC_SIFCR_L,
365 .mask = IPIC_SIMSR_L,
366 .prio = IPIC_SMPRR_B,
367 .force = IPIC_SIFCR_L,
372 .mask = IPIC_SIMSR_L,
373 .prio = IPIC_SMPRR_B,
374 .force = IPIC_SIFCR_L,
/* From here down only .mask/.force survive in this copy; the .prio lines
 * (and everything else per entry) were lost in extraction. */
379 .mask = IPIC_SIMSR_L,
381 .force = IPIC_SIFCR_L,
385 .mask = IPIC_SIMSR_L,
387 .force = IPIC_SIFCR_L,
391 .mask = IPIC_SIMSR_L,
393 .force = IPIC_SIFCR_L,
397 .mask = IPIC_SIMSR_L,
399 .force = IPIC_SIFCR_L,
403 .mask = IPIC_SIMSR_L,
405 .force = IPIC_SIFCR_L,
409 .mask = IPIC_SIMSR_L,
411 .force = IPIC_SIFCR_L,
415 .mask = IPIC_SIMSR_L,
417 .force = IPIC_SIFCR_L,
421 .mask = IPIC_SIMSR_L,
423 .force = IPIC_SIFCR_L,
427 .mask = IPIC_SIMSR_L,
429 .force = IPIC_SIFCR_L,
433 .mask = IPIC_SIMSR_L,
435 .force = IPIC_SIFCR_L,
439 .mask = IPIC_SIMSR_L,
441 .force = IPIC_SIFCR_L,
445 .mask = IPIC_SIMSR_L,
447 .force = IPIC_SIFCR_L,
451 .mask = IPIC_SIMSR_L,
453 .force = IPIC_SIFCR_L,
457 .mask = IPIC_SIMSR_L,
459 .force = IPIC_SIFCR_L,
463 .mask = IPIC_SIMSR_L,
465 .force = IPIC_SIFCR_L,
469 .mask = IPIC_SIMSR_L,
471 .force = IPIC_SIFCR_L,
475 .mask = IPIC_SIMSR_L,
477 .force = IPIC_SIFCR_L,
481 .mask = IPIC_SIMSR_L,
483 .force = IPIC_SIFCR_L,
487 .mask = IPIC_SIMSR_L,
489 .force = IPIC_SIFCR_L,
493 .mask = IPIC_SIMSR_L,
495 .force = IPIC_SIFCR_L,
499 .mask = IPIC_SIMSR_L,
501 .force = IPIC_SIFCR_L,
506 static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
508 return in_be32(base + (reg >> 2));
511 static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
513 out_be32(base + (reg >> 2), value);
516 static inline struct ipic * ipic_from_irq(unsigned int virq)
521 static void ipic_unmask_irq(struct irq_data *d)
523 struct ipic *ipic = ipic_from_irq(d->irq);
524 unsigned int src = irqd_to_hwirq(d);
528 raw_spin_lock_irqsave(&ipic_lock, flags);
530 temp = ipic_read(ipic->regs, ipic_info[src].mask);
531 temp |= (1 << (31 - ipic_info[src].bit));
532 ipic_write(ipic->regs, ipic_info[src].mask, temp);
534 raw_spin_unlock_irqrestore(&ipic_lock, flags);
537 static void ipic_mask_irq(struct irq_data *d)
539 struct ipic *ipic = ipic_from_irq(d->irq);
540 unsigned int src = irqd_to_hwirq(d);
544 raw_spin_lock_irqsave(&ipic_lock, flags);
546 temp = ipic_read(ipic->regs, ipic_info[src].mask);
547 temp &= ~(1 << (31 - ipic_info[src].bit));
548 ipic_write(ipic->regs, ipic_info[src].mask, temp);
550 /* mb() can't guarantee that masking is finished. But it does finish
551 * for nearly all cases. */
554 raw_spin_unlock_irqrestore(&ipic_lock, flags);
557 static void ipic_ack_irq(struct irq_data *d)
559 struct ipic *ipic = ipic_from_irq(d->irq);
560 unsigned int src = irqd_to_hwirq(d);
564 raw_spin_lock_irqsave(&ipic_lock, flags);
566 temp = 1 << (31 - ipic_info[src].bit);
567 ipic_write(ipic->regs, ipic_info[src].ack, temp);
569 /* mb() can't guarantee that ack is finished. But it does finish
570 * for nearly all cases. */
573 raw_spin_unlock_irqrestore(&ipic_lock, flags);
576 static void ipic_mask_irq_and_ack(struct irq_data *d)
578 struct ipic *ipic = ipic_from_irq(d->irq);
579 unsigned int src = irqd_to_hwirq(d);
583 raw_spin_lock_irqsave(&ipic_lock, flags);
585 temp = ipic_read(ipic->regs, ipic_info[src].mask);
586 temp &= ~(1 << (31 - ipic_info[src].bit));
587 ipic_write(ipic->regs, ipic_info[src].mask, temp);
589 temp = 1 << (31 - ipic_info[src].bit);
590 ipic_write(ipic->regs, ipic_info[src].ack, temp);
592 /* mb() can't guarantee that ack is finished. But it does finish
593 * for nearly all cases. */
596 raw_spin_unlock_irqrestore(&ipic_lock, flags);
599 static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
601 struct ipic *ipic = ipic_from_irq(d->irq);
602 unsigned int src = irqd_to_hwirq(d);
603 unsigned int vold, vnew, edibit;
605 if (flow_type == IRQ_TYPE_NONE)
606 flow_type = IRQ_TYPE_LEVEL_LOW;
608 /* ipic supports only low assertion and high-to-low change senses
610 if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
611 printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
615 /* ipic supports only edge mode on external interrupts */
616 if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
617 printk(KERN_ERR "ipic: edge sense not supported on internal "
623 irqd_set_trigger_type(d, flow_type);
624 if (flow_type & IRQ_TYPE_LEVEL_LOW) {
625 irq_set_handler_locked(d, handle_level_irq);
626 d->chip = &ipic_level_irq_chip;
628 irq_set_handler_locked(d, handle_edge_irq);
629 d->chip = &ipic_edge_irq_chip;
632 /* only EXT IRQ senses are programmable on ipic
633 * internal IRQ senses are LEVEL_LOW
635 if (src == IPIC_IRQ_EXT0)
638 if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
639 edibit = (14 - (src - IPIC_IRQ_EXT1));
641 return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;
643 vold = ipic_read(ipic->regs, IPIC_SECNR);
644 if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
645 vnew = vold | (1 << edibit);
647 vnew = vold & ~(1 << edibit);
650 ipic_write(ipic->regs, IPIC_SECNR, vnew);
651 return IRQ_SET_MASK_OK_NOCOPY;
654 /* level interrupts and edge interrupts have different ack operations */
655 static struct irq_chip ipic_level_irq_chip = {
657 .irq_unmask = ipic_unmask_irq,
658 .irq_mask = ipic_mask_irq,
659 .irq_mask_ack = ipic_mask_irq,
660 .irq_set_type = ipic_set_irq_type,
663 static struct irq_chip ipic_edge_irq_chip = {
665 .irq_unmask = ipic_unmask_irq,
666 .irq_mask = ipic_mask_irq,
667 .irq_mask_ack = ipic_mask_irq_and_ack,
668 .irq_ack = ipic_ack_irq,
669 .irq_set_type = ipic_set_irq_type,
672 static int ipic_host_match(struct irq_domain *h, struct device_node *node,
673 enum irq_domain_bus_token bus_token)
675 /* Exact match, unless ipic node is NULL */
676 struct device_node *of_node = irq_domain_get_of_node(h);
677 return of_node == NULL || of_node == node;
680 static int ipic_host_map(struct irq_domain *h, unsigned int virq,
683 struct ipic *ipic = h->host_data;
685 irq_set_chip_data(virq, ipic);
686 irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);
688 /* Set default irq type */
689 irq_set_irq_type(virq, IRQ_TYPE_NONE);
694 static const struct irq_domain_ops ipic_host_ops = {
695 .match = ipic_host_match,
696 .map = ipic_host_map,
697 .xlate = irq_domain_xlate_onetwocell,
700 struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
706 ret = of_address_to_resource(node, 0, &res);
710 ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
714 ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
715 &ipic_host_ops, ipic);
716 if (ipic->irqhost == NULL) {
721 ipic->regs = ioremap(res.start, resource_size(&res));
724 ipic_write(ipic->regs, IPIC_SICNR, 0x0);
726 /* default priority scheme is grouped. If spread mode is required
727 * configure SICFR accordingly */
728 if (flags & IPIC_SPREADMODE_GRP_A)
730 if (flags & IPIC_SPREADMODE_GRP_B)
732 if (flags & IPIC_SPREADMODE_GRP_C)
734 if (flags & IPIC_SPREADMODE_GRP_D)
736 if (flags & IPIC_SPREADMODE_MIX_A)
738 if (flags & IPIC_SPREADMODE_MIX_B)
741 ipic_write(ipic->regs, IPIC_SICFR, temp);
743 /* handle MCP route */
745 if (flags & IPIC_DISABLE_MCP_OUT)
747 ipic_write(ipic->regs, IPIC_SERCR, temp);
749 /* handle routing of IRQ0 to MCP */
750 temp = ipic_read(ipic->regs, IPIC_SEMSR);
752 if (flags & IPIC_IRQ0_MCP)
755 temp &= ~SEMSR_SIRQ0;
757 ipic_write(ipic->regs, IPIC_SEMSR, temp);
760 irq_set_default_host(primary_ipic->irqhost);
762 ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
763 ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
765 printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
771 void __init ipic_set_default_priority(void)
773 ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
774 ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
775 ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
776 ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
777 ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
778 ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
781 u32 ipic_get_mcp_status(void)
783 return primary_ipic ? ipic_read(primary_ipic->regs, IPIC_SERSR) : 0;
786 void ipic_clear_mcp_status(u32 mask)
788 ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
791 /* Return an interrupt vector or 0 if no interrupt is pending. */
792 unsigned int ipic_get_irq(void)
796 BUG_ON(primary_ipic == NULL);
798 #define IPIC_SIVCR_VECTOR_MASK 0x7f
799 irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;
801 if (irq == 0) /* 0 --> no irq is pending */
804 return irq_linear_revmap(primary_ipic->irqhost, irq);
807 #ifdef CONFIG_SUSPEND
820 static int ipic_suspend(void)
822 struct ipic *ipic = primary_ipic;
824 ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
825 ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
826 ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
827 ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
828 ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
829 ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
830 ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
831 ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
832 ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
833 ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
834 ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
835 ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);
837 if (fsl_deep_sleep()) {
838 /* In deep sleep, make sure there can be no
839 * pending interrupts, as this can cause
842 ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
843 ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
844 ipic_write(ipic->regs, IPIC_SEMSR, 0);
845 ipic_write(ipic->regs, IPIC_SERMR, 0);
851 static void ipic_resume(void)
853 struct ipic *ipic = primary_ipic;
855 ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
856 ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
857 ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
858 ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
859 ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
860 ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
861 ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
862 ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
863 ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
864 ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
865 ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
866 ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
/* Stub callbacks used when CONFIG_SUSPEND is disabled.  NOTE(review): the
 * #else/#endif pair enclosing these defines is not visible in this copy. */
#define ipic_suspend NULL
#define ipic_resume NULL
873 static struct syscore_ops ipic_syscore_ops = {
874 .suspend = ipic_suspend,
875 .resume = ipic_resume,
878 static int __init init_ipic_syscore(void)
880 if (!primary_ipic || !primary_ipic->regs)
883 printk(KERN_DEBUG "Registering ipic system core operations\n");
884 register_syscore_ops(&ipic_syscore_ops);
889 subsys_initcall(init_ipic_syscore);