/*
 * arch/powerpc/sysdev/ipic.c
 *
 * IPIC routines implementations.
 *
 * Copyright 2005 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/ipic.h>

#include "ipic.h"
32 static struct ipic * primary_ipic;
33 static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
34 static DEFINE_RAW_SPINLOCK(ipic_lock);
/*
 * Per-hardware-source configuration table, indexed by IPIC source number:
 * for each source, which mask (SIMSR_H/L), priority (SIPRR_x/SMPRR_x) and
 * force (SIFCR_H/L) register it lives in.
 *
 * NOTE(review): this table is garbled by extraction — the array index
 * designators (e.g. "[9] = {"), the ".ack", ".bit" and ".prio_mask" fields,
 * and each entry's closing "}," are missing, and every line carries a fused
 * source line number. Restore the full table from the original
 * arch/powerpc/sysdev/ipic.c before building; only the register-selection
 * fields survived here.
 */
36 static struct ipic_info ipic_info[] = {
40 .force = IPIC_SIFCR_H,
47 .force = IPIC_SIFCR_H,
54 .force = IPIC_SIFCR_H,
61 .force = IPIC_SIFCR_H,
68 .force = IPIC_SIFCR_H,
75 .force = IPIC_SIFCR_H,
82 .force = IPIC_SIFCR_H,
89 .force = IPIC_SIFCR_H,
96 .force = IPIC_SIFCR_H,
101 .mask = IPIC_SIMSR_H,
102 .prio = IPIC_SIPRR_D,
103 .force = IPIC_SIFCR_H,
108 .mask = IPIC_SIMSR_H,
109 .prio = IPIC_SIPRR_D,
110 .force = IPIC_SIFCR_H,
115 .mask = IPIC_SIMSR_H,
116 .prio = IPIC_SIPRR_D,
117 .force = IPIC_SIFCR_H,
122 .mask = IPIC_SIMSR_H,
123 .prio = IPIC_SIPRR_D,
124 .force = IPIC_SIFCR_H,
129 .mask = IPIC_SIMSR_H,
130 .prio = IPIC_SIPRR_D,
131 .force = IPIC_SIFCR_H,
136 .mask = IPIC_SIMSR_H,
137 .prio = IPIC_SIPRR_D,
138 .force = IPIC_SIFCR_H,
143 .mask = IPIC_SIMSR_H,
144 .prio = IPIC_SIPRR_D,
145 .force = IPIC_SIFCR_H,
/* Mixed-priority sources (SMPRR_A/B) — presumably the MCU/system sources;
 * verify against the original table. */
152 .prio = IPIC_SMPRR_A,
160 .prio = IPIC_SMPRR_A,
168 .prio = IPIC_SMPRR_A,
176 .prio = IPIC_SMPRR_B,
184 .prio = IPIC_SMPRR_B,
192 .prio = IPIC_SMPRR_B,
200 .prio = IPIC_SMPRR_B,
206 .mask = IPIC_SIMSR_H,
207 .prio = IPIC_SIPRR_A,
208 .force = IPIC_SIFCR_H,
213 .mask = IPIC_SIMSR_H,
214 .prio = IPIC_SIPRR_A,
215 .force = IPIC_SIFCR_H,
220 .mask = IPIC_SIMSR_H,
221 .prio = IPIC_SIPRR_A,
222 .force = IPIC_SIFCR_H,
227 .mask = IPIC_SIMSR_H,
228 .prio = IPIC_SIPRR_A,
229 .force = IPIC_SIFCR_H,
234 .mask = IPIC_SIMSR_H,
235 .prio = IPIC_SIPRR_A,
236 .force = IPIC_SIFCR_H,
241 .mask = IPIC_SIMSR_H,
242 .prio = IPIC_SIPRR_A,
243 .force = IPIC_SIFCR_H,
248 .mask = IPIC_SIMSR_H,
249 .prio = IPIC_SIPRR_A,
250 .force = IPIC_SIFCR_H,
255 .mask = IPIC_SIMSR_H,
256 .prio = IPIC_SIPRR_A,
257 .force = IPIC_SIFCR_H,
262 .mask = IPIC_SIMSR_H,
263 .prio = IPIC_SIPRR_B,
264 .force = IPIC_SIFCR_H,
269 .mask = IPIC_SIMSR_H,
270 .prio = IPIC_SIPRR_B,
271 .force = IPIC_SIFCR_H,
276 .mask = IPIC_SIMSR_H,
277 .prio = IPIC_SIPRR_B,
278 .force = IPIC_SIFCR_H,
283 .mask = IPIC_SIMSR_H,
284 .prio = IPIC_SIPRR_B,
285 .force = IPIC_SIFCR_H,
290 .mask = IPIC_SIMSR_H,
291 .prio = IPIC_SIPRR_B,
292 .force = IPIC_SIFCR_H,
297 .mask = IPIC_SIMSR_H,
298 .prio = IPIC_SIPRR_B,
299 .force = IPIC_SIFCR_H,
304 .mask = IPIC_SIMSR_H,
305 .prio = IPIC_SIPRR_B,
306 .force = IPIC_SIFCR_H,
311 .mask = IPIC_SIMSR_H,
312 .prio = IPIC_SIPRR_B,
313 .force = IPIC_SIFCR_H,
320 .prio = IPIC_SMPRR_A,
326 .mask = IPIC_SIMSR_L,
327 .prio = IPIC_SMPRR_A,
328 .force = IPIC_SIFCR_L,
333 .mask = IPIC_SIMSR_L,
334 .prio = IPIC_SMPRR_A,
335 .force = IPIC_SIFCR_L,
340 .mask = IPIC_SIMSR_L,
341 .prio = IPIC_SMPRR_A,
342 .force = IPIC_SIFCR_L,
347 .mask = IPIC_SIMSR_L,
348 .prio = IPIC_SMPRR_A,
349 .force = IPIC_SIFCR_L,
354 .mask = IPIC_SIMSR_L,
355 .prio = IPIC_SMPRR_B,
356 .force = IPIC_SIFCR_L,
361 .mask = IPIC_SIMSR_L,
362 .prio = IPIC_SMPRR_B,
363 .force = IPIC_SIFCR_L,
368 .mask = IPIC_SIMSR_L,
369 .prio = IPIC_SMPRR_B,
370 .force = IPIC_SIFCR_L,
375 .mask = IPIC_SIMSR_L,
376 .prio = IPIC_SMPRR_B,
377 .force = IPIC_SIFCR_L,
/* Remaining low-bank internal sources: maskable/forcible but with no
 * programmable priority (no .prio field — see ipic_set_priority()). */
382 .mask = IPIC_SIMSR_L,
384 .force = IPIC_SIFCR_L,
388 .mask = IPIC_SIMSR_L,
390 .force = IPIC_SIFCR_L,
394 .mask = IPIC_SIMSR_L,
396 .force = IPIC_SIFCR_L,
400 .mask = IPIC_SIMSR_L,
402 .force = IPIC_SIFCR_L,
406 .mask = IPIC_SIMSR_L,
408 .force = IPIC_SIFCR_L,
412 .mask = IPIC_SIMSR_L,
414 .force = IPIC_SIFCR_L,
418 .mask = IPIC_SIMSR_L,
420 .force = IPIC_SIFCR_L,
424 .mask = IPIC_SIMSR_L,
426 .force = IPIC_SIFCR_L,
430 .mask = IPIC_SIMSR_L,
432 .force = IPIC_SIFCR_L,
436 .mask = IPIC_SIMSR_L,
438 .force = IPIC_SIFCR_L,
442 .mask = IPIC_SIMSR_L,
444 .force = IPIC_SIFCR_L,
448 .mask = IPIC_SIMSR_L,
450 .force = IPIC_SIFCR_L,
454 .mask = IPIC_SIMSR_L,
456 .force = IPIC_SIFCR_L,
460 .mask = IPIC_SIMSR_L,
462 .force = IPIC_SIFCR_L,
466 .mask = IPIC_SIMSR_L,
468 .force = IPIC_SIFCR_L,
472 .mask = IPIC_SIMSR_L,
474 .force = IPIC_SIFCR_L,
478 .mask = IPIC_SIMSR_L,
480 .force = IPIC_SIFCR_L,
484 .mask = IPIC_SIMSR_L,
486 .force = IPIC_SIFCR_L,
490 .mask = IPIC_SIMSR_L,
492 .force = IPIC_SIFCR_L,
496 .mask = IPIC_SIMSR_L,
498 .force = IPIC_SIFCR_L,
502 .mask = IPIC_SIMSR_L,
504 .force = IPIC_SIFCR_L,
509 static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
511 return in_be32(base + (reg >> 2));
514 static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
516 out_be32(base + (reg >> 2), value);
519 static inline struct ipic * ipic_from_irq(unsigned int virq)
524 static void ipic_unmask_irq(struct irq_data *d)
526 struct ipic *ipic = ipic_from_irq(d->irq);
527 unsigned int src = irqd_to_hwirq(d);
531 raw_spin_lock_irqsave(&ipic_lock, flags);
533 temp = ipic_read(ipic->regs, ipic_info[src].mask);
534 temp |= (1 << (31 - ipic_info[src].bit));
535 ipic_write(ipic->regs, ipic_info[src].mask, temp);
537 raw_spin_unlock_irqrestore(&ipic_lock, flags);
540 static void ipic_mask_irq(struct irq_data *d)
542 struct ipic *ipic = ipic_from_irq(d->irq);
543 unsigned int src = irqd_to_hwirq(d);
547 raw_spin_lock_irqsave(&ipic_lock, flags);
549 temp = ipic_read(ipic->regs, ipic_info[src].mask);
550 temp &= ~(1 << (31 - ipic_info[src].bit));
551 ipic_write(ipic->regs, ipic_info[src].mask, temp);
553 /* mb() can't guarantee that masking is finished. But it does finish
554 * for nearly all cases. */
557 raw_spin_unlock_irqrestore(&ipic_lock, flags);
560 static void ipic_ack_irq(struct irq_data *d)
562 struct ipic *ipic = ipic_from_irq(d->irq);
563 unsigned int src = irqd_to_hwirq(d);
567 raw_spin_lock_irqsave(&ipic_lock, flags);
569 temp = 1 << (31 - ipic_info[src].bit);
570 ipic_write(ipic->regs, ipic_info[src].ack, temp);
572 /* mb() can't guarantee that ack is finished. But it does finish
573 * for nearly all cases. */
576 raw_spin_unlock_irqrestore(&ipic_lock, flags);
579 static void ipic_mask_irq_and_ack(struct irq_data *d)
581 struct ipic *ipic = ipic_from_irq(d->irq);
582 unsigned int src = irqd_to_hwirq(d);
586 raw_spin_lock_irqsave(&ipic_lock, flags);
588 temp = ipic_read(ipic->regs, ipic_info[src].mask);
589 temp &= ~(1 << (31 - ipic_info[src].bit));
590 ipic_write(ipic->regs, ipic_info[src].mask, temp);
592 temp = 1 << (31 - ipic_info[src].bit);
593 ipic_write(ipic->regs, ipic_info[src].ack, temp);
595 /* mb() can't guarantee that ack is finished. But it does finish
596 * for nearly all cases. */
599 raw_spin_unlock_irqrestore(&ipic_lock, flags);
602 static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
604 struct ipic *ipic = ipic_from_irq(d->irq);
605 unsigned int src = irqd_to_hwirq(d);
606 unsigned int vold, vnew, edibit;
608 if (flow_type == IRQ_TYPE_NONE)
609 flow_type = IRQ_TYPE_LEVEL_LOW;
611 /* ipic supports only low assertion and high-to-low change senses
613 if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
614 printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
618 /* ipic supports only edge mode on external interrupts */
619 if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
620 printk(KERN_ERR "ipic: edge sense not supported on internal "
626 irqd_set_trigger_type(d, flow_type);
627 if (flow_type & IRQ_TYPE_LEVEL_LOW) {
628 irq_set_handler_locked(d, handle_level_irq);
629 d->chip = &ipic_level_irq_chip;
631 irq_set_handler_locked(d, handle_edge_irq);
632 d->chip = &ipic_edge_irq_chip;
635 /* only EXT IRQ senses are programmable on ipic
636 * internal IRQ senses are LEVEL_LOW
638 if (src == IPIC_IRQ_EXT0)
641 if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
642 edibit = (14 - (src - IPIC_IRQ_EXT1));
644 return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;
646 vold = ipic_read(ipic->regs, IPIC_SECNR);
647 if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
648 vnew = vold | (1 << edibit);
650 vnew = vold & ~(1 << edibit);
653 ipic_write(ipic->regs, IPIC_SECNR, vnew);
654 return IRQ_SET_MASK_OK_NOCOPY;
657 /* level interrupts and edge interrupts have different ack operations */
658 static struct irq_chip ipic_level_irq_chip = {
660 .irq_unmask = ipic_unmask_irq,
661 .irq_mask = ipic_mask_irq,
662 .irq_mask_ack = ipic_mask_irq,
663 .irq_set_type = ipic_set_irq_type,
666 static struct irq_chip ipic_edge_irq_chip = {
668 .irq_unmask = ipic_unmask_irq,
669 .irq_mask = ipic_mask_irq,
670 .irq_mask_ack = ipic_mask_irq_and_ack,
671 .irq_ack = ipic_ack_irq,
672 .irq_set_type = ipic_set_irq_type,
675 static int ipic_host_match(struct irq_domain *h, struct device_node *node,
676 enum irq_domain_bus_token bus_token)
678 /* Exact match, unless ipic node is NULL */
679 struct device_node *of_node = irq_domain_get_of_node(h);
680 return of_node == NULL || of_node == node;
683 static int ipic_host_map(struct irq_domain *h, unsigned int virq,
686 struct ipic *ipic = h->host_data;
688 irq_set_chip_data(virq, ipic);
689 irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);
691 /* Set default irq type */
692 irq_set_irq_type(virq, IRQ_TYPE_NONE);
697 static const struct irq_domain_ops ipic_host_ops = {
698 .match = ipic_host_match,
699 .map = ipic_host_map,
700 .xlate = irq_domain_xlate_onetwocell,
703 struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
709 ret = of_address_to_resource(node, 0, &res);
713 ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
717 ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
718 &ipic_host_ops, ipic);
719 if (ipic->irqhost == NULL) {
724 ipic->regs = ioremap(res.start, resource_size(&res));
727 ipic_write(ipic->regs, IPIC_SICNR, 0x0);
729 /* default priority scheme is grouped. If spread mode is required
730 * configure SICFR accordingly */
731 if (flags & IPIC_SPREADMODE_GRP_A)
733 if (flags & IPIC_SPREADMODE_GRP_B)
735 if (flags & IPIC_SPREADMODE_GRP_C)
737 if (flags & IPIC_SPREADMODE_GRP_D)
739 if (flags & IPIC_SPREADMODE_MIX_A)
741 if (flags & IPIC_SPREADMODE_MIX_B)
744 ipic_write(ipic->regs, IPIC_SICFR, temp);
746 /* handle MCP route */
748 if (flags & IPIC_DISABLE_MCP_OUT)
750 ipic_write(ipic->regs, IPIC_SERCR, temp);
752 /* handle routing of IRQ0 to MCP */
753 temp = ipic_read(ipic->regs, IPIC_SEMSR);
755 if (flags & IPIC_IRQ0_MCP)
758 temp &= ~SEMSR_SIRQ0;
760 ipic_write(ipic->regs, IPIC_SEMSR, temp);
763 irq_set_default_host(primary_ipic->irqhost);
765 ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
766 ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
768 printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
774 int ipic_set_priority(unsigned int virq, unsigned int priority)
776 struct ipic *ipic = ipic_from_irq(virq);
777 unsigned int src = virq_to_hw(virq);
784 if (ipic_info[src].prio == 0)
787 temp = ipic_read(ipic->regs, ipic_info[src].prio);
790 temp &= ~(0x7 << (20 + (3 - priority) * 3));
791 temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
793 temp &= ~(0x7 << (4 + (7 - priority) * 3));
794 temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
797 ipic_write(ipic->regs, ipic_info[src].prio, temp);
802 void ipic_set_highest_priority(unsigned int virq)
804 struct ipic *ipic = ipic_from_irq(virq);
805 unsigned int src = virq_to_hw(virq);
808 temp = ipic_read(ipic->regs, IPIC_SICFR);
810 /* clear and set HPI */
812 temp |= (src & 0x7f) << 24;
814 ipic_write(ipic->regs, IPIC_SICFR, temp);
817 void ipic_set_default_priority(void)
819 ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
820 ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
821 ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
822 ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
823 ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
824 ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
827 void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
829 struct ipic *ipic = primary_ipic;
832 temp = ipic_read(ipic->regs, IPIC_SERMR);
833 temp |= (1 << (31 - mcp_irq));
834 ipic_write(ipic->regs, IPIC_SERMR, temp);
837 void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
839 struct ipic *ipic = primary_ipic;
842 temp = ipic_read(ipic->regs, IPIC_SERMR);
843 temp &= (1 << (31 - mcp_irq));
844 ipic_write(ipic->regs, IPIC_SERMR, temp);
847 u32 ipic_get_mcp_status(void)
849 return ipic_read(primary_ipic->regs, IPIC_SERSR);
852 void ipic_clear_mcp_status(u32 mask)
854 ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
857 /* Return an interrupt vector or 0 if no interrupt is pending. */
858 unsigned int ipic_get_irq(void)
862 BUG_ON(primary_ipic == NULL);
864 #define IPIC_SIVCR_VECTOR_MASK 0x7f
865 irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;
867 if (irq == 0) /* 0 --> no irq is pending */
870 return irq_linear_revmap(primary_ipic->irqhost, irq);
#ifdef CONFIG_SUSPEND
/* Register snapshot taken at suspend and replayed at resume. */
static struct {
	u32 sicfr;
	u32 siprr[2];
	u32 simsr[2];
	u32 sicnr;
	u32 smprr[2];
	u32 semsr;
	u32 secnr;
	u32 sermr;
	u32 sercr;
} ipic_saved_state;

/* Save all programmable IPIC state; in deep sleep also mask every source
 * so no interrupt can be left pending across the power-down. */
static int ipic_suspend(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
	ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
	ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
	ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
	ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
	ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
	ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
	ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
	ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
	ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
	ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
	ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);

	if (fsl_deep_sleep()) {
		/* In deep sleep, make sure there can be no
		 * pending interrupts, as this can cause
		 * problems on wakeup.
		 */
		ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
		ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
		ipic_write(ipic->regs, IPIC_SEMSR, 0);
		ipic_write(ipic->regs, IPIC_SERMR, 0);
	}

	return 0;
}

/* Restore every register saved by ipic_suspend(). */
static void ipic_resume(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
	ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
	ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
	ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
	ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
	ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
	ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
	ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
	ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
	ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
	ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
	ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
}
#else
#define ipic_suspend NULL
#define ipic_resume NULL
#endif
939 static struct syscore_ops ipic_syscore_ops = {
940 .suspend = ipic_suspend,
941 .resume = ipic_resume,
944 static int __init init_ipic_syscore(void)
946 if (!primary_ipic || !primary_ipic->regs)
949 printk(KERN_DEBUG "Registering ipic system core operations\n");
950 register_syscore_ops(&ipic_syscore_ops);
955 subsys_initcall(init_ipic_syscore);