/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with ARM GIC to wake the CPU out from low power states on
 * external interrupts. It is responsible for generating wakeup
 * event from the incoming interrupts and enable bits. It is
 * implemented in MPU always ON power domain. During normal operation,
 * WakeupGen delivers external interrupts directly to the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>

#include "omap-wakeupgen.h"
#include "omap-secure.h"

#include "soc.h"
#include "omap4-sar-layout.h"
#include "common.h"
#include "pm.h"
/* Bank counts and IRQ limits per SoC; AM43xx has the largest config. */
#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
/* CPU indices used with wakeupgen_writel()/wakeupgen_readl(). */
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128
53 static void __iomem *wakeupgen_base;
54 static void __iomem *sar_base;
55 static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
56 static unsigned int irq_target_cpu[MAX_IRQS];
57 static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
58 static unsigned int max_irqs = DEFAULT_IRQS;
59 static unsigned int omap_secure_apis;
62 static unsigned int wakeupgen_context[MAX_NR_REG_BANKS];
/* SoC-specific hooks to save/restore WakeupGen context across low power. */
struct omap_wakeupgen_ops {
	void (*save_context)(void);
	void (*restore_context)(void);
};

/* Selected at init time based on the detected SoC. */
static struct omap_wakeupgen_ops *wakeupgen_ops;
73 * Static helper functions.
75 static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
77 return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
78 (cpu * CPU_ENA_OFFSET) + (idx * 4));
81 static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
83 writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
84 (cpu * CPU_ENA_OFFSET) + (idx * 4));
87 static inline void sar_writel(u32 val, u32 offset, u8 idx)
89 writel_relaxed(val, sar_base + offset + (idx * 4));
92 static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
95 * Each WakeupGen register controls 32 interrupt.
96 * i.e. 1 bit per SPI IRQ
98 *reg_index = irq >> 5;
99 *bit_posn = irq %= 32;
104 static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
109 if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
112 val = wakeupgen_readl(i, cpu);
113 val &= ~BIT(bit_number);
114 wakeupgen_writel(val, i, cpu);
117 static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
122 if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
125 val = wakeupgen_readl(i, cpu);
126 val |= BIT(bit_number);
127 wakeupgen_writel(val, i, cpu);
131 * Architecture specific Mask extension
133 static void wakeupgen_mask(struct irq_data *d)
137 raw_spin_lock_irqsave(&wakeupgen_lock, flags);
138 _wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
139 raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
140 irq_chip_mask_parent(d);
144 * Architecture specific Unmask extension
146 static void wakeupgen_unmask(struct irq_data *d)
150 raw_spin_lock_irqsave(&wakeupgen_lock, flags);
151 _wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
152 raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
153 irq_chip_unmask_parent(d);
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

/* Snapshot the per-bank enable masks of 'cpu'. */
static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

/* Write the previously saved per-bank masks back for 'cpu'. */
static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

/* Write 'reg' to every bank enable register of 'cpu'. */
static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on given CPU.
 *	set != 0: save the current masks, then mask all interrupts
 *	set == 0: unmask all, then restore the saved masks
 * Ensure that the initial mask is maintained. This is faster than
 * iterating through GIC registers to arrive at the correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif
207 static inline void omap4_irq_save_context(void)
211 if (omap_rev() == OMAP4430_REV_ES1_0)
214 for (i = 0; i < irq_banks; i++) {
215 /* Save the CPUx interrupt mask for IRQ 0 to 127 */
216 val = wakeupgen_readl(i, 0);
217 sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
218 val = wakeupgen_readl(i, 1);
219 sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);
222 * Disable the secure interrupts for CPUx. The restore
223 * code blindly restores secure and non-secure interrupt
224 * masks from SAR RAM. Secure interrupts are not suppose
225 * to be enabled from HLOS. So overwrite the SAR location
226 * so that the secure interrupt remains disabled.
228 sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
229 sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
232 /* Save AuxBoot* registers */
233 val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
234 writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET);
235 val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
236 writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET);
238 /* Save SyncReq generation logic */
239 val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
240 writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
241 val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
242 writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET);
244 /* Set the Backup Bit Mask status */
245 val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET);
246 val |= SAR_BACKUP_STATUS_WAKEUPGEN;
247 writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
251 static inline void omap5_irq_save_context(void)
255 for (i = 0; i < irq_banks; i++) {
256 /* Save the CPUx interrupt mask for IRQ 0 to 159 */
257 val = wakeupgen_readl(i, 0);
258 sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
259 val = wakeupgen_readl(i, 1);
260 sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
261 sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
262 sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
265 /* Save AuxBoot* registers */
266 val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
267 writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
268 val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
269 writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);
271 /* Set the Backup Bit Mask status */
272 val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
273 val |= SAR_BACKUP_STATUS_WAKEUPGEN;
274 writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
278 static inline void am43xx_irq_save_context(void)
282 for (i = 0; i < irq_banks; i++) {
283 wakeupgen_context[i] = wakeupgen_readl(i, 0);
284 wakeupgen_writel(0, i, CPU0_ID);
289 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
290 * ROM code. WakeupGen IP is integrated along with GIC to manage the
291 * interrupt wakeups from CPU low power states. It manages
292 * masking/unmasking of Shared peripheral interrupts(SPI). So the
293 * interrupt enable/disable control should be in sync and consistent
294 * at WakeupGen and GIC so that interrupts are not lost.
296 static void irq_save_context(void)
298 /* DRA7 has no SAR to save */
302 if (wakeupgen_ops && wakeupgen_ops->save_context)
303 wakeupgen_ops->save_context();
307 * Clear WakeupGen SAR backup status.
309 static void irq_sar_clear(void)
312 u32 offset = SAR_BACKUP_STATUS_OFFSET;
313 /* DRA7 has no SAR to save */
317 if (soc_is_omap54xx())
318 offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
320 val = readl_relaxed(sar_base + offset);
321 val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
322 writel_relaxed(val, sar_base + offset);
325 static void am43xx_irq_restore_context(void)
329 for (i = 0; i < irq_banks; i++)
330 wakeupgen_writel(wakeupgen_context[i], i, CPU0_ID);
333 static void irq_restore_context(void)
335 if (wakeupgen_ops && wakeupgen_ops->restore_context)
336 wakeupgen_ops->restore_context();
340 * Save GIC and Wakeupgen interrupt context using secure API
341 * for HS/EMU devices.
343 static void irq_save_secure_context(void)
346 ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
349 if (ret != API_HAL_RET_VALUE_OK)
350 pr_err("GIC and Wakeupgen context save failed\n");
353 /* Define ops for context save and restore for each SoC */
354 static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {
355 .save_context = omap4_irq_save_context,
356 .restore_context = irq_sar_clear,
359 static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {
360 .save_context = omap5_irq_save_context,
361 .restore_context = irq_sar_clear,
364 static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {
365 .save_context = am43xx_irq_save_context,
366 .restore_context = am43xx_irq_restore_context,
369 static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {};
370 static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {};
371 static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {};
374 #ifdef CONFIG_HOTPLUG_CPU
375 static int omap_wakeupgen_cpu_online(unsigned int cpu)
377 wakeupgen_irqmask_all(cpu, 0);
381 static int omap_wakeupgen_cpu_dead(unsigned int cpu)
383 wakeupgen_irqmask_all(cpu, 1);
387 static void __init irq_hotplug_init(void)
389 cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
390 omap_wakeupgen_cpu_online, NULL);
391 cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
392 "arm/omap-wake:dead", NULL,
393 omap_wakeupgen_cpu_dead);
396 static void __init irq_hotplug_init(void)
401 static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
404 case CPU_CLUSTER_PM_ENTER:
405 if (omap_type() == OMAP2_DEVICE_TYPE_GP)
408 irq_save_secure_context();
410 case CPU_CLUSTER_PM_EXIT:
411 if (omap_type() == OMAP2_DEVICE_TYPE_GP)
412 irq_restore_context();
418 static struct notifier_block irq_notifier_block = {
419 .notifier_call = irq_notifier,
422 static void __init irq_pm_init(void)
424 /* FIXME: Remove this when MPU OSWR support is added */
425 if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
426 cpu_pm_register_notifier(&irq_notifier_block);
429 static void __init irq_pm_init(void)
433 void __iomem *omap_get_wakeupgen_base(void)
435 return wakeupgen_base;
438 int omap_secure_apis_support(void)
440 return omap_secure_apis;
443 static struct irq_chip wakeupgen_chip = {
445 .irq_eoi = irq_chip_eoi_parent,
446 .irq_mask = wakeupgen_mask,
447 .irq_unmask = wakeupgen_unmask,
448 .irq_retrigger = irq_chip_retrigger_hierarchy,
449 .irq_set_type = irq_chip_set_type_parent,
450 .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
452 .irq_set_affinity = irq_chip_set_affinity_parent,
456 static int wakeupgen_domain_translate(struct irq_domain *d,
457 struct irq_fwspec *fwspec,
458 unsigned long *hwirq,
461 if (is_of_node(fwspec->fwnode)) {
462 if (fwspec->param_count != 3)
465 /* No PPI should point to this domain */
466 if (fwspec->param[0] != 0)
469 *hwirq = fwspec->param[1];
470 *type = fwspec->param[2];
477 static int wakeupgen_domain_alloc(struct irq_domain *domain,
479 unsigned int nr_irqs, void *data)
481 struct irq_fwspec *fwspec = data;
482 struct irq_fwspec parent_fwspec;
483 irq_hw_number_t hwirq;
486 if (fwspec->param_count != 3)
487 return -EINVAL; /* Not GIC compliant */
488 if (fwspec->param[0] != 0)
489 return -EINVAL; /* No PPI should point to this domain */
491 hwirq = fwspec->param[1];
492 if (hwirq >= MAX_IRQS)
493 return -EINVAL; /* Can't deal with this */
495 for (i = 0; i < nr_irqs; i++)
496 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
497 &wakeupgen_chip, NULL);
499 parent_fwspec = *fwspec;
500 parent_fwspec.fwnode = domain->parent->fwnode;
501 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
505 static const struct irq_domain_ops wakeupgen_domain_ops = {
506 .translate = wakeupgen_domain_translate,
507 .alloc = wakeupgen_domain_alloc,
508 .free = irq_domain_free_irqs_common,
512 * Initialise the wakeupgen module.
514 static int __init wakeupgen_init(struct device_node *node,
515 struct device_node *parent)
517 struct irq_domain *parent_domain, *domain;
519 unsigned int boot_cpu = smp_processor_id();
523 pr_err("%pOF: no parent, giving up\n", node);
527 parent_domain = irq_find_host(parent);
528 if (!parent_domain) {
529 pr_err("%pOF: unable to obtain parent domain\n", node);
532 /* Not supported on OMAP4 ES1.0 silicon */
533 if (omap_rev() == OMAP4430_REV_ES1_0) {
534 WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
538 /* Static mapping, never released */
539 wakeupgen_base = of_iomap(node, 0);
540 if (WARN_ON(!wakeupgen_base))
543 if (cpu_is_omap44xx()) {
544 irq_banks = OMAP4_NR_BANKS;
545 max_irqs = OMAP4_NR_IRQS;
546 omap_secure_apis = 1;
547 wakeupgen_ops = &omap4_wakeupgen_ops;
548 } else if (soc_is_omap54xx()) {
549 wakeupgen_ops = &omap5_wakeupgen_ops;
550 } else if (soc_is_am43xx()) {
551 irq_banks = AM43XX_NR_REG_BANKS;
552 max_irqs = AM43XX_IRQS;
553 wakeupgen_ops = &am43xx_wakeupgen_ops;
556 domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
557 node, &wakeupgen_domain_ops,
560 iounmap(wakeupgen_base);
564 /* Clear all IRQ bitmasks at wakeupGen level */
565 for (i = 0; i < irq_banks; i++) {
566 wakeupgen_writel(0, i, CPU0_ID);
567 if (!soc_is_am43xx())
568 wakeupgen_writel(0, i, CPU1_ID);
572 * FIXME: Add support to set_smp_affinity() once the core
573 * GIC code has necessary hooks in place.
576 /* Associate all the IRQs to boot CPU like GIC init does. */
577 for (i = 0; i < max_irqs; i++)
578 irq_target_cpu[i] = boot_cpu;
581 * Enables OMAP5 ES2 PM Mode using ES2_PM_MODE in AMBA_IF_MODE
582 * 0x0: ES1 behavior, CPU cores would enter and exit OFF mode together.
583 * 0x1: ES2 behavior, CPU cores are allowed to enter/exit OFF mode
585 * This needs to be set one time thanks to always ON domain.
587 * We do not support ES1 behavior anymore. OMAP5 is assumed to be
588 * ES2.0, and the same is applicable for DRA7.
590 if (soc_is_omap54xx() || soc_is_dra7xx()) {
591 val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
593 omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
599 sar_base = omap4_get_sar_ram_base();
603 IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);