2 * Copyright 2016,2017 IBM Corporation.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 #ifndef _ASM_POWERPC_XIVE_H
10 #define _ASM_POWERPC_XIVE_H
12 #define XIVE_INVALID_VP 0xffffffff
14 #ifdef CONFIG_PPC_XIVE
17 * Thread Interrupt Management Area (TIMA)
19 * This is a global MMIO region divided in 4 pages of varying access
20 * permissions, providing access to per-cpu interrupt management
21 * functions. It always identifies the CPU doing the access based
22 * on the PowerBus initiator ID, thus we always access via the
23 * same offset regardless of where the code is executing
25 extern void __iomem *xive_tima;
28 * Offset in the TM area of our current execution level (provided by
31 extern u32 xive_tima_offset;
34 * Per-irq data (irq_get_handler_data for normal IRQs), IPIs
35 * have it stored in the xive_cpu structure. We also cache
36 * for normal interrupts the current target CPU.
38 * This structure is setup by the backend for each interrupt.
40 struct xive_irq_data {
43 void __iomem *eoi_mmio;
45 void __iomem *trig_mmio;
50 /* Setup/used by frontend */
54 #define XIVE_IRQ_FLAG_STORE_EOI 0x01
55 #define XIVE_IRQ_FLAG_LSI 0x02
56 #define XIVE_IRQ_FLAG_SHIFT_BUG 0x04
57 #define XIVE_IRQ_FLAG_MASK_FW 0x08
58 #define XIVE_IRQ_FLAG_EOI_FW 0x10
59 #define XIVE_IRQ_FLAG_H_INT_ESB 0x20
61 #define XIVE_INVALID_CHIP_ID -1
63 /* A queue tracking structure in a CPU */
72 atomic_t pending_count;
76 * "magic" Event State Buffer (ESB) MMIO offsets.
78 * Each interrupt source has a 2-bit state machine called ESB
79 * which can be controlled by MMIO. It's made of 2 bits, P and
80 * Q. P indicates that an interrupt is pending (has been sent
81 * to a queue and is waiting for an EOI). Q indicates that the
82 * interrupt has been triggered while pending.
84 * This acts as a coalescing mechanism in order to guarantee
85 * that a given interrupt only occurs at most once in a queue.
87 * When doing an EOI, the Q bit will indicate if the interrupt
88 * needs to be re-triggered.
90 * The following offsets into the ESB MMIO allow to read or
91 * manipulate the PQ bits. They must be used with an 8-bytes
92 * load instruction. They all return the previous state of the
93 * interrupt (atomically).
95 * Additionally, some ESB pages support doing an EOI via a
96 * store at 0 and some ESBs support doing a trigger via a
97 * separate trigger page.
99 #define XIVE_ESB_STORE_EOI 0x400 /* Store */
100 #define XIVE_ESB_LOAD_EOI 0x000 /* Load */
101 #define XIVE_ESB_GET 0x800 /* Load */
102 #define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
103 #define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
104 #define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
105 #define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
107 #define XIVE_ESB_VAL_P 0x2
108 #define XIVE_ESB_VAL_Q 0x1
110 /* Global enable flags for the XIVE support */
111 extern bool __xive_enabled;
113 static inline bool xive_enabled(void) { return __xive_enabled; }
115 extern bool xive_spapr_init(void);
116 extern bool xive_native_init(void);
117 extern void xive_smp_probe(void);
118 extern int xive_smp_prepare_cpu(unsigned int cpu);
119 extern void xive_smp_setup_cpu(void);
120 extern void xive_smp_disable_cpu(void);
121 extern void xive_teardown_cpu(void);
122 extern void xive_kexec_teardown_cpu(int secondary);
123 extern void xive_shutdown(void);
124 extern void xive_flush_interrupt(void);
127 extern void xmon_xive_do_dump(int cpu);
129 /* APIs used by KVM */
130 extern u32 xive_native_default_eq_shift(void);
131 extern u32 xive_native_alloc_vp_block(u32 max_vcpus);
132 extern void xive_native_free_vp_block(u32 vp_base);
133 extern int xive_native_populate_irq_data(u32 hw_irq,
134 struct xive_irq_data *data);
135 extern void xive_cleanup_irq_data(struct xive_irq_data *xd);
136 extern u32 xive_native_alloc_irq(void);
137 extern void xive_native_free_irq(u32 irq);
138 extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
140 extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
141 __be32 *qpage, u32 order, bool can_escalate);
142 extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
144 extern void xive_native_sync_source(u32 hw_irq);
145 extern bool is_xive_irq(struct irq_chip *chip);
146 extern int xive_native_enable_vp(u32 vp_id);
147 extern int xive_native_disable_vp(u32 vp_id);
148 extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
/* Without CONFIG_PPC_XIVE, XIVE mode is never available. */
static inline bool xive_enabled(void)
{
	return false;
}
/* sPAPR backend initialization stub: always reports failure. */
static inline bool xive_spapr_init(void)
{
	return false;
}
/* Native backend initialization stub: always reports failure. */
static inline bool xive_native_init(void)
{
	return false;
}
/* No-op when XIVE support is compiled out. */
static inline void xive_smp_probe(void)
{
}
/*
 * Stub when XIVE support is compiled out: per-CPU IPI setup cannot be
 * performed, so report -EINVAL.
 *
 * Fix: this was declared "extern inline". With a body in a header,
 * C99/gcc "extern inline" emits an external definition in every
 * translation unit that includes the header, causing duplicate-symbol
 * link failures. "static inline" is correct and matches every other
 * stub in this #else block.
 */
static inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
/* No-op when XIVE support is compiled out. */
static inline void xive_smp_setup_cpu(void)
{
}
/* No-op when XIVE support is compiled out. */
static inline void xive_smp_disable_cpu(void)
{
}
/* No-op when XIVE support is compiled out; 'secondary' is ignored. */
static inline void xive_kexec_teardown_cpu(int secondary)
{
}
/* No-op when XIVE support is compiled out. */
static inline void xive_shutdown(void)
{
}
/* No-op when XIVE support is compiled out. */
static inline void xive_flush_interrupt(void)
{
}
164 static inline u32 xive_native_alloc_vp_block(u32 max_vcpus) { return XIVE_INVALID_VP; }
165 static inline void xive_native_free_vp_block(u32 vp_base) { }
169 #endif /* _ASM_POWERPC_XIVE_H */