/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 */
5 #ifndef __XIVE_INTERNAL_H
6 #define __XIVE_INTERNAL_H
/*
 * A "disabled" interrupt should never fire; to catch problems
 * we set its logical number to this
 */
12 #define XIVE_BAD_IRQ 0x7fffffff
13 #define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
/* Each CPU carries one of these with various per-CPU state */
18 /* HW irq number and data of IPI */
20 struct xive_irq_data ipi_data;
21 #endif /* CONFIG_SMP */
/* Queue data. Only one is populated */
26 #define XIVE_MAX_QUEUES 8
27 struct xive_q queue[XIVE_MAX_QUEUES];
/*
 * Pending mask. Each bit corresponds to a priority that
 * potentially has pending interrupts.
 */
/* Cache of HW CPPR */
41 int (*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
42 int (*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
43 int (*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
45 int (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
46 void (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
47 void (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
48 void (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
49 bool (*match)(struct device_node *np);
50 void (*shutdown)(void);
52 void (*update_pending)(struct xive_cpu *xc);
53 void (*eoi)(u32 hw_irq);
54 void (*sync_source)(u32 hw_irq);
55 u64 (*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
57 int (*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
58 void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
63 bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
65 __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
67 static inline u32 xive_alloc_order(u32 queue_shift)
69 return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
72 extern bool xive_cmdline_disabled;
74 #endif /* __XIVE_INTERNAL_H */