// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>

#include "xive-internal.h"
static u32 xive_queue_shift;
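/*
 * The hypervisor hands out LISNs (Logical Interrupt Source Numbers)
 * in ranges advertised by the "ibm,xive-lisn-ranges" device tree
 * property. Each range is tracked by one of these bitmaps, from
 * which IPI numbers are allocated.
 */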
struct xive_irq_bitmap {
	unsigned long		*bitmap;
	unsigned int		base;
	unsigned int		count;
	spinlock_t		lock;
	struct list_head	list;
};

static LIST_HEAD(xive_irq_bitmaps);
static int __init xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL);
	if (!xibm->bitmap) {
		kfree(xibm);
		return -ENOMEM;
	}
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]\n", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}
static void xive_irq_bitmap_remove_all(void)
{
	struct xive_irq_bitmap *xibm, *tmp;

	list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) {
		list_del(&xibm->list);
		bitmap_free(xibm->bitmap);
		kfree(xibm);
	}
}
static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}
static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}
static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}
/* Based on the similar routines in RTAS */
static unsigned int plpar_busy_delay_time(long rc)
{
	unsigned int ms = 0;

	if (H_IS_LONG_BUSY(rc)) {
		ms = get_longbusy_msecs(rc);
	} else if (rc == H_BUSY) {
		ms = 10; /* seems appropriate for XIVE hcalls */
	}

	return ms;
}
static unsigned int plpar_busy_delay(int rc)
{
	unsigned int ms;

	ms = plpar_busy_delay_time(rc);
	if (ms)
		mdelay(ms);

	return ms;
}
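/*
 * Every hcall wrapper below retries in a "do { ... } while
 * (plpar_busy_delay(rc))" loop: H_BUSY asks for an immediate retry
 * and the H_LONG_BUSY_* codes encode a suggested wait, decoded by
 * get_longbusy_msecs(). Any other return code ends the loop.
 */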
/*
 * Note: this call has a partition wide scope and can take a while to
 * complete. If it returns H_LONG_BUSY_* it should be retried
 * periodically.
 */
static long plpar_int_reset(unsigned long flags)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_RESET, flags);
	} while (plpar_busy_delay(rc));

	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);

	return rc;
}
static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=0x%lx failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page  = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_debug("H_INT_GET_SOURCE_INFO lisn=0x%lx flags=0x%lx eoi=0x%lx trig=0x%lx shift=0x%lx\n",
		 lisn, retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}
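/*
 * PAPR uses IBM (MSB-0) bit numbering for hcall flags: bit 0 is the
 * most significant bit of the 64-bit word, hence the
 * "1ull << (63 - n)" pattern in the flag definitions below.
 */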
#define XIVE_SRC_SET_EISN	(1ull << (63 - 62))
#define XIVE_SRC_MASK		(1ull << (63 - 63)) /* unused */
static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_debug("H_INT_SET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx target=%ld prio=%ld sw_irq=%ld\n",
		 flags, lisn, target, prio, sw_irq);

	do {
		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
					target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=0x%lx target=%ld prio=%ld failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}
static long plpar_int_get_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long *target,
					unsigned long *prio,
					unsigned long *sw_irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_debug("H_INT_GET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx\n", flags, lisn);

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
				 target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_CONFIG lisn=0x%lx failed %ld\n",
		       lisn, rc);
		return rc;
	}

	*target = retbuf[0];
	*prio   = retbuf[1];
	*sw_irq = retbuf[2];

	pr_debug("H_INT_GET_SOURCE_CONFIG target=%ld prio=%ld sw_irq=%ld\n",
		 retbuf[0], retbuf[1], retbuf[2]);

	return 0;
}
static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
				 priority);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_debug("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld page=0x%lx size=0x%lx\n",
		 target, priority, retbuf[0], retbuf[1]);

	return 0;
}
#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63))

static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_debug("H_INT_SET_QUEUE_CONFIG flags=0x%lx target=%ld priority=0x%lx qpage=0x%lx qsize=0x%lx\n",
		 flags, target, priority, qpage, qsize);

	do {
		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
					priority, qpage, qsize);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=0x%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}
static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SYNC lisn=0x%lx returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}
#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63))

static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_debug("H_INT_ESB flags=0x%lx lisn=0x%lx offset=0x%lx in=0x%lx\n",
		 flags, lisn, offset, in_data);

	do {
		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
				 in_data);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_ESB lisn=0x%lx offset=0x%lx returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}
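/*
 * The H_INT_ESB hcall stands in for loads and stores to the ESB
 * pages when the source carries the H_INT_ESB flag. A write returns
 * 0; a read returns the ESB data, or all ones if the hcall failed.
 */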
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}
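/*
 * Source flags returned by H_INT_GET_SOURCE_INFO: whether the ESB
 * must be driven through the H_INT_ESB hcall, whether the source is
 * level triggered (LSI), whether the EOI page also supports trigger,
 * and whether EOI can be done with a store.
 */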
#define XIVE_SRC_H_INT_ESB	(1ull << (63 - 60))
#define XIVE_SRC_LSI		(1ull << (63 - 61))
#define XIVE_SRC_TRIGGER	(1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI	(1ull << (63 - 63))
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	data->hw_irq = hw_irq;

	/*
	 * No chip-id for the sPAPR backend. This has an impact on how we
	 * pick a target. See xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	/*
	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
	 * be used for interrupt management. Skip the remapping of the
	 * ESB pages which are not available.
	 */
	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
		return 0;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	/* Full function page supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}
static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				     u32 *sw_irq)
{
	long rc;
	unsigned long h_target;
	unsigned long h_prio;
	unsigned long h_sw_irq;

	rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
					 &h_sw_irq);

	*target = h_target;
	*prio = h_prio;
	*sw_irq = h_sw_irq;

	return rc == 0 ? 0 : -ENXIO;
}
/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the fields */
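	/*
	 * Each event queue entry is a 4 byte word, so a queue of
	 * (1 << order) bytes holds (1 << (order - 2)) entries; msk is
	 * the index mask derived from that.
	 */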
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* TODO: add support for the notification page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
		if (is_secure_guest())
			uv_share_page(PHYS_PFN(qpage_phys),
				      1 << xive_alloc_order(order));
	}

fail:
	return rc;
}
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}
static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
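	/*
	 * Undo the uv_share_page() done when the queue was configured:
	 * a secure guest must unshare the page before handing it back
	 * to the kernel page allocator.
	 */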
	if (is_secure_guest())
		uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}
static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}
#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */
static void xive_spapr_shutdown(void)
{
	plpar_int_reset(0);
}
/*
 * Perform an "ack" cycle on the current thread. Grab the pending
 * active priorities and update the CPPR to the most favored one.
 */
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle.
	 *
	 * Let's speed up the access to the TIMA using the raw I/O
	 * accessor as we don't need the synchronisation routine of
	 * the higher level ones.
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));
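	/*
	 * The 16-bit value read from TM_SPC_ACK_OS_REG carries the NSR
	 * byte in its upper half and the CPPR in its lower half.
	 */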
	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field which indicates the source
	 * of the interrupt (if any)
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}
static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}
static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}
static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Specs are unclear on what this is doing */
	plpar_int_sync(0, hw_irq);
}
static int xive_spapr_debug_show(struct seq_file *m, void *private)
{
	struct xive_irq_bitmap *xibm;
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		memset(buf, 0, PAGE_SIZE);
		bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
		seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
	}
	kfree(buf);

	return 0;
}
static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data	= xive_spapr_populate_irq_data,
	.configure_irq		= xive_spapr_configure_irq,
	.get_irq_config		= xive_spapr_get_irq_config,
	.setup_queue		= xive_spapr_setup_queue,
	.cleanup_queue		= xive_spapr_cleanup_queue,
	.match			= xive_spapr_match,
	.shutdown		= xive_spapr_shutdown,
	.update_pending		= xive_spapr_update_pending,
	.setup_cpu		= xive_spapr_setup_cpu,
	.teardown_cpu		= xive_spapr_teardown_cpu,
	.sync_source		= xive_spapr_sync_source,
	.esb_rw			= xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi		= xive_spapr_get_ipi,
	.put_ipi		= xive_spapr_put_ipi,
	.debug_show		= xive_spapr_debug_show,
#endif /* CONFIG_SMP */
	.name			= "spapr",
};
/*
 * Get the max priority from "/ibm,plat-res-int-priorities"
 */
static bool __init xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("root node not found!\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}
	/* HW supports priorities in the range [0-7] and 0xFF is a
	 * wildcard priority used to mask. We scan the ranges reserved
	 * by the hypervisor to find the lowest priority we can use.
	 */
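	/*
	 * For instance (illustrative values), a property of <0 4 6 2>
	 * reserves priorities 0-3 and 6-7, leaving 4 and 5 free; the
	 * scan below would then settle on 5.
	 */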
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}
static const u8 *__init get_vec5_feature(unsigned int index)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return NULL;

	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5)
		return NULL;

	if (size <= index)
		return NULL;

	return vec5 + index;
}
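/*
 * "ibm,architecture-vec-5" records the outcome of the client
 * architecture support (CAS) negotiation with the hypervisor. The
 * OV5_XIVE_SUPPORT byte tells us whether the platform runs in XICS
 * legacy mode, XIVE exploitation mode, or lets the OS choose.
 */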
static bool __init xive_spapr_disabled(void)
{
	const u8 *vec5_xive;

	vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
	if (vec5_xive) {
		u8 val;

		val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
		switch (val) {
		case OV5_FEAT(OV5_XIVE_EITHER):
		case OV5_FEAT(OV5_XIVE_LEGACY):
			break;
		case OV5_FEAT(OV5_XIVE_EXPLOIT):
			/* Hypervisor only supports XIVE */
			if (xive_cmdline_disabled)
				pr_warn("WARNING: Ignoring cmdline option xive=off\n");
			return false;
		default:
			pr_warn("%s: Unknown xive support option: 0x%x\n",
				__func__, val);
			break;
		}
	}

	return xive_cmdline_disabled;
}
bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i, err;

	if (xive_spapr_disabled())
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		goto err_put;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		goto err_put;
	}

	if (!xive_get_max_prio(&max_prio))
		goto err_unmap;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		goto err_unmap;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		goto err_unmap;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) {
		err = xive_irq_bitmap_add(be32_to_cpu(reg[0]),
					  be32_to_cpu(reg[1]));
		if (err < 0)
			goto err_mem_free;
	}
	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}
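	/*
	 * Note the loop above prefers an EQ size matching PAGE_SHIFT
	 * (one page per queue); if none of the advertised sizes
	 * matches, the last one listed is used.
	 */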
	/* Initialize XIVE core with our backend */
	if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		goto err_mem_free;

	of_node_put(np);
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;

err_mem_free:
	xive_irq_bitmap_remove_all();
err_unmap:
	iounmap(tima);
err_put:
	of_node_put(np);
	return false;
}

machine_arch_initcall(pseries, xive_core_debug_init);