// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/kmemleak.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"
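
/*
 * Native (PowerNV bare-metal) backend for the XIVE interrupt
 * controller: hardware configuration goes through OPAL calls, while
 * EOIs and acknowledgements are done via MMIO to the ESB pages and
 * the thread management areas (TIMA) mapped below.
 */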

static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;

int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
		data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
	if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_MASK_FW;
	if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_EOI_FW;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
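
/*
 * Most OPAL XIVE calls below can return OPAL_BUSY while firmware
 * completes a previous operation; the convention throughout this file
 * is to retry the call after sleeping for OPAL_BUSY_DELAY_MS.
 */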

int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				      u32 *sw_irq)
{
	s64 rc;
	__be64 vp;
	__be32 lirq;

	rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

	*target = be64_to_cpu(vp);
	*sw_irq = be32_to_cpu(lirq);

	return rc == 0 ? 0 : -ENXIO;
}
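
/*
 * Event queue entries are 4 bytes, so a queue page of 2^order bytes
 * holds 2^(order - 2) entries; q->msk below is the resulting index
 * mask, and q->toggle tracks the generation bit that flips on each
 * wrap of the queue.
 */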

/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed ? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
	s64 irq = opal_xive_allocate_irq_raw(chip_id);

	/*
	 * Old versions of skiboot can incorrectly return 0xffffffff to
	 * indicate no space, fix it up here.
	 */
	return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 irq;

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(xc->chip_id);
		if (irq == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */
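
/*
 * xive_native_alloc_irq()/xive_native_free_irq() form the generic
 * interrupt number allocator used by other PowerNV code. A
 * hypothetical caller (names illustrative only) might do:
 *
 *	u32 hw_irq = xive_native_alloc_irq();
 *	if (!hw_irq)
 *		return -ENOSPC;
 *	...
 *	xive_native_free_irq(hw_irq);
 *
 * where 0 is the "no interrupt available" return value.
 */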

u32 xive_native_alloc_irq(void)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);

		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		xc->hw_ipi = XIVE_BAD_IRQ;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = (ack >> 8) >> 6;
	switch(he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		break;
	}
}

static void xive_native_eoi(u32 hw_irq)
{
	/*
	 * Not normally used except if specific interrupts need
	 * a workaround on EOI.
	 */
	opal_int_eoi(hw_irq);
}
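
/*
 * The "pool" VPs are dummy virtual processors, one per possible CPU,
 * allocated in xive_native_setup_pools() below so that KVM can use
 * the HV pool ring. setup_cpu pushes the pool VP's CAM line into the
 * QW2 (HV pool) ring of the thread management area so that pool
 * interrupts can be routed to this thread.
 */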

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Check if pool VP already active, if it is, pull it */
	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);

static const struct xive_ops xive_native_ops = {
	.populate_irq_data	= xive_native_populate_irq_data,
	.configure_irq		= xive_native_configure_irq,
	.get_irq_config		= xive_native_get_irq_config,
	.setup_queue		= xive_native_setup_queue,
	.cleanup_queue		= xive_native_cleanup_queue,
	.match			= xive_native_match,
	.shutdown		= xive_native_shutdown,
	.update_pending		= xive_native_update_pending,
	.eoi			= xive_native_eoi,
	.setup_cpu		= xive_native_setup_cpu,
	.teardown_cpu		= xive_native_teardown_cpu,
	.sync_source		= xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi		= xive_native_get_ipi,
	.put_ipi		= xive_native_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "native",
};

static bool xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kcalloc(4, xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;
	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);
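
/*
 * Probe the "ibm,opal-xive-pe" device tree node, map the thread
 * management areas, pick an event queue size, switch the controller
 * from XICS emulation to native (exploitation) mode and register this
 * backend with the XIVE core.
 */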

bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation */
	if (of_get_property(np, "single-escalation-support", NULL) != NULL)
		xive_has_single_esc = true;

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Resource 2 is OS window */
	if (of_address_to_resource(np, 2, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}

	xive_tima_os = r.start;

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}

static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node where
		 * the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		kmemleak_ignore(p);
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}
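
/*
 * VP blocks are handed out by OPAL in power-of-two sizes, hence the
 * order computation below. OPAL may also answer OPAL_XIVE_PROVISIONING
 * to ask for more backing pages (donated above) before the allocation
 * is retried.
 */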

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(OPAL_BUSY_DELAY_MS);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);
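
/*
 * Sketch of how a hypothetical KVM-style user would drive the VP APIs
 * below (caller-side names are illustrative only):
 *
 *	u32 vp_base = xive_native_alloc_vp_block(max_vcpus);
 *	if (vp_base == XIVE_INVALID_VP)
 *		return -ENOSPC;
 *	rc = xive_native_enable_vp(vp_base + vcpu_id,
 *				   xive_native_has_single_escalation());
 *	...
 *	xive_native_disable_vp(vp_base + vcpu_id);
 *	xive_native_free_vp_block(vp_base);
 */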

void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc)
		return -EIO;
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
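
/*
 * The queue and VP state accessors below exist mainly for KVM, which
 * uses them to capture and restore interrupt controller state across
 * guest migration.
 */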

int xive_native_get_queue_info(u32 vp_id, u32 prio,
			       u64 *out_qpage,
			       u64 *out_qsize,
			       u64 *out_qeoi_page,
			       u32 *out_escalate_irq,
			       u64 *out_qflags)
{
	__be64 qpage;
	__be64 qsize;
	__be64 qeoi_page;
	__be32 escalate_irq;
	__be64 qflags;
	s64 rc;

	rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
				      &qeoi_page, &escalate_irq, &qflags);
	if (rc) {
		pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	if (out_qpage)
		*out_qpage = be64_to_cpu(qpage);
	if (out_qsize)
		*out_qsize = be64_to_cpu(qsize);
	if (out_qeoi_page)
		*out_qeoi_page = be64_to_cpu(qeoi_page);
	if (out_escalate_irq)
		*out_escalate_irq = be32_to_cpu(escalate_irq);
	if (out_qflags)
		*out_qflags = be64_to_cpu(qflags);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
	__be32 opal_qtoggle;
	__be32 opal_qindex;
	s64 rc;

	rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
				       &opal_qindex);
	if (rc) {
		pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	if (qtoggle)
		*qtoggle = be32_to_cpu(opal_qtoggle);
	if (qindex)
		*qindex = be32_to_cpu(opal_qindex);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
	s64 rc;

	rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
	if (rc) {
		pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
	return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
		opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);
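
/*
 * Retrieve the hardware state of a VP. Also a migration helper: the
 * returned 64-bit word is the VP context as captured by firmware.
 */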

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
	__be64 state;
	s64 rc;

	rc = opal_xive_get_vp_state(vp_id, &state);
	if (rc) {
		pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
		       vp_id, rc);
		return -EIO;
	}

	if (out_state)
		*out_state = be64_to_cpu(state);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);