// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *	   Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq alloc and free code reuse from sysdev/mpic_msi.c
 */
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"
/*
 * Field layout of the MSIIR (classic) and MSIIR1 (v4.3) registers:
 * SRS selects the MSIR register bank, IBS selects the bit within it.
 */
#define MSIIR_OFFSET_MASK	0xfffff
#define MSIIR_IBS_SHIFT		0
#define MSIIR_SRS_SHIFT		5
#define MSIIR1_IBS_SHIFT	4
#define MSIIR1_SRS_SHIFT	0
#define MSI_SRS_MASK		0xf
#define MSI_IBS_MASK		0x1f

/*
 * Compose a hwirq number from an MSIR register index (SRS) and a bit
 * index within that register (IBS), using the shifts of the given
 * fsl_msi controller (classic vs. v4.3 layouts differ).
 */
#define msi_hwirq(msi, msir_index, intr_index) \
		((msir_index) << (msi)->srs_shift | \
		 ((intr_index) << (msi)->ibs_shift))
39 static LIST_HEAD(msi_head);
41 struct fsl_msi_feature {
43 u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
/* Context handed to each cascade handler via request_irq() */
struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;	/* MSIR register index served by this cascade */
	int virq;	/* Linux irq number of the cascade interrupt */
};
52 static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
54 return in_be32(base + (reg >> 2));
/*
 * We do not need this actually. The MSIR register has been read once
 * in the cascade interrupt. So, this MSI interrupt has been acked
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}
65 static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
67 struct fsl_msi *msi_data = irqd->domain->host_data;
68 irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
69 int cascade_virq, srs;
71 srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
72 cascade_virq = msi_data->cascade_array[srs]->virq;
74 seq_printf(p, " fsl-msi-%d", cascade_virq);
78 static struct irq_chip fsl_msi_chip = {
79 .irq_mask = pci_msi_mask_irq,
80 .irq_unmask = pci_msi_unmask_irq,
81 .irq_ack = fsl_msi_end_irq,
82 .irq_print_chip = fsl_msi_print_chip,
85 static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
88 struct fsl_msi *msi_data = h->host_data;
89 struct irq_chip *chip = &fsl_msi_chip;
91 irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);
93 irq_set_chip_data(virq, msi_data);
94 irq_set_chip_and_handler(virq, chip, handle_edge_irq);
99 static const struct irq_domain_ops fsl_msi_host_ops = {
100 .map = fsl_msi_host_map,
103 static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
107 rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
108 irq_domain_get_of_node(msi_data->irqhost));
113 * Reserve all the hwirqs
114 * The available hwirqs will be released in fsl_msi_setup_hwirq()
116 for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
117 msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);
122 static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
124 struct msi_desc *entry;
125 struct fsl_msi *msi_data;
126 irq_hw_number_t hwirq;
128 for_each_pci_msi_entry(entry, pdev) {
131 hwirq = virq_to_hw(entry->irq);
132 msi_data = irq_get_chip_data(entry->irq);
133 irq_set_msi_desc(entry->irq, NULL);
134 irq_dispose_mapping(entry->irq);
135 msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
141 static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
143 struct fsl_msi *fsl_msi_data)
145 struct fsl_msi *msi_data = fsl_msi_data;
146 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
147 u64 address; /* Physical address of the MSIIR */
151 /* If the msi-address-64 property exists, then use it */
152 reg = of_get_property(hose->dn, "msi-address-64", &len);
153 if (reg && (len == sizeof(u64)))
154 address = be64_to_cpup(reg);
156 address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;
158 msg->address_lo = lower_32_bits(address);
159 msg->address_hi = upper_32_bits(address);
162 * MPIC version 2.0 has erratum PIC1. It causes
163 * that neither MSI nor MSI-X can work fine.
164 * This is a workaround to allow MSI-X to function
165 * properly. It only works for MSI-X, we prevent
166 * MSI on buggy chips in fsl_setup_msi_irqs().
168 if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
169 msg->data = __swab32(hwirq);
173 pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
174 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
175 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
178 static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
180 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
181 struct device_node *np;
183 int rc, hwirq = -ENOMEM;
185 struct msi_desc *entry;
187 struct fsl_msi *msi_data;
189 if (type == PCI_CAP_ID_MSI) {
191 * MPIC version 2.0 has erratum PIC1. For now MSI
192 * could not work. So check to prevent MSI from
193 * being used on the board with this erratum.
195 list_for_each_entry(msi_data, &msi_head, list)
196 if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
201 * If the PCI node has an fsl,msi property, then we need to use it
202 * to find the specific MSI.
204 np = of_parse_phandle(hose->dn, "fsl,msi", 0);
206 if (of_device_is_compatible(np, "fsl,mpic-msi") ||
207 of_device_is_compatible(np, "fsl,vmpic-msi") ||
208 of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
209 phandle = np->phandle;
212 "node %pOF has an invalid fsl,msi phandle %u\n",
213 hose->dn, np->phandle);
220 for_each_pci_msi_entry(entry, pdev) {
222 * Loop over all the MSI devices until we find one that has an
223 * available interrupt.
225 list_for_each_entry(msi_data, &msi_head, list) {
227 * If the PCI node has an fsl,msi property, then we
228 * restrict our search to the corresponding MSI node.
229 * The simplest way is to skip over MSI nodes with the
230 * wrong phandle. Under the Freescale hypervisor, this
231 * has the additional benefit of skipping over MSI
232 * nodes that are not mapped in the PAMU.
234 if (phandle && (phandle != msi_data->phandle))
237 hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
244 dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
248 virq = irq_create_mapping(msi_data->irqhost, hwirq);
251 dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq);
252 msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
256 /* chip_data is msi_data via host->hostdata in host->map() */
257 irq_set_msi_desc(virq, entry);
259 fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
260 pci_write_msi_msg(virq, &msg);
265 /* free by the caller of this function */
269 static irqreturn_t fsl_msi_cascade(int irq, void *data)
271 unsigned int cascade_irq;
272 struct fsl_msi *msi_data;
277 struct fsl_msi_cascade_data *cascade_data = data;
278 irqreturn_t ret = IRQ_NONE;
280 msi_data = cascade_data->msi_data;
282 msir_index = cascade_data->index;
284 if (msir_index >= NR_MSI_REG_MAX)
287 switch (msi_data->feature & FSL_PIC_IP_MASK) {
288 case FSL_PIC_IP_MPIC:
289 msir_value = fsl_msi_read(msi_data->msi_regs,
292 case FSL_PIC_IP_IPIC:
293 msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
295 #ifdef CONFIG_EPAPR_PARAVIRT
296 case FSL_PIC_IP_VMPIC: {
298 ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
300 pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
301 "irq %u (ret=%u)\n", irq, ret);
310 intr_index = ffs(msir_value) - 1;
312 cascade_irq = irq_linear_revmap(msi_data->irqhost,
313 msi_hwirq(msi_data, msir_index,
314 intr_index + have_shift));
316 generic_handle_irq(cascade_irq);
319 have_shift += intr_index + 1;
320 msir_value = msir_value >> (intr_index + 1);
326 static int fsl_of_msi_remove(struct platform_device *ofdev)
328 struct fsl_msi *msi = platform_get_drvdata(ofdev);
331 if (msi->list.prev != NULL)
332 list_del(&msi->list);
333 for (i = 0; i < NR_MSI_REG_MAX; i++) {
334 if (msi->cascade_array[i]) {
335 virq = msi->cascade_array[i]->virq;
339 free_irq(virq, msi->cascade_array[i]);
340 kfree(msi->cascade_array[i]);
341 irq_dispose_mapping(virq);
344 if (msi->bitmap.bitmap)
345 msi_bitmap_free(&msi->bitmap);
346 if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
347 iounmap(msi->msi_regs);
353 static struct lock_class_key fsl_msi_irq_class;
354 static struct lock_class_key fsl_msi_irq_request_class;
356 static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
357 int offset, int irq_index)
359 struct fsl_msi_cascade_data *cascade_data = NULL;
360 int virt_msir, i, ret;
362 virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
364 dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
365 __func__, irq_index);
369 cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
371 dev_err(&dev->dev, "No memory for MSI cascade data\n");
374 irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
375 &fsl_msi_irq_request_class);
376 cascade_data->index = offset;
377 cascade_data->msi_data = msi;
378 cascade_data->virq = virt_msir;
379 msi->cascade_array[irq_index] = cascade_data;
381 ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
382 "fsl-msi-cascade", cascade_data);
384 dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
389 /* Release the hwirqs corresponding to this MSI register */
390 for (i = 0; i < IRQS_PER_MSI_REG; i++)
391 msi_bitmap_free_hwirqs(&msi->bitmap,
392 msi_hwirq(msi, offset, i), 1);
397 static const struct of_device_id fsl_of_msi_ids[];
398 static int fsl_of_msi_probe(struct platform_device *dev)
400 const struct of_device_id *match;
402 struct resource res, msiir;
403 int err, i, j, irq_index, count;
405 const struct fsl_msi_feature *features;
408 struct pci_controller *phb;
410 match = of_match_device(fsl_of_msi_ids, &dev->dev);
413 features = match->data;
415 printk(KERN_DEBUG "Setting up Freescale MSI support\n");
417 msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
419 dev_err(&dev->dev, "No memory for MSI structure\n");
422 platform_set_drvdata(dev, msi);
424 msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
425 NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);
427 if (msi->irqhost == NULL) {
428 dev_err(&dev->dev, "No memory for MSI irqhost\n");
434 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
435 * property. Instead, we use hypercalls to access the MSI.
437 if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
438 err = of_address_to_resource(dev->dev.of_node, 0, &res);
440 dev_err(&dev->dev, "invalid resource for node %pOF\n",
445 msi->msi_regs = ioremap(res.start, resource_size(&res));
446 if (!msi->msi_regs) {
448 dev_err(&dev->dev, "could not map node %pOF\n",
453 features->msiir_offset + (res.start & 0xfffff);
456 * First read the MSIIR/MSIIR1 offset from dts
457 * On failure use the hardcode MSIIR offset
459 if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
460 msi->msiir_offset = features->msiir_offset +
461 (res.start & MSIIR_OFFSET_MASK);
463 msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
466 msi->feature = features->fsl_pic_ip;
468 /* For erratum PIC1 on MPIC version 2.0*/
469 if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
470 && (fsl_mpic_primary_get_version() == 0x0200))
471 msi->feature |= MSI_HW_ERRATA_ENDIAN;
474 * Remember the phandle, so that we can match with any PCI nodes
475 * that have an "fsl,msi" property.
477 msi->phandle = dev->dev.of_node->phandle;
479 err = fsl_msi_init_allocator(msi);
481 dev_err(&dev->dev, "Error allocating MSI bitmap\n");
485 p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
487 if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
488 of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
489 msi->srs_shift = MSIIR1_SRS_SHIFT;
490 msi->ibs_shift = MSIIR1_IBS_SHIFT;
492 dev_warn(&dev->dev, "%s: dose not support msi-available-ranges property\n",
495 for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
497 err = fsl_msi_setup_hwirq(msi, dev,
498 irq_index, irq_index);
503 static const u32 all_avail[] =
504 { 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };
506 msi->srs_shift = MSIIR_SRS_SHIFT;
507 msi->ibs_shift = MSIIR_IBS_SHIFT;
509 if (p && len % (2 * sizeof(u32)) != 0) {
510 dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
518 len = sizeof(all_avail);
521 for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
522 if (p[i * 2] % IRQS_PER_MSI_REG ||
523 p[i * 2 + 1] % IRQS_PER_MSI_REG) {
524 pr_warn("%s: %pOF: msi available range of %u at %u is not IRQ-aligned\n",
525 __func__, dev->dev.of_node,
526 p[i * 2 + 1], p[i * 2]);
531 offset = p[i * 2] / IRQS_PER_MSI_REG;
532 count = p[i * 2 + 1] / IRQS_PER_MSI_REG;
534 for (j = 0; j < count; j++, irq_index++) {
535 err = fsl_msi_setup_hwirq(msi, dev, offset + j,
543 list_add_tail(&msi->list, &msi_head);
546 * Apply the MSI ops to all the controllers.
547 * It doesn't hurt to reassign the same ops,
548 * but bail out if we find another MSI driver.
550 list_for_each_entry(phb, &hose_list, list_node) {
551 if (!phb->controller_ops.setup_msi_irqs) {
552 phb->controller_ops.setup_msi_irqs = fsl_setup_msi_irqs;
553 phb->controller_ops.teardown_msi_irqs = fsl_teardown_msi_irqs;
554 } else if (phb->controller_ops.setup_msi_irqs != fsl_setup_msi_irqs) {
555 dev_err(&dev->dev, "Different MSI driver already installed!\n");
562 fsl_of_msi_remove(dev);
566 static const struct fsl_msi_feature mpic_msi_feature = {
567 .fsl_pic_ip = FSL_PIC_IP_MPIC,
568 .msiir_offset = 0x140,
571 static const struct fsl_msi_feature ipic_msi_feature = {
572 .fsl_pic_ip = FSL_PIC_IP_IPIC,
573 .msiir_offset = 0x38,
576 static const struct fsl_msi_feature vmpic_msi_feature = {
577 .fsl_pic_ip = FSL_PIC_IP_VMPIC,
581 static const struct of_device_id fsl_of_msi_ids[] = {
583 .compatible = "fsl,mpic-msi",
584 .data = &mpic_msi_feature,
587 .compatible = "fsl,mpic-msi-v4.3",
588 .data = &mpic_msi_feature,
591 .compatible = "fsl,ipic-msi",
592 .data = &ipic_msi_feature,
594 #ifdef CONFIG_EPAPR_PARAVIRT
596 .compatible = "fsl,vmpic-msi",
597 .data = &vmpic_msi_feature,
600 .compatible = "fsl,vmpic-msi-v4.3",
601 .data = &vmpic_msi_feature,
607 static struct platform_driver fsl_of_msi_driver = {
610 .of_match_table = fsl_of_msi_ids,
612 .probe = fsl_of_msi_probe,
613 .remove = fsl_of_msi_remove,
616 static __init int fsl_of_msi_init(void)
618 return platform_driver_register(&fsl_of_msi_driver);
621 subsys_initcall(fsl_of_msi_init);