#define pr_fmt(fmt)	"DMAR-IR: " fmt

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"
enum irq_mode {
	IRQ_REMAPPING,
	IRQ_POSTING,
};

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
	enum irq_mode mode;
};

struct intel_ir_data {
	struct irq_2_iommu			irq_2_iommu;
	struct irte				irte_entry;
	union {
		struct msi_msg			msi_entry;
	};
};

#define IR_X2APIC_MODE(mode)	(mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((eim_mode) ? dest : dest << 8)
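
/*
 * Worked example (illustrative values, not from the spec text above):
 * with eim_mode == 0 (xapic), IRTE_DEST(0x12) == 0x1200, i.e. the 8-bit
 * APIC ID is shifted into bits 15:8 of the IRTE destination field; with
 * eim_mode == 1 (x2apic), IRTE_DEST(0x12) == 0x12 and the full 32-bit
 * APIC ID is used as-is.
 */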

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *	->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is
 * no need to take the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static struct irq_domain_ops intel_ir_domain_ops;

static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
static int __init parse_ioapics_under_ir(void);

static bool ir_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
}

static void clear_ir_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static void init_ir_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_IRES)
		iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static int alloc_irte(struct intel_iommu *iommu, int irq,
		      struct irq_2_iommu *irq_iommu, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
		       mask, ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
		irq_iommu->mode = IRQ_REMAPPING;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
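
/*
 * Worked example for alloc_irte() above (hypothetical values): a request
 * for count == 3 IRTEs is rounded up to 4, giving mask == 2, and
 * bitmap_find_free_region() then reserves a block of 4 consecutive,
 * 4-aligned entries; sub_handles 0..3 later index into that block.
 */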

static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

static int modify_irte(struct irq_2_iommu *irq_iommu,
		       struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
	if ((irte->pst == 1) || (irte_modified->pst == 1)) {
		bool ret;

		ret = cmpxchg_double(&irte->low, &irte->high,
				     irte->low, irte->high,
				     irte_modified->low, irte_modified->high);
		/*
		 * We use cmpxchg16 to atomically update the 128-bit IRTE,
		 * and it cannot be updated by the hardware or other processors
		 * behind us, so the return value of cmpxchg16 should be the
		 * same as the old value.
		 */
		WARN_ON(!ret);
	} else
#endif
	{
		set_64bit(&irte->low, irte_modified->low);
		set_64bit(&irte->high, irte_modified->high);
	}
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);

	/* Update iommu mode according to the IRTE mode */
	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (drhd)
		return drhd->iommu;

	return NULL;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */
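
/*
 * Worked example (hypothetical requester id): for a device at bus 0x1a,
 * devfn 0x10, sid == (0x1a << 8) | 0x10 == 0x1a10. With SVT_VERIFY_SID_SQ
 * and SQ_13_IGNORE_3 the three low bits (the PCI function number) are
 * ignored, so requester ids 0x1a10..0x1a17 all pass verification.
 */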

/*
 * Set the SVT, SQ and SID fields of the irte to verify
 * the source ids of interrupt requests.
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	data->pdev = pdev;
	data->alias = alias;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * The DMA alias walk provides us with a PCI device and alias. The
	 * only case where it will return an alias on a different bus than
	 * the device is the case of a PCIe-to-PCI bridge, where the alias
	 * is for the subordinate bus. In this case we can only verify
	 * the bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID. Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
			     PCI_DEVID(PCI_BUS_NUM(data.alias),
				       dev->bus->number));
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     PCI_DEVID(dev->bus->number, dev->devfn));

	return 0;
}
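
/*
 * Worked example (hypothetical topology): for a device at 06:1d.2,
 * PCI_DEVID(6, PCI_DEVFN(0x1d, 2)) == 0x06ea, which is the SID programmed
 * in the final "else" case above. Behind a PCIe-to-PCI bridge the reported
 * alias targets the bridge's subordinate bus instead, so only
 * SVT_VERIFY_BUS-level checking of the requester id is possible.
 */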

static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;

	if (!is_kdump_kernel()) {
		pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
			iommu->name);
		clear_ir_pre_enabled(iommu);
		iommu_disable_irq_remapping(iommu);
		return -EINVAL;
	}

	/* Check whether the old ir-table has the same size as ours */
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	     != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size     = INTR_REMAP_TABLE_ENTRIES * sizeof(struct irte);

	/* Map the old IR table */
	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	/* Copy data over */
	memcpy(iommu->ir_table->base, old_ir_table, size);

	__iommu_flush_cache(iommu, iommu->ir_table->base, size);

	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap
	 */
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}

static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of interrupt entry cache to make sure the
	 * hardware uses the new irq remapping table.
	 */
	qi_global_iec(iommu);
}
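
/*
 * Note on the size field written above (from the VT-d programming model):
 * the low bits of DMAR_IRTA_REG encode the table size S as 2^(S+1)
 * entries, so INTR_REMAP_TABLE_REG_SIZE == 0xf selects 2^16 == 65536
 * IRTEs, matching INTR_REMAP_TABLE_ENTRIES.
 */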

static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/* Block compatibility-format MSIs */
	if (sts & DMA_GSTS_CFIS) {
		iommu->gcmd &= ~DMA_GCMD_CFI;
		writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			      readl, !(sts & DMA_GSTS_CFIS), sts);
	}

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status. Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	iommu->ir_domain = irq_domain_add_hierarchy(arch_get_ir_parent_domain(),
						    0, INTR_REMAP_TABLE_ENTRIES,
						    NULL, &intel_ir_domain_ops,
						    iommu);
	if (!iommu->ir_domain) {
		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
		goto out_free_bitmap;
	}
	iommu->ir_msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;

	/*
	 * If queued invalidation is already initialized,
	 * don't disable it.
	 */
	if (!iommu->qi) {
		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);
		dmar_disable_qi(iommu);

		if (dmar_enable_qi(iommu)) {
			pr_err("Failed to enable queued invalidation\n");
			goto out_free_bitmap;
		}
	}

	init_ir_status(iommu);

	if (ir_pre_enabled(iommu)) {
		if (iommu_load_old_irte(iommu))
			pr_err("Failed to copy IR table for %s from previous kernel\n",
			       iommu->name);
		else
			pr_info("Copied IR table for %s from previous kernel\n",
				iommu->name);
	}

	iommu_set_irq_remapping(iommu, eim_mode);

	return 0;

out_free_bitmap:
	kfree(bitmap);
out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);

	iommu->ir_table = NULL;

	return -ENOMEM;
}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	if (iommu && iommu->ir_table) {
		if (iommu->ir_msi_domain) {
			irq_domain_remove(iommu->ir_msi_domain);
			iommu->ir_msi_domain = NULL;
		}
		if (iommu->ir_domain) {
			irq_domain_remove(iommu->ir_domain);
			iommu->ir_domain = NULL;
		}
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}

static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int eim = 0;

	if (irq_remap_broken) {
		pr_warn("This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable. To maintain system stability\n"
			"interrupt remapping is being disabled. Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir()) {
		pr_info("Not enabling interrupt remapping\n");
		goto error;
	}

	/* First make sure all IOMMUs support IRQ remapping */
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;

	/* Detect remapping mode: lapic or x2apic */
	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim) {
			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.\n");
			pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
		}
	}

	for_each_iommu(iommu, drhd) {
		if (eim && !ecap_eim_support(iommu->ecap)) {
			pr_info("%s does not support EIM\n", iommu->name);
			eim = 0;
		}
	}

	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

	/* Do the initializations early */
	for_each_iommu(iommu, drhd) {
		if (intel_setup_irq_remapping(iommu)) {
			pr_err("Failed to setup irq remapping for %s\n",
			       iommu->name);
			goto error;
		}
	}

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}

/*
 * Set Posted-Interrupts capability.
 */
static inline void set_irq_posting_cap(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (!disable_irq_post) {
		/*
		 * If the IRTE is in posted format, the 'pda' field goes
		 * across the 64-bit boundary, so we need to use cmpxchg16b
		 * to atomically update it. We only expose posted-interrupt
		 * support when X86_FEATURE_CX16 is available. In practice,
		 * hardware platforms supporting PI should have
		 * X86_FEATURE_CX16 support; this has been confirmed with
		 * the Intel hardware engineers.
		 */
		if (boot_cpu_has(X86_FEATURE_CX16))
			intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;

		for_each_iommu(iommu, drhd)
			if (!cap_pi_support(iommu->cap)) {
				intel_irq_remap_ops.capability &=
						~(1 << IRQ_POSTING_CAP);
				break;
			}
	}
}

static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool setup = false;

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ir_pre_enabled(iommu))
			iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	set_irq_posting_cap();

	pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");

	return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	intel_cleanup_irq_remapping();
	return -1;
}

static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_HPET_TBS; count++) {
		if (ir_hpet[count].iommu == iommu &&
		    ir_hpet[count].id == scope->enumeration_id)
			return 0;
		else if (ir_hpet[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max HPET blocks\n");
		return -ENOSPC;
	}

	ir_hpet[free].iommu = iommu;
	ir_hpet[free].id    = scope->enumeration_id;
	ir_hpet[free].bus   = bus;
	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
	pr_info("HPET id %d under DRHD base 0x%Lx\n",
		scope->enumeration_id, drhd->address);

	return 0;
}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus   = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id    = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	int ret = 0;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end && ret == 0) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
		start += scope->length;
	}

	return ret;
}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].iommu == iommu)
			ir_hpet[i].iommu = NULL;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].iommu == iommu)
			ir_ioapic[i].iommu = NULL;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ir_supported = false;
	int ioapic_idx;

	for_each_iommu(iommu, drhd) {
		int ret;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
		if (ret)
			return ret;

		ir_supported = true;
	}

	if (!ir_supported)
		return -ENODEV;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);

		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 0;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}

	/*
	 * Clear Posted-Interrupts capability.
	 */
	if (!disable_irq_post)
		intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	bool setup = false;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	set_irq_posting_cap();

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be set up in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info)
{
	struct intel_iommu *iommu = NULL;

	if (!info)
		return NULL;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		iommu = map_ioapic_to_ir(info->ioapic_id);
		break;
	case X86_IRQ_ALLOC_TYPE_HPET:
		iommu = map_hpet_to_ir(info->hpet_id);
		break;
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		iommu = map_dev_to_ir(info->msi_dev);
		break;
	default:
		break;
	}

	return iommu ? iommu->ir_domain : NULL;
}

static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
{
	struct intel_iommu *iommu;

	if (!info)
		return NULL;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		iommu = map_dev_to_ir(info->msi_dev);
		if (iommu)
			return iommu->ir_msi_domain;
		break;
	default:
		break;
	}

	return NULL;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.prepare		= intel_prepare_irq_remapping,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.get_ir_irq_domain	= intel_get_ir_irq_domain,
	.get_irq_domain		= intel_get_irq_domain,
};

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE followed by a flush
 * of the hardware interrupt entry cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information) by using a virtual vector (io-apic pin number).
 * The real vector that is used to interrupt the cpu comes from the
 * interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct intel_ir_data *ir_data = data->chip_data;
	struct irte *irte = &ir_data->irte_entry;
	struct irq_cfg *cfg = irqd_cfg(data);
	struct irq_data *parent = data->parent_data;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	/*
	 * Atomically update the IRTE with the new destination and vector
	 * and flush the interrupt entry cache.
	 */
	irte->vector = cfg->vector;
	irte->dest_id = IRTE_DEST(cfg->dest_apicid);

	/* Update the hardware only if the interrupt is in remapped mode. */
	if (ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
		modify_irte(&ir_data->irq_2_iommu, irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to clean up the previous
	 * vector allocation.
	 */
	send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
				     struct msi_msg *msg)
{
	struct intel_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
	struct intel_ir_data *ir_data = data->chip_data;
	struct vcpu_data *vcpu_pi_info = info;

	/* stop posting interrupts, back to remapping mode */
	if (!vcpu_pi_info) {
		modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
	} else {
		struct irte irte_pi;

		/*
		 * We are not caching the posted interrupt entry. We
		 * copy the data from the remapped entry and modify
		 * the fields which are relevant for posted mode. The
		 * cached remapped entry is used for switching back to
		 * remapping mode.
		 */
		memset(&irte_pi, 0, sizeof(irte_pi));
		dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);

		/* Update the posted mode fields */
		irte_pi.p_pst = 1;
		irte_pi.p_urgent = 0;
		irte_pi.p_vector = vcpu_pi_info->vector;
		irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
				(32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
		irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
				~(-1UL << PDA_HIGH_BIT);
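
		/*
		 * Worked example (hypothetical, 64-byte-aligned address):
		 * pi_desc_addr == 0x123456780 gives
		 * pda_l == (0x123456780 >> 6) & 0x3ffffff == 0x08d159e
		 * (address bits 31:6) and pda_h == 0x1 (bits 63:32), so
		 * the hardware reassembles
		 * (pda_h << 32) | (pda_l << 6) == 0x123456780.
		 */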

		modify_irte(&ir_data->irq_2_iommu, &irte_pi);
	}

	return 0;
}

static struct irq_chip intel_ir_chip = {
	.irq_ack		= ir_ack_apic_edge,
	.irq_set_affinity	= intel_ir_set_affinity,
	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
};

static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
					     struct irq_cfg *irq_cfg,
					     struct irq_alloc_info *info,
					     int index, int sub_handle)
{
	struct IR_IO_APIC_route_entry *entry;
	struct irte *irte = &data->irte_entry;
	struct msi_msg *msg = &data->msi_entry;

	prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);
	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		/* Set source-id of interrupt request */
		set_ioapic_sid(irte, info->ioapic_id);
		apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
			info->ioapic_id, irte->present, irte->fpd,
			irte->dst_mode, irte->redir_hint,
			irte->trigger_mode, irte->dlvry_mode,
			irte->avail, irte->vector, irte->dest_id,
			irte->sid, irte->sq, irte->svt);

		entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry;
		info->ioapic_entry = NULL;
		memset(entry, 0, sizeof(*entry));
		entry->index2	= (index >> 15) & 0x1;
		entry->zero	= 0;
		entry->format	= 1;
		entry->index	= (index & 0x7fff);
		/*
		 * IO-APIC RTE will be configured with the virtual vector.
		 * The irq handler will do the explicit EOI to the io-apic.
		 */
		entry->vector	= info->ioapic_pin;
		entry->mask	= 0;			/* enable IRQ */
		entry->trigger	= info->ioapic_trigger;
		entry->polarity	= info->ioapic_polarity;
		if (info->ioapic_trigger)
			entry->mask = 1; /* Mask level triggered irqs. */
		break;

	case X86_IRQ_ALLOC_TYPE_HPET:
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
			set_hpet_sid(irte, info->hpet_id);
		else
			set_msi_sid(irte, info->msi_dev);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(index) |
				  MSI_ADDR_IR_INDEX2(index);
		break;

	default:
		BUG_ON(1);
		break;
	}
}
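
/*
 * Worked example for the MSI encoding above (illustrative index): with
 * index == 0x8001, MSI_ADDR_IR_INDEX1() places bits 14:0 (0x0001) in
 * address bits 19:5 and MSI_ADDR_IR_INDEX2() places bit 15 in address
 * bit 2; with MSI_ADDR_IR_SHV set, the sub_handle in msg->data is added
 * to this index to select the final IRTE.
 */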

static void intel_free_irq_resources(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct intel_ir_data *data;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irq_iommu = &data->irq_2_iommu;
			raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
			clear_entries(irq_iommu);
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			irq_domain_reset_irq_data(irq_data);
			kfree(data);
		}
	}
}

static int intel_irq_remapping_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *arg)
{
	struct intel_iommu *iommu = domain->host_data;
	struct irq_alloc_info *info = arg;
	struct intel_ir_data *data, *ird;
	struct irq_data *irq_data;
	struct irq_cfg *irq_cfg;
	int i, ret, index;

	if (!info || !iommu)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
		return -EINVAL;

	/*
	 * With IRQ remapping enabled, we don't need contiguous CPU vectors
	 * to support multiple MSI interrupts.
	 */
	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_free_parent;

	down_read(&dmar_global_lock);
	index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
	up_read(&dmar_global_lock);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		kfree(data);
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		irq_cfg = irqd_cfg(irq_data);
		if (!irq_data || !irq_cfg) {
			ret = -EINVAL;
			goto out_free_data;
		}

		if (i > 0) {
			ird = kzalloc(sizeof(*ird), GFP_KERNEL);
			if (!ird)
				goto out_free_data;
			/* Initialize the common data */
			ird->irq_2_iommu = data->irq_2_iommu;
			ird->irq_2_iommu.sub_handle = i;
		} else {
			ird = data;
		}

		irq_data->hwirq = (index << 16) + i;
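		/*
		 * Worked example (hypothetical values): index == 2 and
		 * i == 1 yield hwirq == (2 << 16) + 1 == 0x20001, i.e. the
		 * IRTE block index in the high bits and the sub_handle in
		 * the low 16 bits.
		 */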
		irq_data->chip_data = ird;
		irq_data->chip = &intel_ir_chip;
		intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}

	return 0;

out_free_data:
	intel_free_irq_resources(domain, virq, i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}

static void intel_irq_remapping_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	intel_free_irq_resources(domain, virq, nr_irqs);
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static void intel_irq_remapping_activate(struct irq_domain *domain,
					 struct irq_data *irq_data)
{
	struct intel_ir_data *data = irq_data->chip_data;

	modify_irte(&data->irq_2_iommu, &data->irte_entry);
}

static void intel_irq_remapping_deactivate(struct irq_domain *domain,
					   struct irq_data *irq_data)
{
	struct intel_ir_data *data = irq_data->chip_data;
	struct irte entry;

	memset(&entry, 0, sizeof(entry));
	modify_irte(&data->irq_2_iommu, &entry);
}

static struct irq_domain_ops intel_ir_domain_ops = {
	.alloc		= intel_irq_remapping_alloc,
	.free		= intel_irq_remapping_free,
	.activate	= intel_irq_remapping_activate,
	.deactivate	= intel_irq_remapping_deactivate,
};

/*
 * Support of Interrupt Remapping Unit Hotplug
 */
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int ret;
	int eim = x2apic_enabled();

	if (eim && !ecap_eim_support(iommu->ecap)) {
		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			iommu->reg_phys, iommu->ecap);
		return -ENODEV;
	}

	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
			iommu->reg_phys);
		return -ENODEV;
	}

	/* TODO: check all IOAPICs are covered by IOMMU */

	/* Setup Interrupt-remapping now. */
	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		pr_err("Failed to setup irq remapping for %s\n",
		       iommu->name);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	} else {
		iommu_enable_irq_remapping(iommu);
	}

	return ret;
}

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!irq_remapping_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;
	if (irq_remapping_cap(IRQ_POSTING_CAP) &&
	    !cap_pi_support(iommu->cap))
		return -EBUSY;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else {
		if (iommu->ir_table) {
			if (!bitmap_empty(iommu->ir_table->bitmap,
					  INTR_REMAP_TABLE_ENTRIES)) {
				ret = -EBUSY;
			} else {
				iommu_disable_irq_remapping(iommu);
				intel_teardown_irq_remapping(iommu);
				ir_remove_ioapic_hpet_scope(iommu);
			}
		}
	}

	return ret;
}