1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright © 2006-2014 Intel Corporation.
5 * Authors: David Woodhouse <dwmw2@infradead.org>,
6 * Ashok Raj <ashok.raj@intel.com>,
7 * Shaohua Li <shaohua.li@intel.com>,
8 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
9 * Fenghua Yu <fenghua.yu@intel.com>
10 * Joerg Roedel <jroedel@suse.de>
13 #define pr_fmt(fmt) "DMAR: " fmt
14 #define dev_fmt(fmt) pr_fmt(fmt)
16 #include <linux/init.h>
17 #include <linux/bitmap.h>
18 #include <linux/debugfs.h>
19 #include <linux/export.h>
20 #include <linux/slab.h>
21 #include <linux/irq.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/pci.h>
25 #include <linux/dmar.h>
26 #include <linux/dma-map-ops.h>
27 #include <linux/mempool.h>
28 #include <linux/memory.h>
29 #include <linux/cpu.h>
30 #include <linux/timer.h>
32 #include <linux/iova.h>
33 #include <linux/iommu.h>
34 #include <linux/dma-iommu.h>
35 #include <linux/intel-iommu.h>
36 #include <linux/syscore_ops.h>
37 #include <linux/tboot.h>
38 #include <linux/dmi.h>
39 #include <linux/pci-ats.h>
40 #include <linux/memblock.h>
41 #include <linux/dma-direct.h>
42 #include <linux/crash_dump.h>
43 #include <linux/numa.h>
44 #include <asm/irq_remapping.h>
45 #include <asm/cacheflush.h>
46 #include <asm/iommu.h>
48 #include "../irq_remapping.h"
50 #include "cap_audit.h"
52 #define ROOT_SIZE VTD_PAGE_SIZE
53 #define CONTEXT_SIZE VTD_PAGE_SIZE
55 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
56 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
57 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
58 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
60 #define IOAPIC_RANGE_START (0xfee00000)
61 #define IOAPIC_RANGE_END (0xfeefffff)
62 #define IOVA_START_ADDR (0x1000)
64 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
66 #define MAX_AGAW_WIDTH 64
67 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
69 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
70 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
72 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
73 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
74 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
75 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
76 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
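/*
 * Worked example (illustrative, not from the original source): with
 * VTD_PAGE_SHIFT == 12 and gaw == 48, __DOMAIN_MAX_PFN(48) is
 * (1ULL << 36) - 1. On a 64-bit kernel that fits in an unsigned long, so
 * DOMAIN_MAX_PFN(48) returns it unchanged; on 32-bit it is clamped to
 * ULONG_MAX. DOMAIN_MAX_ADDR(48) is then 0xfffffffff000, i.e. 2^48 - 4KiB.
 */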
78 /* IO virtual address start page frame number */
79 #define IOVA_START_PFN (1)
81 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
83 /* page table handling */
84 #define LEVEL_STRIDE (9)
85 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
88 * This bitmap is used to advertise the page sizes our hardware supports
89 * to the IOMMU core, which will then use this information to split
90 * physically contiguous memory regions it is mapping into page sizes that we support.
93 * Traditionally the IOMMU core just handed us the mappings directly,
94 * after making sure the size was an order of a 4KiB page and that the
95 * mapping had natural alignment.
97 * To retain this behavior, we currently advertise that we support
98 * all page sizes that are a power-of-two multiple of 4KiB.
100 * If at some point we'd like to utilize the IOMMU core's new behavior,
101 * we could change this to advertise the real page sizes we support.
103 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
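/*
 * Illustrative note (assumption about how the core consumes this value):
 * the IOMMU core treats this as a pgsize_bitmap where bit N set means a
 * 2^N byte page size is supported, so ~0xFFFUL advertises every
 * power-of-two size of 4KiB and above. Advertising only the page sizes the
 * hardware can really use would look something like:
 *
 *	#define INTEL_IOMMU_PGSIZES	(SZ_4K | SZ_2M | SZ_1G)
 */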
105 static inline int agaw_to_level(int agaw)
110 static inline int agaw_to_width(int agaw)
112 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
115 static inline int width_to_agaw(int width)
117 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
120 static inline unsigned int level_to_offset_bits(int level)
122 return (level - 1) * LEVEL_STRIDE;
125 static inline int pfn_level_offset(u64 pfn, int level)
127 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
130 static inline u64 level_mask(int level)
132 return -1ULL << level_to_offset_bits(level);
135 static inline u64 level_size(int level)
137 return 1ULL << level_to_offset_bits(level);
140 static inline u64 align_to_level(u64 pfn, int level)
142 return (pfn + level_size(level) - 1) & level_mask(level);
145 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
147 return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
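/*
 * Worked example (illustrative): with LEVEL_STRIDE == 9, level 2 covers
 * PFN bits 9..17, so level_size(2) == 512 pages (2MiB of 4KiB pages),
 * level_mask(2) clears the low 9 bits of a PFN, and
 * pfn_level_offset(pfn, 2) == (pfn >> 9) & 0x1ff selects the level-2 index.
 */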
150 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
151 are never going to work. */
152 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
154 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
157 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
159 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
161 static inline unsigned long page_to_dma_pfn(struct page *pg)
163 return mm_to_dma_pfn(page_to_pfn(pg));
165 static inline unsigned long virt_to_dma_pfn(void *p)
167 return page_to_dma_pfn(virt_to_page(p));
170 /* global iommu list, set NULL for ignored DMAR units */
171 static struct intel_iommu **g_iommus;
173 static void __init check_tylersburg_isoch(void);
174 static int rwbf_quirk;
177 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
178 * (used when the kernel is launched with TXT)
180 static int force_on = 0;
181 static int intel_iommu_tboot_noforce;
182 static int no_platform_optin;
184 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
187 * Take a root_entry and return the Lower Context Table Pointer (LCTP) if marked present.
190 static phys_addr_t root_entry_lctp(struct root_entry *re)
195 return re->lo & VTD_PAGE_MASK;
199 * Take a root_entry and return the Upper Context Table Pointer (UCTP) if marked present.
202 static phys_addr_t root_entry_uctp(struct root_entry *re)
207 return re->hi & VTD_PAGE_MASK;
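/*
 * Layout sketch implied by the two helpers above (not quoted from the spec):
 * each 16-byte root entry covers one bus. In scalable mode, bits 63:12 of
 * the low quadword (LCTP) point to the context table for devfn 0x00-0x7f
 * and the same bits of the high quadword (UCTP) cover devfn 0x80-0xff;
 * legacy mode only uses the low quadword.
 */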
210 static inline void context_clear_pasid_enable(struct context_entry *context)
212 context->lo &= ~(1ULL << 11);
215 static inline bool context_pasid_enabled(struct context_entry *context)
217 return !!(context->lo & (1ULL << 11));
220 static inline void context_set_copied(struct context_entry *context)
222 context->hi |= (1ull << 3);
225 static inline bool context_copied(struct context_entry *context)
227 return !!(context->hi & (1ULL << 3));
230 static inline bool __context_present(struct context_entry *context)
232 return (context->lo & 1);
235 bool context_present(struct context_entry *context)
237 return context_pasid_enabled(context) ?
238 __context_present(context) :
239 __context_present(context) && !context_copied(context);
242 static inline void context_set_present(struct context_entry *context)
247 static inline void context_set_fault_enable(struct context_entry *context)
249 context->lo &= (((u64)-1) << 2) | 1;
252 static inline void context_set_translation_type(struct context_entry *context,
255 context->lo &= (((u64)-1) << 4) | 3;
256 context->lo |= (value & 3) << 2;
259 static inline void context_set_address_root(struct context_entry *context,
262 context->lo &= ~VTD_PAGE_MASK;
263 context->lo |= value & VTD_PAGE_MASK;
266 static inline void context_set_address_width(struct context_entry *context,
269 context->hi |= value & 7;
272 static inline void context_set_domain_id(struct context_entry *context,
275 context->hi |= (value & ((1 << 16) - 1)) << 8;
278 static inline int context_domain_id(struct context_entry *c)
280 return((c->hi >> 8) & 0xffff);
283 static inline void context_clear_entry(struct context_entry *context)
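/*
 * Legacy context-entry layout as implied by the setters above (sketch,
 * derived from the bit arithmetic rather than quoted from the spec):
 * lo[0] present, lo[1] fault processing disable, lo[3:2] translation type,
 * lo[63:12] second-level page-table root; hi[2:0] address width,
 * hi[23:8] domain id.
 */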
290 * This domain is a static identity mapping domain.
291 * 1. This domain creates a static 1:1 mapping to all usable memory.
292 * 2. It maps to each iommu if successful.
293 * 3. Each iommu maps to this domain if successful.
295 static struct dmar_domain *si_domain;
296 static int hw_pass_through = 1;
298 #define for_each_domain_iommu(idx, domain) \
299 for (idx = 0; idx < g_num_of_iommus; idx++) \
300 if (domain->iommu_refcnt[idx])
302 struct dmar_rmrr_unit {
303 struct list_head list; /* list of rmrr units */
304 struct acpi_dmar_header *hdr; /* ACPI header */
305 u64 base_address; /* reserved base address*/
306 u64 end_address; /* reserved end address */
307 struct dmar_dev_scope *devices; /* target devices */
308 int devices_cnt; /* target device count */
311 struct dmar_atsr_unit {
312 struct list_head list; /* list of ATSR units */
313 struct acpi_dmar_header *hdr; /* ACPI header */
314 struct dmar_dev_scope *devices; /* target devices */
315 int devices_cnt; /* target device count */
316 u8 include_all:1; /* include all ports */
319 struct dmar_satc_unit {
320 struct list_head list; /* list of SATC units */
321 struct acpi_dmar_header *hdr; /* ACPI header */
322 struct dmar_dev_scope *devices; /* target devices */
323 struct intel_iommu *iommu; /* the corresponding iommu */
324 int devices_cnt; /* target device count */
325 u8 atc_required:1; /* ATS is required */
328 static LIST_HEAD(dmar_atsr_units);
329 static LIST_HEAD(dmar_rmrr_units);
330 static LIST_HEAD(dmar_satc_units);
332 #define for_each_rmrr_units(rmrr) \
333 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
335 /* number of registered IOMMUs, used to size and index g_iommus */
336 static int g_num_of_iommus;
338 static void domain_exit(struct dmar_domain *domain);
339 static void domain_remove_dev_info(struct dmar_domain *domain);
340 static void dmar_remove_one_dev_info(struct device *dev);
341 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
342 static int intel_iommu_attach_device(struct iommu_domain *domain,
344 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
347 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
348 int dmar_disabled = 0;
350 int dmar_disabled = 1;
351 #endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
353 #ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
354 int intel_iommu_sm = 1;
357 #endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
359 int intel_iommu_enabled = 0;
360 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
362 static int dmar_map_gfx = 1;
363 static int intel_iommu_strict;
364 static int intel_iommu_superpage = 1;
365 static int iommu_identity_mapping;
366 static int iommu_skip_te_disable;
368 #define IDENTMAP_GFX 2
369 #define IDENTMAP_AZALIA 4
371 int intel_iommu_gfx_mapped;
372 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
374 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
375 struct device_domain_info *get_domain_info(struct device *dev)
377 struct device_domain_info *info;
382 info = dev_iommu_priv_get(dev);
383 if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
389 DEFINE_SPINLOCK(device_domain_lock);
390 static LIST_HEAD(device_domain_list);
393 * Iterate over elements in device_domain_list and call the specified
394 * callback @fn against each element.
396 int for_each_device_domain(int (*fn)(struct device_domain_info *info,
397 void *data), void *data)
401 struct device_domain_info *info;
403 spin_lock_irqsave(&device_domain_lock, flags);
404 list_for_each_entry(info, &device_domain_list, global) {
405 ret = fn(info, data);
407 spin_unlock_irqrestore(&device_domain_lock, flags);
411 spin_unlock_irqrestore(&device_domain_lock, flags);
416 const struct iommu_ops intel_iommu_ops;
418 static bool translation_pre_enabled(struct intel_iommu *iommu)
420 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
423 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
425 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
428 static void init_translation_status(struct intel_iommu *iommu)
432 gsts = readl(iommu->reg + DMAR_GSTS_REG);
433 if (gsts & DMA_GSTS_TES)
434 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
437 static int __init intel_iommu_setup(char *str)
442 if (!strncmp(str, "on", 2)) {
444 pr_info("IOMMU enabled\n");
445 } else if (!strncmp(str, "off", 3)) {
447 no_platform_optin = 1;
448 pr_info("IOMMU disabled\n");
449 } else if (!strncmp(str, "igfx_off", 8)) {
451 pr_info("Disable GFX device mapping\n");
452 } else if (!strncmp(str, "forcedac", 8)) {
453 pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
454 iommu_dma_forcedac = true;
455 } else if (!strncmp(str, "strict", 6)) {
456 pr_info("Disable batched IOTLB flush\n");
457 intel_iommu_strict = 1;
458 } else if (!strncmp(str, "sp_off", 6)) {
459 pr_info("Disable supported super page\n");
460 intel_iommu_superpage = 0;
461 } else if (!strncmp(str, "sm_on", 5)) {
462 pr_info("Intel-IOMMU: scalable mode supported\n");
464 } else if (!strncmp(str, "tboot_noforce", 13)) {
465 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
466 intel_iommu_tboot_noforce = 1;
469 str += strcspn(str, ",");
475 __setup("intel_iommu=", intel_iommu_setup);
477 static struct kmem_cache *iommu_domain_cache;
478 static struct kmem_cache *iommu_devinfo_cache;
480 static struct dmar_domain *get_iommu_domain(struct intel_iommu *iommu, u16 did)
482 struct dmar_domain **domains;
485 domains = iommu->domains[idx];
489 return domains[did & 0xff];
492 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
493 struct dmar_domain *domain)
495 struct dmar_domain **domains;
498 if (!iommu->domains[idx]) {
499 size_t size = 256 * sizeof(struct dmar_domain *);
500 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
503 domains = iommu->domains[idx];
504 if (WARN_ON(!domains))
507 domains[did & 0xff] = domain;
510 void *alloc_pgtable_page(int node)
515 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
517 vaddr = page_address(page);
521 void free_pgtable_page(void *vaddr)
523 free_page((unsigned long)vaddr);
526 static inline void *alloc_domain_mem(void)
528 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
531 static void free_domain_mem(void *vaddr)
533 kmem_cache_free(iommu_domain_cache, vaddr);
536 static inline void *alloc_devinfo_mem(void)
538 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
541 static inline void free_devinfo_mem(void *vaddr)
543 kmem_cache_free(iommu_devinfo_cache, vaddr);
546 static inline int domain_type_is_si(struct dmar_domain *domain)
548 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
551 static inline bool domain_use_first_level(struct dmar_domain *domain)
553 return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
556 static inline int domain_pfn_supported(struct dmar_domain *domain,
559 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
561 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
564 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
569 sagaw = cap_sagaw(iommu->cap);
570 for (agaw = width_to_agaw(max_gaw);
572 if (test_bit(agaw, &sagaw))
580 * Calculate max SAGAW for each iommu.
582 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
584 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
588 * Calculate agaw for each iommu.
589 * "SAGAW" may be different across iommus; use a default agaw, and
590 * fall back to a smaller supported agaw for iommus that don't support the default.
592 int iommu_calculate_agaw(struct intel_iommu *iommu)
594 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
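/*
 * Worked example (illustrative): with the 57-bit default width,
 * width_to_agaw(57) == 3 (5-level tables). If an iommu's SAGAW field only
 * advertises bit 2 (4-level, 48-bit), __iommu_calculate_agaw() steps down
 * until it finds a set bit and returns 2, so that unit runs with 48-bit
 * second-level page tables.
 */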
597 /* This function only returns a single iommu in a domain */
598 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
602 /* si_domain and vm domain should not get here. */
603 if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
606 for_each_domain_iommu(iommu_id, domain)
609 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
612 return g_iommus[iommu_id];
615 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
617 return sm_supported(iommu) ?
618 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
621 static void domain_update_iommu_coherency(struct dmar_domain *domain)
623 struct dmar_drhd_unit *drhd;
624 struct intel_iommu *iommu;
628 domain->iommu_coherency = 1;
630 for_each_domain_iommu(i, domain) {
632 if (!iommu_paging_structure_coherency(g_iommus[i])) {
633 domain->iommu_coherency = 0;
640 /* No hardware attached; use lowest common denominator */
642 for_each_active_iommu(iommu, drhd) {
643 if (!iommu_paging_structure_coherency(iommu)) {
644 domain->iommu_coherency = 0;
651 static int domain_update_iommu_snooping(struct intel_iommu *skip)
653 struct dmar_drhd_unit *drhd;
654 struct intel_iommu *iommu;
658 for_each_active_iommu(iommu, drhd) {
661 * If the hardware is operating in scalable mode,
662 * snooping control is always supported, since we
663 * always set the PASID-table-entry.PGSNP bit if the domain
664 * is managed externally (UNMANAGED).
666 if (!sm_supported(iommu) &&
667 !ecap_sc_support(iommu->ecap)) {
678 static int domain_update_iommu_superpage(struct dmar_domain *domain,
679 struct intel_iommu *skip)
681 struct dmar_drhd_unit *drhd;
682 struct intel_iommu *iommu;
685 if (!intel_iommu_superpage) {
689 /* set iommu_superpage to the smallest common denominator */
691 for_each_active_iommu(iommu, drhd) {
693 if (domain && domain_use_first_level(domain)) {
694 if (!cap_fl1gp_support(iommu->cap))
697 mask &= cap_super_page_val(iommu->cap);
709 static int domain_update_device_node(struct dmar_domain *domain)
711 struct device_domain_info *info;
712 int nid = NUMA_NO_NODE;
714 assert_spin_locked(&device_domain_lock);
716 if (list_empty(&domain->devices))
719 list_for_each_entry(info, &domain->devices, link) {
724 * There could be multiple device NUMA nodes, as devices
725 * within the same domain may sit behind different IOMMUs. There
726 * is no perfect answer in such a situation, so we use a first
727 * come, first served policy.
729 nid = dev_to_node(info->dev);
730 if (nid != NUMA_NO_NODE)
737 static void domain_update_iotlb(struct dmar_domain *domain);
739 /* Some capabilities may be different across iommus */
740 static void domain_update_iommu_cap(struct dmar_domain *domain)
742 domain_update_iommu_coherency(domain);
743 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
744 domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
747 * If RHSA is missing, we should default to the device numa domain as well.
750 if (domain->nid == NUMA_NO_NODE)
751 domain->nid = domain_update_device_node(domain);
754 * First-level translation restricts the input-address to a
755 * canonical address (i.e., address bits 63:N have the same
756 * value as address bit [N-1], where N is 48-bits with 4-level
757 * paging and 57-bits with 5-level paging). Hence, skip bit [N-1].
760 if (domain_use_first_level(domain))
761 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
763 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
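/*
 * Example (illustrative): with a 48-bit guest address width, a first-level
 * domain gets aperture_end == __DOMAIN_MAX_ADDR(47) == (1ULL << 47) - 1,
 * keeping all IOVAs in the lower canonical half, while a second-level
 * domain of the same width may use the full (1ULL << 48) - 1.
 */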
765 domain_update_iotlb(domain);
768 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
771 struct root_entry *root = &iommu->root_entry[bus];
772 struct context_entry *context;
776 if (sm_supported(iommu)) {
784 context = phys_to_virt(*entry & VTD_PAGE_MASK);
786 unsigned long phy_addr;
790 context = alloc_pgtable_page(iommu->node);
794 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
795 phy_addr = virt_to_phys((void *)context);
796 *entry = phy_addr | 1;
797 __iommu_flush_cache(iommu, entry, sizeof(*entry));
799 return &context[devfn];
802 static bool attach_deferred(struct device *dev)
804 return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
808 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
809 * sub-hierarchy of a candidate PCI-PCI bridge
810 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
811 * @bridge: the candidate PCI-PCI bridge
813 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
816 is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
818 struct pci_dev *pdev, *pbridge;
820 if (!dev_is_pci(dev) || !dev_is_pci(bridge))
823 pdev = to_pci_dev(dev);
824 pbridge = to_pci_dev(bridge);
826 if (pbridge->subordinate &&
827 pbridge->subordinate->number <= pdev->bus->number &&
828 pbridge->subordinate->busn_res.end >= pdev->bus->number)
834 static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
836 struct dmar_drhd_unit *drhd;
840 /* We know that this device on this chipset has its own IOMMU.
841 * If we find it under a different IOMMU, then the BIOS is lying
842 * to us. Hope that the IOMMU for this device is actually
843 * disabled, and it needs no translation...
845 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
848 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
853 /* we know that this iommu should be at offset 0xa000 from vtbar */
854 drhd = dmar_find_matched_drhd_unit(pdev);
855 if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
856 pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
857 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
864 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
866 if (!iommu || iommu->drhd->ignored)
869 if (dev_is_pci(dev)) {
870 struct pci_dev *pdev = to_pci_dev(dev);
872 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
873 pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
874 quirk_ioat_snb_local_iommu(pdev))
881 struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
883 struct dmar_drhd_unit *drhd = NULL;
884 struct pci_dev *pdev = NULL;
885 struct intel_iommu *iommu;
893 if (dev_is_pci(dev)) {
894 struct pci_dev *pf_pdev;
896 pdev = pci_real_dma_dev(to_pci_dev(dev));
898 /* VFs aren't listed in scope tables; we need to look up
899 * the PF instead to find the IOMMU. */
900 pf_pdev = pci_physfn(pdev);
902 segment = pci_domain_nr(pdev->bus);
903 } else if (has_acpi_companion(dev))
904 dev = &ACPI_COMPANION(dev)->dev;
907 for_each_iommu(iommu, drhd) {
908 if (pdev && segment != drhd->segment)
911 for_each_active_dev_scope(drhd->devices,
912 drhd->devices_cnt, i, tmp) {
914 /* For a VF use its original BDF# not that of the PF
915 * which we used for the IOMMU lookup. Strictly speaking
916 * we could do this for all PCI devices; we only need to
917 * get the BDF# from the scope table for ACPI matches. */
918 if (pdev && pdev->is_virtfn)
922 *bus = drhd->devices[i].bus;
923 *devfn = drhd->devices[i].devfn;
928 if (is_downstream_to_pci_bridge(dev, tmp))
932 if (pdev && drhd->include_all) {
935 *bus = pdev->bus->number;
936 *devfn = pdev->devfn;
943 if (iommu_is_dummy(iommu, dev))
951 static void domain_flush_cache(struct dmar_domain *domain,
952 void *addr, int size)
954 if (!domain->iommu_coherency)
955 clflush_cache_range(addr, size);
958 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
960 struct context_entry *context;
964 spin_lock_irqsave(&iommu->lock, flags);
965 context = iommu_context_addr(iommu, bus, devfn, 0);
967 ret = context_present(context);
968 spin_unlock_irqrestore(&iommu->lock, flags);
972 static void free_context_table(struct intel_iommu *iommu)
976 struct context_entry *context;
978 spin_lock_irqsave(&iommu->lock, flags);
979 if (!iommu->root_entry) {
982 for (i = 0; i < ROOT_ENTRY_NR; i++) {
983 context = iommu_context_addr(iommu, i, 0, 0);
985 free_pgtable_page(context);
987 if (!sm_supported(iommu))
990 context = iommu_context_addr(iommu, i, 0x80, 0);
992 free_pgtable_page(context);
995 free_pgtable_page(iommu->root_entry);
996 iommu->root_entry = NULL;
998 spin_unlock_irqrestore(&iommu->lock, flags);
1001 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
1002 unsigned long pfn, int *target_level)
1004 struct dma_pte *parent, *pte;
1005 int level = agaw_to_level(domain->agaw);
1008 BUG_ON(!domain->pgd);
1010 if (!domain_pfn_supported(domain, pfn))
1011 /* Address beyond IOMMU's addressing capabilities. */
1014 parent = domain->pgd;
1019 offset = pfn_level_offset(pfn, level);
1020 pte = &parent[offset];
1021 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1023 if (level == *target_level)
1026 if (!dma_pte_present(pte)) {
1029 tmp_page = alloc_pgtable_page(domain->nid);
1034 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1035 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1036 if (domain_use_first_level(domain)) {
1037 pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
1038 if (domain->domain.type == IOMMU_DOMAIN_DMA)
1039 pteval |= DMA_FL_PTE_ACCESS;
1041 if (cmpxchg64(&pte->val, 0ULL, pteval))
1042 /* Someone else set it while we were thinking; use theirs. */
1043 free_pgtable_page(tmp_page);
1045 domain_flush_cache(domain, pte, sizeof(*pte));
1050 parent = phys_to_virt(dma_pte_addr(pte));
1055 *target_level = level;
1060 /* return the pte for an address at a specific level */
1061 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1063 int level, int *large_page)
1065 struct dma_pte *parent, *pte;
1066 int total = agaw_to_level(domain->agaw);
1069 parent = domain->pgd;
1070 while (level <= total) {
1071 offset = pfn_level_offset(pfn, total);
1072 pte = &parent[offset];
1076 if (!dma_pte_present(pte)) {
1077 *large_page = total;
1081 if (dma_pte_superpage(pte)) {
1082 *large_page = total;
1086 parent = phys_to_virt(dma_pte_addr(pte));
1092 /* clear last level pte; a tlb flush should follow */
1093 static void dma_pte_clear_range(struct dmar_domain *domain,
1094 unsigned long start_pfn,
1095 unsigned long last_pfn)
1097 unsigned int large_page;
1098 struct dma_pte *first_pte, *pte;
1100 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1101 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1102 BUG_ON(start_pfn > last_pfn);
1104 /* we don't need lock here; nobody else touches the iova range */
1107 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1109 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1114 start_pfn += lvl_to_nr_pages(large_page);
1116 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1118 domain_flush_cache(domain, first_pte,
1119 (void *)pte - (void *)first_pte);
1121 } while (start_pfn && start_pfn <= last_pfn);
1124 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1125 int retain_level, struct dma_pte *pte,
1126 unsigned long pfn, unsigned long start_pfn,
1127 unsigned long last_pfn)
1129 pfn = max(start_pfn, pfn);
1130 pte = &pte[pfn_level_offset(pfn, level)];
1133 unsigned long level_pfn;
1134 struct dma_pte *level_pte;
1136 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1139 level_pfn = pfn & level_mask(level);
1140 level_pte = phys_to_virt(dma_pte_addr(pte));
1143 dma_pte_free_level(domain, level - 1, retain_level,
1144 level_pte, level_pfn, start_pfn,
1149 * Free the page table if we're below the level we want to
1150 * retain and the range covers the entire table.
1152 if (level < retain_level && !(start_pfn > level_pfn ||
1153 last_pfn < level_pfn + level_size(level) - 1)) {
1155 domain_flush_cache(domain, pte, sizeof(*pte));
1156 free_pgtable_page(level_pte);
1159 pfn += level_size(level);
1160 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1164 * clear last level (leaf) ptes and free page table pages below the
1165 * level we wish to keep intact.
1167 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1168 unsigned long start_pfn,
1169 unsigned long last_pfn,
1172 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1173 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1174 BUG_ON(start_pfn > last_pfn);
1176 dma_pte_clear_range(domain, start_pfn, last_pfn);
1178 /* We don't need lock here; nobody else touches the iova range */
1179 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1180 domain->pgd, 0, start_pfn, last_pfn);
1183 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1184 free_pgtable_page(domain->pgd);
1189 /* When a page at a given level is being unlinked from its parent, we don't
1190 need to *modify* it at all. All we need to do is make a list of all the
1191 pages which can be freed just as soon as we've flushed the IOTLB and we
1192 know the hardware page-walk will no longer touch them.
1193 The 'pte' argument is the *parent* PTE, pointing to the page that is to be freed.
1195 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1196 int level, struct dma_pte *pte,
1197 struct page *freelist)
1201 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1202 pg->freelist = freelist;
1208 pte = page_address(pg);
1210 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1211 freelist = dma_pte_list_pagetables(domain, level - 1,
1214 } while (!first_pte_in_page(pte));
1219 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1220 struct dma_pte *pte, unsigned long pfn,
1221 unsigned long start_pfn,
1222 unsigned long last_pfn,
1223 struct page *freelist)
1225 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1227 pfn = max(start_pfn, pfn);
1228 pte = &pte[pfn_level_offset(pfn, level)];
1231 unsigned long level_pfn;
1233 if (!dma_pte_present(pte))
1236 level_pfn = pfn & level_mask(level);
1238 /* If range covers entire pagetable, free it */
1239 if (start_pfn <= level_pfn &&
1240 last_pfn >= level_pfn + level_size(level) - 1) {
1241 /* These subordinate page tables are going away entirely. Don't
1242 bother to clear them; we're just going to *free* them. */
1243 if (level > 1 && !dma_pte_superpage(pte))
1244 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1250 } else if (level > 1) {
1251 /* Recurse down into a level that isn't *entirely* obsolete */
1252 freelist = dma_pte_clear_level(domain, level - 1,
1253 phys_to_virt(dma_pte_addr(pte)),
1254 level_pfn, start_pfn, last_pfn,
1258 pfn += level_size(level);
1259 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1262 domain_flush_cache(domain, first_pte,
1263 (void *)++last_pte - (void *)first_pte);
1268 /* We can't just free the pages because the IOMMU may still be walking
1269 the page tables, and may have cached the intermediate levels. The
1270 pages can only be freed after the IOTLB flush has been done. */
1271 static struct page *domain_unmap(struct dmar_domain *domain,
1272 unsigned long start_pfn,
1273 unsigned long last_pfn,
1274 struct page *freelist)
1276 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1277 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1278 BUG_ON(start_pfn > last_pfn);
1280 /* we don't need lock here; nobody else touches the iova range */
1281 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1282 domain->pgd, 0, start_pfn, last_pfn,
1286 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1287 struct page *pgd_page = virt_to_page(domain->pgd);
1288 pgd_page->freelist = freelist;
1289 freelist = pgd_page;
1297 static void dma_free_pagelist(struct page *freelist)
1301 while ((pg = freelist)) {
1302 freelist = pg->freelist;
1303 free_pgtable_page(page_address(pg));
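/*
 * Typical call sequence (sketch, following the comment above domain_unmap();
 * not a new code path): unmap first, flush the IOTLB on every iommu the
 * domain is attached to, and only then free the chained page-table pages.
 *
 *	freelist = domain_unmap(domain, start_pfn, last_pfn, NULL);
 *	for_each_domain_iommu(i, domain)
 *		iommu_flush_iotlb_psi(g_iommus[i], domain, start_pfn,
 *				      last_pfn - start_pfn + 1, 0, 0);
 *	dma_free_pagelist(freelist);
 */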
1307 /* iommu handling */
1308 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1310 struct root_entry *root;
1311 unsigned long flags;
1313 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1315 pr_err("Allocating root entry for %s failed\n",
1320 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1322 spin_lock_irqsave(&iommu->lock, flags);
1323 iommu->root_entry = root;
1324 spin_unlock_irqrestore(&iommu->lock, flags);
1329 static void iommu_set_root_entry(struct intel_iommu *iommu)
1335 addr = virt_to_phys(iommu->root_entry);
1336 if (sm_supported(iommu))
1337 addr |= DMA_RTADDR_SMT;
1339 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1340 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1342 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1344 /* Make sure hardware completes it */
1345 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1346 readl, (sts & DMA_GSTS_RTPS), sts);
1348 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1350 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
1351 if (sm_supported(iommu))
1352 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
1353 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1356 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1361 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1364 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1365 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1367 /* Make sure hardware completes it */
1368 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1369 readl, (!(val & DMA_GSTS_WBFS)), val);
1371 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1374 /* return value determines if we need a write buffer flush */
1375 static void __iommu_flush_context(struct intel_iommu *iommu,
1376 u16 did, u16 source_id, u8 function_mask,
1383 case DMA_CCMD_GLOBAL_INVL:
1384 val = DMA_CCMD_GLOBAL_INVL;
1386 case DMA_CCMD_DOMAIN_INVL:
1387 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1389 case DMA_CCMD_DEVICE_INVL:
1390 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1391 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1396 val |= DMA_CCMD_ICC;
1398 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1399 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1401 /* Make sure hardware completes it */
1402 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1403 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1405 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1408 /* return value determines if we need a write buffer flush */
1409 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1410 u64 addr, unsigned int size_order, u64 type)
1412 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1413 u64 val = 0, val_iva = 0;
1417 case DMA_TLB_GLOBAL_FLUSH:
1418 /* global flush doesn't need to set IVA_REG */
1419 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1421 case DMA_TLB_DSI_FLUSH:
1422 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1424 case DMA_TLB_PSI_FLUSH:
1425 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1426 /* IH bit is passed in as part of address */
1427 val_iva = size_order | addr;
1432 /* Note: set drain read/write */
1435 * This is probably only there to be extra safe. It looks like we can
1436 * ignore it without any impact.
1438 if (cap_read_drain(iommu->cap))
1439 val |= DMA_TLB_READ_DRAIN;
1441 if (cap_write_drain(iommu->cap))
1442 val |= DMA_TLB_WRITE_DRAIN;
1444 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1445 /* Note: Only uses first TLB reg currently */
1447 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1448 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1450 /* Make sure hardware completes it */
1451 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1452 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1454 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1456 /* check IOTLB invalidation granularity */
1457 if (DMA_TLB_IAIG(val) == 0)
1458 pr_err("Flush IOTLB failed\n");
1459 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1460 pr_debug("TLB flush request %Lx, actual %Lx\n",
1461 (unsigned long long)DMA_TLB_IIRG(type),
1462 (unsigned long long)DMA_TLB_IAIG(val));
1465 static struct device_domain_info *
1466 iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1469 struct device_domain_info *info;
1471 assert_spin_locked(&device_domain_lock);
1476 list_for_each_entry(info, &domain->devices, link)
1477 if (info->iommu == iommu && info->bus == bus &&
1478 info->devfn == devfn) {
1479 if (info->ats_supported && info->dev)
1487 static void domain_update_iotlb(struct dmar_domain *domain)
1489 struct device_domain_info *info;
1490 bool has_iotlb_device = false;
1492 assert_spin_locked(&device_domain_lock);
1494 list_for_each_entry(info, &domain->devices, link)
1495 if (info->ats_enabled) {
1496 has_iotlb_device = true;
1500 if (!has_iotlb_device) {
1501 struct subdev_domain_info *sinfo;
1503 list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
1504 info = get_domain_info(sinfo->pdev);
1505 if (info && info->ats_enabled) {
1506 has_iotlb_device = true;
1512 domain->has_iotlb_device = has_iotlb_device;
1515 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1517 struct pci_dev *pdev;
1519 assert_spin_locked(&device_domain_lock);
1521 if (!info || !dev_is_pci(info->dev))
1524 pdev = to_pci_dev(info->dev);
1525 /* For an IOMMU that supports device IOTLB throttling (DIT), we assign
1526 * a PFSID to the invalidation desc of a VF so that IOMMU HW can gauge
1527 * queue depth at the PF level. If DIT is not set, PFSID is treated as
1528 * reserved and should be set to 0.
1530 if (!ecap_dit(info->iommu->ecap))
1533 struct pci_dev *pf_pdev;
1535 /* pdev will be returned if device is not a vf */
1536 pf_pdev = pci_physfn(pdev);
1537 info->pfsid = pci_dev_id(pf_pdev);
1540 #ifdef CONFIG_INTEL_IOMMU_SVM
1541 /* The PCIe spec, in its wisdom, declares that the behaviour of
1542 the device if you enable PASID support after ATS support is
1543 undefined. So always enable PASID support on devices which
1544 have it, even if we can't yet know if we're ever going to use it.
1546 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1547 info->pasid_enabled = 1;
1549 if (info->pri_supported &&
1550 (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
1551 !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1552 info->pri_enabled = 1;
1554 if (info->ats_supported && pci_ats_page_aligned(pdev) &&
1555 !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1556 info->ats_enabled = 1;
1557 domain_update_iotlb(info->domain);
1558 info->ats_qdep = pci_ats_queue_depth(pdev);
1562 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1564 struct pci_dev *pdev;
1566 assert_spin_locked(&device_domain_lock);
1568 if (!dev_is_pci(info->dev))
1571 pdev = to_pci_dev(info->dev);
1573 if (info->ats_enabled) {
1574 pci_disable_ats(pdev);
1575 info->ats_enabled = 0;
1576 domain_update_iotlb(info->domain);
1578 #ifdef CONFIG_INTEL_IOMMU_SVM
1579 if (info->pri_enabled) {
1580 pci_disable_pri(pdev);
1581 info->pri_enabled = 0;
1583 if (info->pasid_enabled) {
1584 pci_disable_pasid(pdev);
1585 info->pasid_enabled = 0;
1590 static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
1591 u64 addr, unsigned int mask)
1595 if (!info || !info->ats_enabled)
1598 sid = info->bus << 8 | info->devfn;
1599 qdep = info->ats_qdep;
1600 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1604 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1605 u64 addr, unsigned mask)
1607 unsigned long flags;
1608 struct device_domain_info *info;
1609 struct subdev_domain_info *sinfo;
1611 if (!domain->has_iotlb_device)
1614 spin_lock_irqsave(&device_domain_lock, flags);
1615 list_for_each_entry(info, &domain->devices, link)
1616 __iommu_flush_dev_iotlb(info, addr, mask);
1618 list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
1619 info = get_domain_info(sinfo->pdev);
1620 __iommu_flush_dev_iotlb(info, addr, mask);
1622 spin_unlock_irqrestore(&device_domain_lock, flags);
1625 static void domain_flush_piotlb(struct intel_iommu *iommu,
1626 struct dmar_domain *domain,
1627 u64 addr, unsigned long npages, bool ih)
1629 u16 did = domain->iommu_did[iommu->seq_id];
1631 if (domain->default_pasid)
1632 qi_flush_piotlb(iommu, did, domain->default_pasid,
1635 if (!list_empty(&domain->devices))
1636 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
1639 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1640 struct dmar_domain *domain,
1641 unsigned long pfn, unsigned int pages,
1644 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1645 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1646 u16 did = domain->iommu_did[iommu->seq_id];
1653 if (domain_use_first_level(domain)) {
1654 domain_flush_piotlb(iommu, domain, addr, pages, ih);
1657 * Fall back to domain-selective flush if there is no PSI support or
1658 * the size is too big. PSI requires the page size to be 2^x,
1659 * and the base address to be naturally aligned to the size.
1661 if (!cap_pgsel_inv(iommu->cap) ||
1662 mask > cap_max_amask_val(iommu->cap))
1663 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1666 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1671 * In caching mode, changes of pages from non-present to present require
1672 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1674 if (!cap_caching_mode(iommu->cap) || !map)
1675 iommu_flush_dev_iotlb(domain, addr, mask);
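/*
 * Worked example (illustrative): a 9-page flush gives
 * mask = ilog2(__roundup_pow_of_two(9)) == 4, so the hardware invalidates
 * a naturally aligned 16-page (64KiB) window; if that mask exceeded
 * cap_max_amask_val(), the code above would fall back to a
 * domain-selective flush instead.
 */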
1678 /* Notification for newly created mappings */
1679 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1680 struct dmar_domain *domain,
1681 unsigned long pfn, unsigned int pages)
1684 * It's a non-present to present mapping. Only flush if caching mode and second level.
1687 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
1688 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1690 iommu_flush_write_buffer(iommu);
1693 static void intel_flush_iotlb_all(struct iommu_domain *domain)
1695 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
1698 for_each_domain_iommu(idx, dmar_domain) {
1699 struct intel_iommu *iommu = g_iommus[idx];
1700 u16 did = dmar_domain->iommu_did[iommu->seq_id];
1702 if (domain_use_first_level(dmar_domain))
1703 domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0);
1705 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1708 if (!cap_caching_mode(iommu->cap))
1709 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1710 0, MAX_AGAW_PFN_WIDTH);
1714 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1717 unsigned long flags;
1719 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1722 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1723 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1724 pmen &= ~DMA_PMEN_EPM;
1725 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1727 /* wait for the protected region status bit to clear */
1728 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1729 readl, !(pmen & DMA_PMEN_PRS), pmen);
1731 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1734 static void iommu_enable_translation(struct intel_iommu *iommu)
1737 unsigned long flags;
1739 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1740 iommu->gcmd |= DMA_GCMD_TE;
1741 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1743 /* Make sure hardware completes it */
1744 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1745 readl, (sts & DMA_GSTS_TES), sts);
1747 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1750 static void iommu_disable_translation(struct intel_iommu *iommu)
1755 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
1756 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
1759 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1760 iommu->gcmd &= ~DMA_GCMD_TE;
1761 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1763 /* Make sure hardware completes it */
1764 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1765 readl, (!(sts & DMA_GSTS_TES)), sts);
1767 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1770 static int iommu_init_domains(struct intel_iommu *iommu)
1772 u32 ndomains, nlongs;
1775 ndomains = cap_ndoms(iommu->cap);
1776 pr_debug("%s: Number of Domains supported <%d>\n",
1777 iommu->name, ndomains);
1778 nlongs = BITS_TO_LONGS(ndomains);
1780 spin_lock_init(&iommu->lock);
1782 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1783 if (!iommu->domain_ids) {
1784 pr_err("%s: Allocating domain id array failed\n",
1789 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1790 iommu->domains = kzalloc(size, GFP_KERNEL);
1792 if (iommu->domains) {
1793 size = 256 * sizeof(struct dmar_domain *);
1794 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1797 if (!iommu->domains || !iommu->domains[0]) {
1798 pr_err("%s: Allocating domain array failed\n",
1800 kfree(iommu->domain_ids);
1801 kfree(iommu->domains);
1802 iommu->domain_ids = NULL;
1803 iommu->domains = NULL;
1808 * If Caching mode is set, then invalid translations are tagged
1809 * with domain-id 0, hence we need to pre-allocate it. We also
1810 * use domain-id 0 as a marker for non-allocated domain-id, so
1811 * make sure it is not used for a real domain.
1813 set_bit(0, iommu->domain_ids);
1816 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
1817 * entry for first-level or pass-through translation modes should
1818 * be programmed with a domain id different from those used for
1819 * second-level or nested translation. We reserve a domain id for
1822 if (sm_supported(iommu))
1823 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
1828 static void disable_dmar_iommu(struct intel_iommu *iommu)
1830 struct device_domain_info *info, *tmp;
1831 unsigned long flags;
1833 if (!iommu->domains || !iommu->domain_ids)
1836 spin_lock_irqsave(&device_domain_lock, flags);
1837 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1838 if (info->iommu != iommu)
1841 if (!info->dev || !info->domain)
1844 __dmar_remove_one_dev_info(info);
1846 spin_unlock_irqrestore(&device_domain_lock, flags);
1848 if (iommu->gcmd & DMA_GCMD_TE)
1849 iommu_disable_translation(iommu);
1852 static void free_dmar_iommu(struct intel_iommu *iommu)
1854 if ((iommu->domains) && (iommu->domain_ids)) {
1855 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1858 for (i = 0; i < elems; i++)
1859 kfree(iommu->domains[i]);
1860 kfree(iommu->domains);
1861 kfree(iommu->domain_ids);
1862 iommu->domains = NULL;
1863 iommu->domain_ids = NULL;
1866 g_iommus[iommu->seq_id] = NULL;
1868 /* free context mapping */
1869 free_context_table(iommu);
1871 #ifdef CONFIG_INTEL_IOMMU_SVM
1872 if (pasid_supported(iommu)) {
1873 if (ecap_prs(iommu->ecap))
1874 intel_svm_finish_prq(iommu);
1876 if (vccap_pasid(iommu->vccap))
1877 ioasid_unregister_allocator(&iommu->pasid_allocator);
1883 * Check and return whether first level is used by default for DMA translation.
1886 static bool first_level_by_default(void)
1888 return scalable_mode_support() && intel_cap_flts_sanity();
1891 static struct dmar_domain *alloc_domain(int flags)
1893 struct dmar_domain *domain;
1895 domain = alloc_domain_mem();
1899 memset(domain, 0, sizeof(*domain));
1900 domain->nid = NUMA_NO_NODE;
1901 domain->flags = flags;
1902 if (first_level_by_default())
1903 domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
1904 domain->has_iotlb_device = false;
1905 INIT_LIST_HEAD(&domain->devices);
1906 INIT_LIST_HEAD(&domain->subdevices);
1911 /* Must be called with iommu->lock */
1912 static int domain_attach_iommu(struct dmar_domain *domain,
1913 struct intel_iommu *iommu)
1915 unsigned long ndomains;
1918 assert_spin_locked(&device_domain_lock);
1919 assert_spin_locked(&iommu->lock);
1921 domain->iommu_refcnt[iommu->seq_id] += 1;
1922 domain->iommu_count += 1;
1923 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1924 ndomains = cap_ndoms(iommu->cap);
1925 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1927 if (num >= ndomains) {
1928 pr_err("%s: No free domain ids\n", iommu->name);
1929 domain->iommu_refcnt[iommu->seq_id] -= 1;
1930 domain->iommu_count -= 1;
1934 set_bit(num, iommu->domain_ids);
1935 set_iommu_domain(iommu, num, domain);
1937 domain->iommu_did[iommu->seq_id] = num;
1938 domain->nid = iommu->node;
1940 domain_update_iommu_cap(domain);
1946 static int domain_detach_iommu(struct dmar_domain *domain,
1947 struct intel_iommu *iommu)
1951 assert_spin_locked(&device_domain_lock);
1952 assert_spin_locked(&iommu->lock);
1954 domain->iommu_refcnt[iommu->seq_id] -= 1;
1955 count = --domain->iommu_count;
1956 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1957 num = domain->iommu_did[iommu->seq_id];
1958 clear_bit(num, iommu->domain_ids);
1959 set_iommu_domain(iommu, num, NULL);
1961 domain_update_iommu_cap(domain);
1962 domain->iommu_did[iommu->seq_id] = 0;
1968 static inline int guestwidth_to_adjustwidth(int gaw)
1971 int r = (gaw - 12) % 9;
1982 static void domain_exit(struct dmar_domain *domain)
1985 /* Remove associated devices and clear attached or cached domains */
1986 domain_remove_dev_info(domain);
1989 if (domain->domain.type == IOMMU_DOMAIN_DMA)
1990 iommu_put_dma_cookie(&domain->domain);
1993 struct page *freelist;
1995 freelist = domain_unmap(domain, 0,
1996 DOMAIN_MAX_PFN(domain->gaw), NULL);
1997 dma_free_pagelist(freelist);
2000 free_domain_mem(domain);
2004 * Get the PASID directory size for scalable mode context entry.
2005 * Value of X in the PDTS field of a scalable mode context entry
2006 * indicates PASID directory with 2^(X + 7) entries.
2008 static inline unsigned long context_get_sm_pds(struct pasid_table *table)
2012 max_pde = table->max_pasid >> PASID_PDE_SHIFT;
2013 pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
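/*
 * Worked example (illustrative, assuming PASID_PDE_SHIFT == 6): a table
 * covering the full 20-bit PASID space needs 2^20 / 64 == 2^14 directory
 * entries, so pds == 14 above and the PDTS field is programmed with
 * X == 7, i.e. 2^(7 + 7) entries.
 */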
2021 * Set the RID_PASID field of a scalable mode context entry. The
2022 * IOMMU hardware will use the PASID value set in this field for
2023 * DMA translations of DMA requests without PASID.
2026 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
2028 context->hi |= pasid & ((1 << 20) - 1);
2032 * Set the DTE (Device-TLB Enable) field of a scalable mode context entry.
2035 static inline void context_set_sm_dte(struct context_entry *context)
2037 context->lo |= (1 << 2);
2041 * Set the PRE (Page Request Enable) field of a scalable mode context entry.
2044 static inline void context_set_sm_pre(struct context_entry *context)
2046 context->lo |= (1 << 4);
2049 /* Convert value to context PASID directory size field coding. */
2050 #define context_pdts(pds) (((pds) & 0x7) << 9)
2052 static int domain_context_mapping_one(struct dmar_domain *domain,
2053 struct intel_iommu *iommu,
2054 struct pasid_table *table,
2057 u16 did = domain->iommu_did[iommu->seq_id];
2058 int translation = CONTEXT_TT_MULTI_LEVEL;
2059 struct device_domain_info *info = NULL;
2060 struct context_entry *context;
2061 unsigned long flags;
2066 if (hw_pass_through && domain_type_is_si(domain))
2067 translation = CONTEXT_TT_PASS_THROUGH;
2069 pr_debug("Set context mapping for %02x:%02x.%d\n",
2070 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2072 BUG_ON(!domain->pgd);
2074 spin_lock_irqsave(&device_domain_lock, flags);
2075 spin_lock(&iommu->lock);
2078 context = iommu_context_addr(iommu, bus, devfn, 1);
2083 if (context_present(context))
2087 * For kdump cases, old valid entries may be cached due to the
2088 * in-flight DMA and copied pgtable, but there is no unmapping
2089 * behaviour for them, thus we need an explicit cache flush for
2090 * the newly-mapped device. For kdump, at this point, the device
2091 * is supposed to finish reset at its driver probe stage, so no
2092 * in-flight DMA will exist, and we don't need to worry anymore hereafter.
2095 if (context_copied(context)) {
2096 u16 did_old = context_domain_id(context);
2098 if (did_old < cap_ndoms(iommu->cap)) {
2099 iommu->flush.flush_context(iommu, did_old,
2100 (((u16)bus) << 8) | devfn,
2101 DMA_CCMD_MASK_NOBIT,
2102 DMA_CCMD_DEVICE_INVL);
2103 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2108 context_clear_entry(context);
2110 if (sm_supported(iommu)) {
2115 /* Setup the PASID DIR pointer: */
2116 pds = context_get_sm_pds(table);
2117 context->lo = (u64)virt_to_phys(table->table) |
2120 /* Setup the RID_PASID field: */
2121 context_set_sm_rid2pasid(context, PASID_RID2PASID);
2124 * Setup the Device-TLB enable bit and Page request enable bit:
2127 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2128 if (info && info->ats_supported)
2129 context_set_sm_dte(context);
2130 if (info && info->pri_supported)
2131 context_set_sm_pre(context);
2133 struct dma_pte *pgd = domain->pgd;
2136 context_set_domain_id(context, did);
2138 if (translation != CONTEXT_TT_PASS_THROUGH) {
2140 * Skip top levels of page tables for an iommu which has
2141 * less agaw than the default. Unnecessary for PT mode.
2143 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2145 pgd = phys_to_virt(dma_pte_addr(pgd));
2146 if (!dma_pte_present(pgd))
2150 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2151 if (info && info->ats_supported)
2152 translation = CONTEXT_TT_DEV_IOTLB;
2154 translation = CONTEXT_TT_MULTI_LEVEL;
2156 context_set_address_root(context, virt_to_phys(pgd));
2157 context_set_address_width(context, agaw);
2160 * In pass through mode, AW must be programmed to
2161 * indicate the largest AGAW value supported by
2162 * hardware. And ASR is ignored by hardware.
2164 context_set_address_width(context, iommu->msagaw);
2167 context_set_translation_type(context, translation);
2170 context_set_fault_enable(context);
2171 context_set_present(context);
2172 if (!ecap_coherent(iommu->ecap))
2173 clflush_cache_range(context, sizeof(*context));
2176 * It's a non-present to present mapping. If hardware doesn't cache
2177 * non-present entries we only need to flush the write-buffer. If the
2178 * hardware _does_ cache non-present entries, then it does so in the special
2179 * domain #0, which we have to flush:
2181 if (cap_caching_mode(iommu->cap)) {
2182 iommu->flush.flush_context(iommu, 0,
2183 (((u16)bus) << 8) | devfn,
2184 DMA_CCMD_MASK_NOBIT,
2185 DMA_CCMD_DEVICE_INVL);
2186 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2188 iommu_flush_write_buffer(iommu);
2190 iommu_enable_dev_iotlb(info);
2195 spin_unlock(&iommu->lock);
2196 spin_unlock_irqrestore(&device_domain_lock, flags);
2201 struct domain_context_mapping_data {
2202 struct dmar_domain *domain;
2203 struct intel_iommu *iommu;
2204 struct pasid_table *table;
2207 static int domain_context_mapping_cb(struct pci_dev *pdev,
2208 u16 alias, void *opaque)
2210 struct domain_context_mapping_data *data = opaque;
2212 return domain_context_mapping_one(data->domain, data->iommu,
2213 data->table, PCI_BUS_NUM(alias),
2218 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2220 struct domain_context_mapping_data data;
2221 struct pasid_table *table;
2222 struct intel_iommu *iommu;
2225 iommu = device_to_iommu(dev, &bus, &devfn);
2229 table = intel_pasid_get_table(dev);
2231 if (!dev_is_pci(dev))
2232 return domain_context_mapping_one(domain, iommu, table,
2235 data.domain = domain;
2239 return pci_for_each_dma_alias(to_pci_dev(dev),
2240 &domain_context_mapping_cb, &data);
2243 static int domain_context_mapped_cb(struct pci_dev *pdev,
2244 u16 alias, void *opaque)
2246 struct intel_iommu *iommu = opaque;
2248 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2251 static int domain_context_mapped(struct device *dev)
2253 struct intel_iommu *iommu;
2256 iommu = device_to_iommu(dev, &bus, &devfn);
2260 if (!dev_is_pci(dev))
2261 return device_context_mapped(iommu, bus, devfn);
2263 return !pci_for_each_dma_alias(to_pci_dev(dev),
2264 domain_context_mapped_cb, iommu);
2267 /* Returns a number of VTD pages, but aligned to MM page size */
2268 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2271 host_addr &= ~PAGE_MASK;
2272 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
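/*
 * Worked example (illustrative, 4KiB MM pages): host_addr == 0x1234 and
 * size == 0x2000 give an in-page offset of 0x234, PAGE_ALIGN(0x2234) ==
 * 0x3000, so the mapping spans 3 VT-d pages even though the size alone is
 * only 2 pages.
 */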
2275 /* Return largest possible superpage level for a given mapping */
2276 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2277 unsigned long iov_pfn,
2278 unsigned long phy_pfn,
2279 unsigned long pages)
2281 int support, level = 1;
2282 unsigned long pfnmerge;
2284 support = domain->iommu_superpage;
2286 /* To use a large page, the virtual *and* physical addresses
2287 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2288 of them will mean we have to use smaller pages. So just
2289 merge them and check both at once. */
2290 pfnmerge = iov_pfn | phy_pfn;
2292 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2293 pages >>= VTD_STRIDE_SHIFT;
2296 pfnmerge >>= VTD_STRIDE_SHIFT;
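/*
 * Worked example (illustrative): mapping 512 pages at iov_pfn 0x200 to
 * phy_pfn 0x400 merges to 0x600, whose low 9 bits are clear, so the loop
 * above promotes the mapping to level 2 (one 2MiB superpage), provided
 * domain->iommu_superpage allows it.
 */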
2304 * Ensure that old small page tables are removed to make room for superpage(s).
2305 * We're going to add new large pages, so make sure we don't remove their parent
2306 * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
2308 static void switch_to_super_page(struct dmar_domain *domain,
2309 unsigned long start_pfn,
2310 unsigned long end_pfn, int level)
2312 unsigned long lvl_pages = lvl_to_nr_pages(level);
2313 struct dma_pte *pte = NULL;
2316 while (start_pfn <= end_pfn) {
2318 pte = pfn_to_dma_pte(domain, start_pfn, &level);
2320 if (dma_pte_present(pte)) {
2321 dma_pte_free_pagetable(domain, start_pfn,
2322 start_pfn + lvl_pages - 1,
2325 for_each_domain_iommu(i, domain)
2326 iommu_flush_iotlb_psi(g_iommus[i], domain,
2327 start_pfn, lvl_pages,
2332 start_pfn += lvl_pages;
2333 if (first_pte_in_page(pte))
2339 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2340 unsigned long phys_pfn, unsigned long nr_pages, int prot)
2342 unsigned int largepage_lvl = 0;
2343 unsigned long lvl_pages = 0;
2344 struct dma_pte *pte = NULL;
2348 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2350 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2353 attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
2354 attr |= DMA_FL_PTE_PRESENT;
2355 if (domain_use_first_level(domain)) {
2356 attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
2358 if (domain->domain.type == IOMMU_DOMAIN_DMA) {
2359 attr |= DMA_FL_PTE_ACCESS;
2360 if (prot & DMA_PTE_WRITE)
2361 attr |= DMA_FL_PTE_DIRTY;
2365 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
2367 while (nr_pages > 0) {
2371 largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
2372 phys_pfn, nr_pages);
2374 pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2377 /* It is a large page */
2378 if (largepage_lvl > 1) {
2379 unsigned long end_pfn;
2381 pteval |= DMA_PTE_LARGE_PAGE;
2382 end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
2383 switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
2385 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2389 /* We don't need a lock here, nobody else
2390 * touches the iova range
2392 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2394 static int dumps = 5;
2395 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2396 iov_pfn, tmp, (unsigned long long)pteval);
2399 debug_dma_dump_mappings(NULL);
2404 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2406 BUG_ON(nr_pages < lvl_pages);
2408 nr_pages -= lvl_pages;
2409 iov_pfn += lvl_pages;
2410 phys_pfn += lvl_pages;
2411 pteval += lvl_pages * VTD_PAGE_SIZE;
2413 /* If the next PTE would be the first in a new page, then we
2414 * need to flush the cache on the entries we've just written.
2415 * And then we'll need to recalculate 'pte', so clear it and
2416 * let it get set again in the if (!pte) block above.
2418 * If we're done (!nr_pages) we need to flush the cache too.
2420 * Also if we've been setting superpages, we may need to
2421 * recalculate 'pte' and switch back to smaller pages for the
2422 * end of the mapping, if the trailing size is not enough to
2423 * use another superpage (i.e. nr_pages < lvl_pages).
2425 * We leave clflush for the leaf pte changes to iotlb_sync_map()
2429 if (!nr_pages || first_pte_in_page(pte) ||
2430 (largepage_lvl > 1 && nr_pages < lvl_pages))
2437 static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
2439 struct intel_iommu *iommu = info->iommu;
2440 struct context_entry *context;
2441 unsigned long flags;
2447 spin_lock_irqsave(&iommu->lock, flags);
2448 context = iommu_context_addr(iommu, bus, devfn, 0);
2450 spin_unlock_irqrestore(&iommu->lock, flags);
2454 if (sm_supported(iommu)) {
2455 if (hw_pass_through && domain_type_is_si(info->domain))
2456 did_old = FLPT_DEFAULT_DID;
2458 did_old = info->domain->iommu_did[iommu->seq_id];
2460 did_old = context_domain_id(context);
2463 context_clear_entry(context);
2464 __iommu_flush_cache(iommu, context, sizeof(*context));
2465 spin_unlock_irqrestore(&iommu->lock, flags);
2466 iommu->flush.flush_context(iommu,
2468 (((u16)bus) << 8) | devfn,
2469 DMA_CCMD_MASK_NOBIT,
2470 DMA_CCMD_DEVICE_INVL);
2472 if (sm_supported(iommu))
2473 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
2475 iommu->flush.flush_iotlb(iommu,
2481 __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
2484 static inline void unlink_domain_info(struct device_domain_info *info)
2486 assert_spin_locked(&device_domain_lock);
2487 list_del(&info->link);
2488 list_del(&info->global);
2490 dev_iommu_priv_set(info->dev, NULL);
2493 static void domain_remove_dev_info(struct dmar_domain *domain)
2495 struct device_domain_info *info, *tmp;
2496 unsigned long flags;
2498 spin_lock_irqsave(&device_domain_lock, flags);
2499 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2500 __dmar_remove_one_dev_info(info);
2501 spin_unlock_irqrestore(&device_domain_lock, flags);
2504 struct dmar_domain *find_domain(struct device *dev)
2506 struct device_domain_info *info;
2508 if (unlikely(!dev || !dev->iommu))
2511 if (unlikely(attach_deferred(dev)))
2514 /* No lock here, assumes no domain exit in normal case */
2515 info = get_domain_info(dev);
2517 return info->domain;
2522 static inline struct device_domain_info *
2523 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2525 struct device_domain_info *info;
2527 list_for_each_entry(info, &device_domain_list, global)
2528 if (info->segment == segment && info->bus == bus &&
2529 info->devfn == devfn)
2535 static int domain_setup_first_level(struct intel_iommu *iommu,
2536 struct dmar_domain *domain,
2540 struct dma_pte *pgd = domain->pgd;
2545 * Skip the top levels of the page tables for an iommu whose
2546 * agaw is smaller than the default. Unnecessary for PT mode.
2548 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2549 pgd = phys_to_virt(dma_pte_addr(pgd));
2550 if (!dma_pte_present(pgd))
2554 level = agaw_to_level(agaw);
2555 if (level != 4 && level != 5)
2558 if (pasid != PASID_RID2PASID)
2559 flags |= PASID_FLAG_SUPERVISOR_MODE;
2561 flags |= PASID_FLAG_FL5LP;
2563 if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
2564 flags |= PASID_FLAG_PAGE_SNOOP;
2566 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
2567 domain->iommu_did[iommu->seq_id],
2571 static bool dev_is_real_dma_subdevice(struct device *dev)
2573 return dev && dev_is_pci(dev) &&
2574 pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
2577 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2580 struct dmar_domain *domain)
2582 struct dmar_domain *found = NULL;
2583 struct device_domain_info *info;
2584 unsigned long flags;
2587 info = alloc_devinfo_mem();
2591 if (!dev_is_real_dma_subdevice(dev)) {
2593 info->devfn = devfn;
2594 info->segment = iommu->segment;
2596 struct pci_dev *pdev = to_pci_dev(dev);
2598 info->bus = pdev->bus->number;
2599 info->devfn = pdev->devfn;
2600 info->segment = pci_domain_nr(pdev->bus);
2603 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2604 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2607 info->domain = domain;
2608 info->iommu = iommu;
2609 info->pasid_table = NULL;
2610 info->auxd_enabled = 0;
2611 INIT_LIST_HEAD(&info->subdevices);
2613 if (dev && dev_is_pci(dev)) {
2614 struct pci_dev *pdev = to_pci_dev(info->dev);
2616 if (ecap_dev_iotlb_support(iommu->ecap) &&
2617 pci_ats_supported(pdev) &&
2618 dmar_find_matched_atsr_unit(pdev))
2619 info->ats_supported = 1;
2621 if (sm_supported(iommu)) {
2622 if (pasid_supported(iommu)) {
2623 int features = pci_pasid_features(pdev);
2625 info->pasid_supported = features | 1;
2628 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2629 pci_pri_supported(pdev))
2630 info->pri_supported = 1;
2634 spin_lock_irqsave(&device_domain_lock, flags);
2636 found = find_domain(dev);
2639 struct device_domain_info *info2;
2640 info2 = dmar_search_domain_by_dev_info(info->segment, info->bus,
2643 found = info2->domain;
2649 spin_unlock_irqrestore(&device_domain_lock, flags);
2650 free_devinfo_mem(info);
2651 /* Caller must free the original domain */
2655 spin_lock(&iommu->lock);
2656 ret = domain_attach_iommu(domain, iommu);
2657 spin_unlock(&iommu->lock);
2660 spin_unlock_irqrestore(&device_domain_lock, flags);
2661 free_devinfo_mem(info);
2665 list_add(&info->link, &domain->devices);
2666 list_add(&info->global, &device_domain_list);
2668 dev_iommu_priv_set(dev, info);
2669 spin_unlock_irqrestore(&device_domain_lock, flags);
2671 /* PASID table is mandatory for a PCI device in scalable mode. */
2672 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2673 ret = intel_pasid_alloc_table(dev);
2675 dev_err(dev, "PASID table allocation failed\n");
2676 dmar_remove_one_dev_info(dev);
2680 /* Setup the PASID entry for requests without PASID: */
2681 spin_lock_irqsave(&iommu->lock, flags);
2682 if (hw_pass_through && domain_type_is_si(domain))
2683 ret = intel_pasid_setup_pass_through(iommu, domain,
2684 dev, PASID_RID2PASID);
2685 else if (domain_use_first_level(domain))
2686 ret = domain_setup_first_level(iommu, domain, dev,
2689 ret = intel_pasid_setup_second_level(iommu, domain,
2690 dev, PASID_RID2PASID);
2691 spin_unlock_irqrestore(&iommu->lock, flags);
2693 dev_err(dev, "Setup RID2PASID failed\n");
2694 dmar_remove_one_dev_info(dev);
2699 if (dev && domain_context_mapping(domain, dev)) {
2700 dev_err(dev, "Domain context map failed\n");
2701 dmar_remove_one_dev_info(dev);
2708 static int iommu_domain_identity_map(struct dmar_domain *domain,
2709 unsigned long first_vpfn,
2710 unsigned long last_vpfn)
2713 * The RMRR range might overlap with a physical memory range,
2716 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
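/*
 * The mapping below is 1:1 - the same PFN is used as both the IOVA and
 * the physical frame, so e.g. a device DMA to bus address 0x10000000
 * lands on physical address 0x10000000.
 */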
2718 return __domain_mapping(domain, first_vpfn,
2719 first_vpfn, last_vpfn - first_vpfn + 1,
2720 DMA_PTE_READ|DMA_PTE_WRITE);
2723 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2725 static int __init si_domain_init(int hw)
2727 struct dmar_rmrr_unit *rmrr;
2731 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2735 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2736 domain_exit(si_domain);
2743 for_each_online_node(nid) {
2744 unsigned long start_pfn, end_pfn;
2747 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2748 ret = iommu_domain_identity_map(si_domain,
2749 mm_to_dma_pfn(start_pfn),
2750 mm_to_dma_pfn(end_pfn));
2757 * Identity map the RMRRs so that devices with RMRRs could also use
2760 for_each_rmrr_units(rmrr) {
2761 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2763 unsigned long long start = rmrr->base_address;
2764 unsigned long long end = rmrr->end_address;
2766 if (WARN_ON(end < start ||
2767 end >> agaw_to_width(si_domain->agaw)))
2770 ret = iommu_domain_identity_map(si_domain,
2771 mm_to_dma_pfn(start >> PAGE_SHIFT),
2772 mm_to_dma_pfn(end >> PAGE_SHIFT));
2781 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2783 struct dmar_domain *ndomain;
2784 struct intel_iommu *iommu;
2787 iommu = device_to_iommu(dev, &bus, &devfn);
2791 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2792 if (ndomain != domain)
2798 static bool device_has_rmrr(struct device *dev)
2800 struct dmar_rmrr_unit *rmrr;
2805 for_each_rmrr_units(rmrr) {
2807 * Return TRUE if this RMRR contains the device that
2810 for_each_active_dev_scope(rmrr->devices,
2811 rmrr->devices_cnt, i, tmp)
2813 is_downstream_to_pci_bridge(dev, tmp)) {
2823 * device_rmrr_is_relaxable - Test whether the RMRR of this device
2824 * is relaxable (i.e. is allowed to be left unenforced under some conditions)
2825 * @dev: device handle
2827 * We assume that PCI USB devices with RMRRs have them largely
2828 * for historical reasons and that the RMRR space is not actively used post
2829 * boot. This exclusion may change if vendors begin to abuse it.
2831 * The same exception is made for graphics devices, with the requirement that
2832 * any use of the RMRR regions will be torn down before assigning the device
2835 * Return: true if the RMRR is relaxable, false otherwise
2837 static bool device_rmrr_is_relaxable(struct device *dev)
2839 struct pci_dev *pdev;
2841 if (!dev_is_pci(dev))
2844 pdev = to_pci_dev(dev);
2845 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2852 * There are a couple cases where we need to restrict the functionality of
2853 * devices associated with RMRRs. The first is when evaluating a device for
2854 * identity mapping because problems exist when devices are moved in and out
2855 * of domains and their respective RMRR information is lost. This means that
2856 * a device with associated RMRRs will never be in a "passthrough" domain.
2857 * The second is use of the device through the IOMMU API. This interface
2858 * expects to have full control of the IOVA space for the device. We cannot
2859 * satisfy both the requirement that RMRR access is maintained and have an
2860 * unencumbered IOVA space. We also have no ability to quiesce the device's
2861 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2862 * We therefore prevent devices associated with an RMRR from participating in
2863 * the IOMMU API, which eliminates them from device assignment.
2865 * In both cases, devices which have relaxable RMRRs are not concerned by this
2866 * restriction. See device_rmrr_is_relaxable comment.
2868 static bool device_is_rmrr_locked(struct device *dev)
2870 if (!device_has_rmrr(dev))
2873 if (device_rmrr_is_relaxable(dev))
2880 * Return the required default domain type for a specific device.
2882 * @dev: the device in query
2886 * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
2887 * - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
2888 * - 0: both identity and dynamic domains work for this device
2890 static int device_def_domain_type(struct device *dev)
2892 if (dev_is_pci(dev)) {
2893 struct pci_dev *pdev = to_pci_dev(dev);
2895 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2896 return IOMMU_DOMAIN_IDENTITY;
2898 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2899 return IOMMU_DOMAIN_IDENTITY;
2905 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2908 * Start from a sane iommu hardware state.
2909 * If queued invalidation was already initialized by us
2910 * (for example, while enabling interrupt-remapping) then
2911 * things are already rolling from a sane state.
2915 * Clear any previous faults.
2917 dmar_fault(-1, iommu);
2919 * Disable queued invalidation if supported and already enabled
2920 * before OS handover.
2922 dmar_disable_qi(iommu);
2925 if (dmar_enable_qi(iommu)) {
2927 * Queued Invalidate not enabled, use Register Based Invalidate
2929 iommu->flush.flush_context = __iommu_flush_context;
2930 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2931 pr_info("%s: Using Register based invalidation\n",
2934 iommu->flush.flush_context = qi_flush_context;
2935 iommu->flush.flush_iotlb = qi_flush_iotlb;
2936 pr_info("%s: Using Queued invalidation\n", iommu->name);
2940 static int copy_context_table(struct intel_iommu *iommu,
2941 struct root_entry *old_re,
2942 struct context_entry **tbl,
2945 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2946 struct context_entry *new_ce = NULL, ce;
2947 struct context_entry *old_ce = NULL;
2948 struct root_entry re;
2949 phys_addr_t old_ce_phys;
2951 tbl_idx = ext ? bus * 2 : bus;
2952 memcpy(&re, old_re, sizeof(re));
2954 for (devfn = 0; devfn < 256; devfn++) {
2955 /* First calculate the correct index */
2956 idx = (ext ? devfn * 2 : devfn) % 256;
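/*
 * Illustrative: in extended mode each devfn consumes two context-entry
 * slots, so one 256-entry table only covers devfns 0-127; e.g. devfn
 * 0x7f maps to idx 254 while devfn 0x80 wraps to idx 0 of the next table.
 */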
2959 /* First save what we may have and clean up */
2961 tbl[tbl_idx] = new_ce;
2962 __iommu_flush_cache(iommu, new_ce,
2972 old_ce_phys = root_entry_lctp(&re);
2974 old_ce_phys = root_entry_uctp(&re);
2977 if (ext && devfn == 0) {
2978 /* No LCTP, try UCTP */
2987 old_ce = memremap(old_ce_phys, PAGE_SIZE,
2992 new_ce = alloc_pgtable_page(iommu->node);
2999 /* Now copy the context entry */
3000 memcpy(&ce, old_ce + idx, sizeof(ce));
3002 if (!__context_present(&ce))
3005 did = context_domain_id(&ce);
3006 if (did >= 0 && did < cap_ndoms(iommu->cap))
3007 set_bit(did, iommu->domain_ids);
3010 * We need a marker for copied context entries. This
3011 * marker needs to work for the old format as well as
3012 * for extended context entries.
3014 * Bit 67 of the context entry is used. In the old
3015 * format this bit is available to software, in the
3016 * extended format it is the PGE bit, but PGE is ignored
3017 * by HW if PASIDs are disabled (and thus still
3020 * So disable PASIDs first and then mark the entry
3021 * copied. This means that we don't copy PASID
3022 * translations from the old kernel, but this is fine as
3023 * faults there are not fatal.
3025 context_clear_pasid_enable(&ce);
3026 context_set_copied(&ce);
3031 tbl[tbl_idx + pos] = new_ce;
3033 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3042 static int copy_translation_tables(struct intel_iommu *iommu)
3044 struct context_entry **ctxt_tbls;
3045 struct root_entry *old_rt;
3046 phys_addr_t old_rt_phys;
3047 int ctxt_table_entries;
3048 unsigned long flags;
3053 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3054 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3055 new_ext = !!ecap_ecs(iommu->ecap);
3058 * The RTT bit can only be changed when translation is disabled,
3059 * but disabling translation would open a window for data
3060 * corruption. So bail out and don't copy anything if we would
3061 * have to change the bit.
3066 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3070 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3074 /* This is too big for the stack - allocate it from slab */
3075 ctxt_table_entries = ext ? 512 : 256;
3077 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3081 for (bus = 0; bus < 256; bus++) {
3082 ret = copy_context_table(iommu, &old_rt[bus],
3083 ctxt_tbls, bus, ext);
3085 pr_err("%s: Failed to copy context table for bus %d\n",
3091 spin_lock_irqsave(&iommu->lock, flags);
3093 /* Context tables are copied, now write them to the root_entry table */
3094 for (bus = 0; bus < 256; bus++) {
3095 int idx = ext ? bus * 2 : bus;
3098 if (ctxt_tbls[idx]) {
3099 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3100 iommu->root_entry[bus].lo = val;
3103 if (!ext || !ctxt_tbls[idx + 1])
3106 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3107 iommu->root_entry[bus].hi = val;
3110 spin_unlock_irqrestore(&iommu->lock, flags);
3114 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3124 #ifdef CONFIG_INTEL_IOMMU_SVM
3125 static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
3127 struct intel_iommu *iommu = data;
3131 return INVALID_IOASID;
3133 * The VT-d virtual command interface always uses the full 20-bit
3134 * PASID range. The host can partition the guest PASID range based
3135 * on policies, but this is out of the guest's control.
3137 if (min < PASID_MIN || max > intel_pasid_max_id)
3138 return INVALID_IOASID;
3140 if (vcmd_alloc_pasid(iommu, &ioasid))
3141 return INVALID_IOASID;
3146 static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
3148 struct intel_iommu *iommu = data;
3153 * The sanity check of the ioasid owner is done at the upper layer,
3154 * e.g. VFIO. We can only free the PASID when all the devices are unbound.
3156 if (ioasid_find(NULL, ioasid, NULL)) {
3157 pr_alert("Cannot free active IOASID %d\n", ioasid);
3160 vcmd_free_pasid(iommu, ioasid);
3163 static void register_pasid_allocator(struct intel_iommu *iommu)
3166 * If we are running in the host, there is no need for a custom
3167 * allocator because PASIDs are allocated from the host system-wide.
3169 if (!cap_caching_mode(iommu->cap))
3172 if (!sm_supported(iommu)) {
3173 pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
3178 * Register a custom PASID allocator if we are running in a guest;
3179 * guest PASIDs must be obtained via the virtual command interface.
3180 * There can be multiple vIOMMUs in each guest but only one allocator
3181 * is active. All vIOMMU allocators will eventually be calling the same
3184 if (!vccap_pasid(iommu->vccap))
3187 pr_info("Register custom PASID allocator\n");
3188 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
3189 iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
3190 iommu->pasid_allocator.pdata = (void *)iommu;
3191 if (ioasid_register_allocator(&iommu->pasid_allocator)) {
3192 pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
3194 * Disable scalable mode on this IOMMU if there
3195 * is no custom allocator. Mixing SM-capable vIOMMUs
3196 * and non-SM vIOMMUs is not supported.
3203 static int __init init_dmars(void)
3205 struct dmar_drhd_unit *drhd;
3206 struct intel_iommu *iommu;
3212 * initialize and program root entry to not present
3215 for_each_drhd_unit(drhd) {
3217 * lock not needed as this is only incremented in the
3218 * single-threaded kernel __init code path; all other accesses are read-only
3221 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3225 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3228 /* Preallocate enough resources for IOMMU hot-addition */
3229 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3230 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3232 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3235 pr_err("Allocating global iommu array failed\n");
3240 ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
3244 for_each_iommu(iommu, drhd) {
3245 if (drhd->ignored) {
3246 iommu_disable_translation(iommu);
3251 * Find the max pasid size of all IOMMUs in the system.
3252 * We need to ensure the system pasid table is no bigger
3253 * than the smallest supported.
3255 if (pasid_supported(iommu)) {
3256 u32 temp = 2 << ecap_pss(iommu->ecap);
3258 intel_pasid_max_id = min_t(u32, temp,
3259 intel_pasid_max_id);
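/*
 * For example, an ecap PSS value of 19 yields temp = 2 << 19 = 2^20,
 * i.e. the full 20-bit PASID space.
 */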
3262 g_iommus[iommu->seq_id] = iommu;
3264 intel_iommu_init_qi(iommu);
3266 ret = iommu_init_domains(iommu);
3270 init_translation_status(iommu);
3272 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3273 iommu_disable_translation(iommu);
3274 clear_translation_pre_enabled(iommu);
3275 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3281 * we could share the same root & context tables
3282 * among all IOMMUs. Need to split it later.
3284 ret = iommu_alloc_root_entry(iommu);
3288 if (translation_pre_enabled(iommu)) {
3289 pr_info("Translation already enabled - trying to copy translation structures\n");
3291 ret = copy_translation_tables(iommu);
3294 * We found the IOMMU with translation
3295 * enabled - but failed to copy over the
3296 * old root-entry table. Try to proceed
3297 * by disabling translation now and
3298 * allocating a clean root-entry table.
3299 * This might cause DMAR faults, but
3300 * probably the dump will still succeed.
3302 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3304 iommu_disable_translation(iommu);
3305 clear_translation_pre_enabled(iommu);
3307 pr_info("Copied translation tables from previous kernel for %s\n",
3312 if (!ecap_pass_through(iommu->ecap))
3313 hw_pass_through = 0;
3314 intel_svm_check(iommu);
3318 * Now that qi is enabled on all iommus, set the root entry and flush
3319 * caches. This is required on some Intel X58 chipsets, otherwise the
3320 * flush_context function will loop forever and the boot hangs.
3322 for_each_active_iommu(iommu, drhd) {
3323 iommu_flush_write_buffer(iommu);
3324 #ifdef CONFIG_INTEL_IOMMU_SVM
3325 register_pasid_allocator(iommu);
3327 iommu_set_root_entry(iommu);
3330 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3335 iommu_identity_mapping |= IDENTMAP_GFX;
3337 check_tylersburg_isoch();
3339 ret = si_domain_init(hw_pass_through);
3346 * global invalidate context cache
3347 * global invalidate iotlb
3348 * enable translation
3350 for_each_iommu(iommu, drhd) {
3351 if (drhd->ignored) {
3353 * we always have to disable PMRs or DMA may fail on
3357 iommu_disable_protect_mem_regions(iommu);
3361 iommu_flush_write_buffer(iommu);
3363 #ifdef CONFIG_INTEL_IOMMU_SVM
3364 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3366 * Calling dmar_alloc_hwirq() with dmar_global_lock held
3367 * could cause a lock race, so drop the lock around it.
3369 up_write(&dmar_global_lock);
3370 ret = intel_svm_enable_prq(iommu);
3371 down_write(&dmar_global_lock);
3376 ret = dmar_set_interrupt(iommu);
3384 for_each_active_iommu(iommu, drhd) {
3385 disable_dmar_iommu(iommu);
3386 free_dmar_iommu(iommu);
3395 static inline int iommu_domain_cache_init(void)
3399 iommu_domain_cache = kmem_cache_create("iommu_domain",
3400 sizeof(struct dmar_domain),
3405 if (!iommu_domain_cache) {
3406 pr_err("Couldn't create iommu_domain cache\n");
3413 static inline int iommu_devinfo_cache_init(void)
3417 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3418 sizeof(struct device_domain_info),
3422 if (!iommu_devinfo_cache) {
3423 pr_err("Couldn't create devinfo cache\n");
3430 static int __init iommu_init_mempool(void)
3433 ret = iova_cache_get();
3437 ret = iommu_domain_cache_init();
3441 ret = iommu_devinfo_cache_init();
3445 kmem_cache_destroy(iommu_domain_cache);
3452 static void __init iommu_exit_mempool(void)
3454 kmem_cache_destroy(iommu_devinfo_cache);
3455 kmem_cache_destroy(iommu_domain_cache);
3459 static void __init init_no_remapping_devices(void)
3461 struct dmar_drhd_unit *drhd;
3465 for_each_drhd_unit(drhd) {
3466 if (!drhd->include_all) {
3467 for_each_active_dev_scope(drhd->devices,
3468 drhd->devices_cnt, i, dev)
3470 /* ignore DMAR unit if no devices exist */
3471 if (i == drhd->devices_cnt)
3476 for_each_active_drhd_unit(drhd) {
3477 if (drhd->include_all)
3480 for_each_active_dev_scope(drhd->devices,
3481 drhd->devices_cnt, i, dev)
3482 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3484 if (i < drhd->devices_cnt)
3487 /* This IOMMU has *only* gfx devices. Either bypass it or
3488 set the gfx_mapped flag, as appropriate */
3489 drhd->gfx_dedicated = 1;
3495 #ifdef CONFIG_SUSPEND
3496 static int init_iommu_hw(void)
3498 struct dmar_drhd_unit *drhd;
3499 struct intel_iommu *iommu = NULL;
3501 for_each_active_iommu(iommu, drhd)
3503 dmar_reenable_qi(iommu);
3505 for_each_iommu(iommu, drhd) {
3506 if (drhd->ignored) {
3508 * we always have to disable PMRs or DMA may fail on
3512 iommu_disable_protect_mem_regions(iommu);
3516 iommu_flush_write_buffer(iommu);
3517 iommu_set_root_entry(iommu);
3518 iommu_enable_translation(iommu);
3519 iommu_disable_protect_mem_regions(iommu);
3525 static void iommu_flush_all(void)
3527 struct dmar_drhd_unit *drhd;
3528 struct intel_iommu *iommu;
3530 for_each_active_iommu(iommu, drhd) {
3531 iommu->flush.flush_context(iommu, 0, 0, 0,
3532 DMA_CCMD_GLOBAL_INVL);
3533 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3534 DMA_TLB_GLOBAL_FLUSH);
3538 static int iommu_suspend(void)
3540 struct dmar_drhd_unit *drhd;
3541 struct intel_iommu *iommu = NULL;
3544 for_each_active_iommu(iommu, drhd) {
3545 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
3547 if (!iommu->iommu_state)
3553 for_each_active_iommu(iommu, drhd) {
3554 iommu_disable_translation(iommu);
3556 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3558 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3559 readl(iommu->reg + DMAR_FECTL_REG);
3560 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3561 readl(iommu->reg + DMAR_FEDATA_REG);
3562 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3563 readl(iommu->reg + DMAR_FEADDR_REG);
3564 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3565 readl(iommu->reg + DMAR_FEUADDR_REG);
3567 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3572 for_each_active_iommu(iommu, drhd)
3573 kfree(iommu->iommu_state);
3578 static void iommu_resume(void)
3580 struct dmar_drhd_unit *drhd;
3581 struct intel_iommu *iommu = NULL;
3584 if (init_iommu_hw()) {
3586 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3588 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3592 for_each_active_iommu(iommu, drhd) {
3594 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3596 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3597 iommu->reg + DMAR_FECTL_REG);
3598 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3599 iommu->reg + DMAR_FEDATA_REG);
3600 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3601 iommu->reg + DMAR_FEADDR_REG);
3602 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3603 iommu->reg + DMAR_FEUADDR_REG);
3605 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3608 for_each_active_iommu(iommu, drhd)
3609 kfree(iommu->iommu_state);
3612 static struct syscore_ops iommu_syscore_ops = {
3613 .resume = iommu_resume,
3614 .suspend = iommu_suspend,
3617 static void __init init_iommu_pm_ops(void)
3619 register_syscore_ops(&iommu_syscore_ops);
3623 static inline void init_iommu_pm_ops(void) {}
3624 #endif /* CONFIG_PM */
3626 static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
3628 if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
3629 !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
3630 rmrr->end_address <= rmrr->base_address ||
3631 arch_rmrr_sanity_check(rmrr))
3637 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3639 struct acpi_dmar_reserved_memory *rmrr;
3640 struct dmar_rmrr_unit *rmrru;
3642 rmrr = (struct acpi_dmar_reserved_memory *)header;
3643 if (rmrr_sanity_check(rmrr)) {
3645 "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
3646 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
3647 rmrr->base_address, rmrr->end_address,
3648 dmi_get_system_info(DMI_BIOS_VENDOR),
3649 dmi_get_system_info(DMI_BIOS_VERSION),
3650 dmi_get_system_info(DMI_PRODUCT_VERSION));
3651 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
3654 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3658 rmrru->hdr = header;
3660 rmrru->base_address = rmrr->base_address;
3661 rmrru->end_address = rmrr->end_address;
3663 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3664 ((void *)rmrr) + rmrr->header.length,
3665 &rmrru->devices_cnt);
3666 if (rmrru->devices_cnt && rmrru->devices == NULL)
3669 list_add(&rmrru->list, &dmar_rmrr_units);
3678 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3680 struct dmar_atsr_unit *atsru;
3681 struct acpi_dmar_atsr *tmp;
3683 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
3685 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3686 if (atsr->segment != tmp->segment)
3688 if (atsr->header.length != tmp->header.length)
3690 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3697 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3699 struct acpi_dmar_atsr *atsr;
3700 struct dmar_atsr_unit *atsru;
3702 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
3705 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3706 atsru = dmar_find_atsr(atsr);
3710 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
3715 * If memory is allocated from slab by ACPI _DSM method, we need to
3716 * copy the memory content because the memory buffer will be freed
3719 atsru->hdr = (void *)(atsru + 1);
3720 memcpy(atsru->hdr, hdr, hdr->length);
3721 atsru->include_all = atsr->flags & 0x1;
3722 if (!atsru->include_all) {
3723 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3724 (void *)atsr + atsr->header.length,
3725 &atsru->devices_cnt);
3726 if (atsru->devices_cnt && atsru->devices == NULL) {
3732 list_add_rcu(&atsru->list, &dmar_atsr_units);
3737 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3739 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3743 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3745 struct acpi_dmar_atsr *atsr;
3746 struct dmar_atsr_unit *atsru;
3748 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3749 atsru = dmar_find_atsr(atsr);
3751 list_del_rcu(&atsru->list);
3753 intel_iommu_free_atsr(atsru);
3759 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3763 struct acpi_dmar_atsr *atsr;
3764 struct dmar_atsr_unit *atsru;
3766 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3767 atsru = dmar_find_atsr(atsr);
3771 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
3772 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3780 static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
3782 struct dmar_satc_unit *satcu;
3783 struct acpi_dmar_satc *tmp;
3785 list_for_each_entry_rcu(satcu, &dmar_satc_units, list,
3787 tmp = (struct acpi_dmar_satc *)satcu->hdr;
3788 if (satc->segment != tmp->segment)
3790 if (satc->header.length != tmp->header.length)
3792 if (memcmp(satc, tmp, satc->header.length) == 0)
3799 int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
3801 struct acpi_dmar_satc *satc;
3802 struct dmar_satc_unit *satcu;
3804 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
3807 satc = container_of(hdr, struct acpi_dmar_satc, header);
3808 satcu = dmar_find_satc(satc);
3812 satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL);
3816 satcu->hdr = (void *)(satcu + 1);
3817 memcpy(satcu->hdr, hdr, hdr->length);
3818 satcu->atc_required = satc->flags & 0x1;
3819 satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1),
3820 (void *)satc + satc->header.length,
3821 &satcu->devices_cnt);
3822 if (satcu->devices_cnt && !satcu->devices) {
3826 list_add_rcu(&satcu->list, &dmar_satc_units);
3831 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3834 struct intel_iommu *iommu = dmaru->iommu;
3836 if (g_iommus[iommu->seq_id])
3839 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
3843 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3844 pr_warn("%s: Doesn't support hardware pass through.\n",
3848 if (!ecap_sc_support(iommu->ecap) &&
3849 domain_update_iommu_snooping(iommu)) {
3850 pr_warn("%s: Doesn't support snooping.\n",
3854 sp = domain_update_iommu_superpage(NULL, iommu) - 1;
3855 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3856 pr_warn("%s: Doesn't support large page.\n",
3862 * Disable translation if already enabled prior to OS handover.
3864 if (iommu->gcmd & DMA_GCMD_TE)
3865 iommu_disable_translation(iommu);
3867 g_iommus[iommu->seq_id] = iommu;
3868 ret = iommu_init_domains(iommu);
3870 ret = iommu_alloc_root_entry(iommu);
3874 intel_svm_check(iommu);
3876 if (dmaru->ignored) {
3878 * we always have to disable PMRs or DMA may fail on this device
3881 iommu_disable_protect_mem_regions(iommu);
3885 intel_iommu_init_qi(iommu);
3886 iommu_flush_write_buffer(iommu);
3888 #ifdef CONFIG_INTEL_IOMMU_SVM
3889 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3890 ret = intel_svm_enable_prq(iommu);
3895 ret = dmar_set_interrupt(iommu);
3899 iommu_set_root_entry(iommu);
3900 iommu_enable_translation(iommu);
3902 iommu_disable_protect_mem_regions(iommu);
3906 disable_dmar_iommu(iommu);
3908 free_dmar_iommu(iommu);
3912 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3915 struct intel_iommu *iommu = dmaru->iommu;
3917 if (!intel_iommu_enabled)
3923 ret = intel_iommu_add(dmaru);
3925 disable_dmar_iommu(iommu);
3926 free_dmar_iommu(iommu);
3932 static void intel_iommu_free_dmars(void)
3934 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3935 struct dmar_atsr_unit *atsru, *atsr_n;
3936 struct dmar_satc_unit *satcu, *satc_n;
3938 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3939 list_del(&rmrru->list);
3940 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3944 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3945 list_del(&atsru->list);
3946 intel_iommu_free_atsr(atsru);
3948 list_for_each_entry_safe(satcu, satc_n, &dmar_satc_units, list) {
3949 list_del(&satcu->list);
3950 dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt);
3955 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3958 struct pci_bus *bus;
3959 struct pci_dev *bridge = NULL;
3961 struct acpi_dmar_atsr *atsr;
3962 struct dmar_atsr_unit *atsru;
3964 dev = pci_physfn(dev);
3965 for (bus = dev->bus; bus; bus = bus->parent) {
3967 /* If it's an integrated device, allow ATS */
3970 /* Connected via non-PCIe: no ATS */
3971 if (!pci_is_pcie(bridge) ||
3972 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3974 /* If we found the root port, look it up in the ATSR */
3975 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3980 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3981 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3982 if (atsr->segment != pci_domain_nr(dev->bus))
3985 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3986 if (tmp == &bridge->dev)
3989 if (atsru->include_all)
3999 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4002 struct dmar_rmrr_unit *rmrru;
4003 struct dmar_atsr_unit *atsru;
4004 struct dmar_satc_unit *satcu;
4005 struct acpi_dmar_atsr *atsr;
4006 struct acpi_dmar_reserved_memory *rmrr;
4007 struct acpi_dmar_satc *satc;
4009 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4012 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4013 rmrr = container_of(rmrru->hdr,
4014 struct acpi_dmar_reserved_memory, header);
4015 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4016 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4017 ((void *)rmrr) + rmrr->header.length,
4018 rmrr->segment, rmrru->devices,
4019 rmrru->devices_cnt);
4022 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4023 dmar_remove_dev_scope(info, rmrr->segment,
4024 rmrru->devices, rmrru->devices_cnt);
4028 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4029 if (atsru->include_all)
4032 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4033 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4034 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4035 (void *)atsr + atsr->header.length,
4036 atsr->segment, atsru->devices,
4037 atsru->devices_cnt);
4042 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4043 if (dmar_remove_dev_scope(info, atsr->segment,
4044 atsru->devices, atsru->devices_cnt))
4048 list_for_each_entry(satcu, &dmar_satc_units, list) {
4049 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
4050 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4051 ret = dmar_insert_dev_scope(info, (void *)(satc + 1),
4052 (void *)satc + satc->header.length,
4053 satc->segment, satcu->devices,
4054 satcu->devices_cnt);
4059 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4060 if (dmar_remove_dev_scope(info, satc->segment,
4061 satcu->devices, satcu->devices_cnt))
4069 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4070 unsigned long val, void *v)
4072 struct memory_notify *mhp = v;
4073 unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4074 unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
4078 case MEM_GOING_ONLINE:
4079 if (iommu_domain_identity_map(si_domain,
4080 start_vpfn, last_vpfn)) {
4081 pr_warn("Failed to build identity map for [%lx-%lx]\n",
4082 start_vpfn, last_vpfn);
4088 case MEM_CANCEL_ONLINE:
4090 struct dmar_drhd_unit *drhd;
4091 struct intel_iommu *iommu;
4092 struct page *freelist;
4094 freelist = domain_unmap(si_domain,
4095 start_vpfn, last_vpfn,
4099 for_each_active_iommu(iommu, drhd)
4100 iommu_flush_iotlb_psi(iommu, si_domain,
4101 start_vpfn, mhp->nr_pages,
4104 dma_free_pagelist(freelist);
4112 static struct notifier_block intel_iommu_memory_nb = {
4113 .notifier_call = intel_iommu_memory_notifier,
4117 static void intel_disable_iommus(void)
4119 struct intel_iommu *iommu = NULL;
4120 struct dmar_drhd_unit *drhd;
4122 for_each_iommu(iommu, drhd)
4123 iommu_disable_translation(iommu);
4126 void intel_iommu_shutdown(void)
4128 struct dmar_drhd_unit *drhd;
4129 struct intel_iommu *iommu = NULL;
4131 if (no_iommu || dmar_disabled)
4134 down_write(&dmar_global_lock);
4136 /* Disable PMRs explicitly here. */
4137 for_each_iommu(iommu, drhd)
4138 iommu_disable_protect_mem_regions(iommu);
4140 /* Make sure the IOMMUs are switched off */
4141 intel_disable_iommus();
4143 up_write(&dmar_global_lock);
4146 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4148 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4150 return container_of(iommu_dev, struct intel_iommu, iommu);
4153 static ssize_t intel_iommu_show_version(struct device *dev,
4154 struct device_attribute *attr,
4157 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4158 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4159 return sprintf(buf, "%d:%d\n",
4160 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4162 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4164 static ssize_t intel_iommu_show_address(struct device *dev,
4165 struct device_attribute *attr,
4168 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4169 return sprintf(buf, "%llx\n", iommu->reg_phys);
4171 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4173 static ssize_t intel_iommu_show_cap(struct device *dev,
4174 struct device_attribute *attr,
4177 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4178 return sprintf(buf, "%llx\n", iommu->cap);
4180 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4182 static ssize_t intel_iommu_show_ecap(struct device *dev,
4183 struct device_attribute *attr,
4186 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4187 return sprintf(buf, "%llx\n", iommu->ecap);
4189 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4191 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4192 struct device_attribute *attr,
4195 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4196 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4198 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4200 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4201 struct device_attribute *attr,
4204 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4205 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4206 cap_ndoms(iommu->cap)));
4208 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
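/*
 * These attributes are typically exposed under
 * /sys/class/iommu/dmar<N>/intel-iommu/ once the iommu device is
 * registered in intel_iommu_init(), e.g.
 * "cat /sys/class/iommu/dmar0/intel-iommu/version"
 * (path shown for illustration; the unit name depends on the platform).
 */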
4210 static struct attribute *intel_iommu_attrs[] = {
4211 &dev_attr_version.attr,
4212 &dev_attr_address.attr,
4214 &dev_attr_ecap.attr,
4215 &dev_attr_domains_supported.attr,
4216 &dev_attr_domains_used.attr,
4220 static struct attribute_group intel_iommu_group = {
4221 .name = "intel-iommu",
4222 .attrs = intel_iommu_attrs,
4225 const struct attribute_group *intel_iommu_groups[] = {
4230 static inline bool has_external_pci(void)
4232 struct pci_dev *pdev = NULL;
4234 for_each_pci_dev(pdev)
4235 if (pdev->external_facing)
4241 static int __init platform_optin_force_iommu(void)
4243 if (!dmar_platform_optin() || no_platform_optin || !has_external_pci())
4246 if (no_iommu || dmar_disabled)
4247 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4250 * If Intel-IOMMU is disabled by default, we will apply identity
4251 * map for all devices except those marked as being untrusted.
4254 iommu_set_default_passthrough(false);
4262 static int __init probe_acpi_namespace_devices(void)
4264 struct dmar_drhd_unit *drhd;
4265 /* To avoid a -Wunused-but-set-variable warning. */
4266 struct intel_iommu *iommu __maybe_unused;
4270 for_each_active_iommu(iommu, drhd) {
4271 for_each_active_dev_scope(drhd->devices,
4272 drhd->devices_cnt, i, dev) {
4273 struct acpi_device_physical_node *pn;
4274 struct iommu_group *group;
4275 struct acpi_device *adev;
4277 if (dev->bus != &acpi_bus_type)
4280 adev = to_acpi_device(dev);
4281 mutex_lock(&adev->physical_node_lock);
4282 list_for_each_entry(pn,
4283 &adev->physical_node_list, node) {
4284 group = iommu_group_get(pn->dev);
4286 iommu_group_put(group);
4290 pn->dev->bus->iommu_ops = &intel_iommu_ops;
4291 ret = iommu_probe_device(pn->dev);
4295 mutex_unlock(&adev->physical_node_lock);
4305 int __init intel_iommu_init(void)
4308 struct dmar_drhd_unit *drhd;
4309 struct intel_iommu *iommu;
4312 * Intel IOMMU is required for a TXT/tboot launch or platform
4313 * opt in, so enforce that.
4315 force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
4316 platform_optin_force_iommu();
4318 if (iommu_init_mempool()) {
4320 panic("tboot: Failed to initialize iommu memory\n");
4324 down_write(&dmar_global_lock);
4325 if (dmar_table_init()) {
4327 panic("tboot: Failed to initialize DMAR table\n");
4331 if (dmar_dev_scope_init() < 0) {
4333 panic("tboot: Failed to initialize DMAR device scope\n");
4337 up_write(&dmar_global_lock);
4340 * The bus notifier takes the dmar_global_lock, so lockdep will
4341 * complain later when we register it under the lock.
4343 dmar_register_bus_notifier();
4345 down_write(&dmar_global_lock);
4348 intel_iommu_debugfs_init();
4350 if (no_iommu || dmar_disabled) {
4352 * We exit the function here to ensure IOMMU's remapping and
4353 * mempool aren't set up, which means that the IOMMU's PMRs
4354 * won't be disabled via the call to init_dmars(). So disable
4355 * them explicitly here. The PMRs were set up by tboot prior to
4356 * calling SENTER, but the kernel is expected to reset/tear
4359 if (intel_iommu_tboot_noforce) {
4360 for_each_iommu(iommu, drhd)
4361 iommu_disable_protect_mem_regions(iommu);
4365 * Make sure the IOMMUs are switched off, even when we
4366 * boot into a kexec kernel and the previous kernel left
4369 intel_disable_iommus();
4373 if (list_empty(&dmar_rmrr_units))
4374 pr_info("No RMRR found\n");
4376 if (list_empty(&dmar_atsr_units))
4377 pr_info("No ATSR found\n");
4379 if (list_empty(&dmar_satc_units))
4380 pr_info("No SATC found\n");
4383 intel_iommu_gfx_mapped = 1;
4385 init_no_remapping_devices();
4390 panic("tboot: Failed to initialize DMARs\n");
4391 pr_err("Initialization failed\n");
4394 up_write(&dmar_global_lock);
4396 init_iommu_pm_ops();
4398 down_read(&dmar_global_lock);
4399 for_each_active_iommu(iommu, drhd) {
4401 * The flush queue implementation does not perform
4402 * page-selective invalidations that are required for efficient
4403 * TLB flushes in virtual environments. The benefit of batching
4404 * is likely to be much lower than the overhead of synchronizing
4405 * the virtual and physical IOMMU page-tables.
4407 if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
4408 pr_warn("IOMMU batching is disabled due to virtualization");
4409 intel_iommu_strict = 1;
4411 iommu_device_sysfs_add(&iommu->iommu, NULL,
4414 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
4416 up_read(&dmar_global_lock);
4418 iommu_set_dma_strict(intel_iommu_strict);
4419 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4420 if (si_domain && !hw_pass_through)
4421 register_memory_notifier(&intel_iommu_memory_nb);
4423 down_read(&dmar_global_lock);
4424 if (probe_acpi_namespace_devices())
4425 pr_warn("ACPI name space devices didn't probe correctly\n");
4427 /* Finally, we enable the DMA remapping hardware. */
4428 for_each_iommu(iommu, drhd) {
4429 if (!drhd->ignored && !translation_pre_enabled(iommu))
4430 iommu_enable_translation(iommu);
4432 iommu_disable_protect_mem_regions(iommu);
4434 up_read(&dmar_global_lock);
4436 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4438 intel_iommu_enabled = 1;
4443 intel_iommu_free_dmars();
4444 up_write(&dmar_global_lock);
4445 iommu_exit_mempool();
4449 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4451 struct device_domain_info *info = opaque;
4453 domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff);
4458 * NB - intel-iommu lacks any sort of reference counting for the users of
4459 * dependent devices. If multiple endpoints have intersecting dependent
4460 * devices, unbinding the driver from any one of them will possibly leave
4461 * the others unable to operate.
4463 static void domain_context_clear(struct device_domain_info *info)
4465 if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
4468 pci_for_each_dma_alias(to_pci_dev(info->dev),
4469 &domain_context_clear_one_cb, info);
4472 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4474 struct dmar_domain *domain;
4475 struct intel_iommu *iommu;
4476 unsigned long flags;
4478 assert_spin_locked(&device_domain_lock);
4483 iommu = info->iommu;
4484 domain = info->domain;
4486 if (info->dev && !dev_is_real_dma_subdevice(info->dev)) {
4487 if (dev_is_pci(info->dev) && sm_supported(iommu))
4488 intel_pasid_tear_down_entry(iommu, info->dev,
4489 PASID_RID2PASID, false);
4491 iommu_disable_dev_iotlb(info);
4492 domain_context_clear(info);
4493 intel_pasid_free_table(info->dev);
4496 unlink_domain_info(info);
4498 spin_lock_irqsave(&iommu->lock, flags);
4499 domain_detach_iommu(domain, iommu);
4500 spin_unlock_irqrestore(&iommu->lock, flags);
4502 free_devinfo_mem(info);
4505 static void dmar_remove_one_dev_info(struct device *dev)
4507 struct device_domain_info *info;
4508 unsigned long flags;
4510 spin_lock_irqsave(&device_domain_lock, flags);
4511 info = get_domain_info(dev);
4513 __dmar_remove_one_dev_info(info);
4514 spin_unlock_irqrestore(&device_domain_lock, flags);
4517 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4521 /* calculate AGAW */
4522 domain->gaw = guest_width;
4523 adjust_width = guestwidth_to_adjustwidth(guest_width);
4524 domain->agaw = width_to_agaw(adjust_width);
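/*
 * Sizing sketch (assuming the usual 9-bit stride per level): a
 * guest_width of 48 bits needs no rounding and maps to a 4-level page
 * table, while 39 bits would map to a 3-level table.
 */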
4526 domain->iommu_coherency = 0;
4527 domain->iommu_snooping = 0;
4528 domain->iommu_superpage = 0;
4529 domain->max_addr = 0;
4531 /* always allocate the top pgd */
4532 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4535 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4539 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4541 struct dmar_domain *dmar_domain;
4542 struct iommu_domain *domain;
4545 case IOMMU_DOMAIN_DMA:
4546 case IOMMU_DOMAIN_UNMANAGED:
4547 dmar_domain = alloc_domain(0);
4549 pr_err("Can't allocate dmar_domain\n");
4552 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4553 pr_err("Domain initialization failed\n");
4554 domain_exit(dmar_domain);
4558 if (type == IOMMU_DOMAIN_DMA &&
4559 iommu_get_dma_cookie(&dmar_domain->domain))
4562 domain = &dmar_domain->domain;
4563 domain->geometry.aperture_start = 0;
4564 domain->geometry.aperture_end =
4565 __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4566 domain->geometry.force_aperture = true;
4569 case IOMMU_DOMAIN_IDENTITY:
4570 return &si_domain->domain;
4578 static void intel_iommu_domain_free(struct iommu_domain *domain)
4580 if (domain != &si_domain->domain)
4581 domain_exit(to_dmar_domain(domain));
4585 * Check whether a @domain could be attached to the @dev through the
4586 * aux-domain attach/detach APIs.
4589 is_aux_domain(struct device *dev, struct iommu_domain *domain)
4591 struct device_domain_info *info = get_domain_info(dev);
4593 return info && info->auxd_enabled &&
4594 domain->type == IOMMU_DOMAIN_UNMANAGED;
4597 static inline struct subdev_domain_info *
4598 lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
4600 struct subdev_domain_info *sinfo;
4602 if (!list_empty(&domain->subdevices)) {
4603 list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
4604 if (sinfo->pdev == dev)
4612 static int auxiliary_link_device(struct dmar_domain *domain,
4615 struct device_domain_info *info = get_domain_info(dev);
4616 struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
4618 assert_spin_locked(&device_domain_lock);
4623 sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
4626 sinfo->domain = domain;
4628 list_add(&sinfo->link_phys, &info->subdevices);
4629 list_add(&sinfo->link_domain, &domain->subdevices);
4632 return ++sinfo->users;
4635 static int auxiliary_unlink_device(struct dmar_domain *domain,
4638 struct device_domain_info *info = get_domain_info(dev);
4639 struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
4642 assert_spin_locked(&device_domain_lock);
4643 if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
4646 ret = --sinfo->users;
4648 list_del(&sinfo->link_phys);
4649 list_del(&sinfo->link_domain);
4656 static int aux_domain_add_dev(struct dmar_domain *domain,
4660 unsigned long flags;
4661 struct intel_iommu *iommu;
4663 iommu = device_to_iommu(dev, NULL, NULL);
4667 if (domain->default_pasid <= 0) {
4670 /* No private data needed for the default pasid */
4671 pasid = ioasid_alloc(NULL, PASID_MIN,
4672 pci_max_pasids(to_pci_dev(dev)) - 1,
4674 if (pasid == INVALID_IOASID) {
4675 pr_err("Can't allocate default pasid\n");
4678 domain->default_pasid = pasid;
4681 spin_lock_irqsave(&device_domain_lock, flags);
4682 ret = auxiliary_link_device(domain, dev);
4687 * Subdevices from the same physical device can be attached to the
4688 * same domain. For such cases, only the first subdevice attachment
4689 * needs to go through the full steps in this function. So if ret >
4696 * iommu->lock must be held to attach domain to iommu and setup the
4697 * pasid entry for second level translation.
4699 spin_lock(&iommu->lock);
4700 ret = domain_attach_iommu(domain, iommu);
4704 /* Setup the PASID entry for mediated devices: */
4705 if (domain_use_first_level(domain))
4706 ret = domain_setup_first_level(iommu, domain, dev,
4707 domain->default_pasid);
4709 ret = intel_pasid_setup_second_level(iommu, domain, dev,
4710 domain->default_pasid);
4714 spin_unlock(&iommu->lock);
4716 spin_unlock_irqrestore(&device_domain_lock, flags);
4721 domain_detach_iommu(domain, iommu);
4723 spin_unlock(&iommu->lock);
4724 auxiliary_unlink_device(domain, dev);
4726 spin_unlock_irqrestore(&device_domain_lock, flags);
4727 if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
4728 ioasid_put(domain->default_pasid);
4733 static void aux_domain_remove_dev(struct dmar_domain *domain,
4736 struct device_domain_info *info;
4737 struct intel_iommu *iommu;
4738 unsigned long flags;
4740 if (!is_aux_domain(dev, &domain->domain))
4743 spin_lock_irqsave(&device_domain_lock, flags);
4744 info = get_domain_info(dev);
4745 iommu = info->iommu;
4747 if (!auxiliary_unlink_device(domain, dev)) {
4748 spin_lock(&iommu->lock);
4749 intel_pasid_tear_down_entry(iommu, dev,
4750 domain->default_pasid, false);
4751 domain_detach_iommu(domain, iommu);
4752 spin_unlock(&iommu->lock);
4755 spin_unlock_irqrestore(&device_domain_lock, flags);
4757 if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
4758 ioasid_put(domain->default_pasid);
4761 static int prepare_domain_attach_device(struct iommu_domain *domain,
4764 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4765 struct intel_iommu *iommu;
4768 iommu = device_to_iommu(dev, NULL, NULL);
4772 /* check if this iommu agaw is sufficient for max mapped address */
4773 addr_width = agaw_to_width(iommu->agaw);
4774 if (addr_width > cap_mgaw(iommu->cap))
4775 addr_width = cap_mgaw(iommu->cap);
4777 if (dmar_domain->max_addr > (1LL << addr_width)) {
4778 dev_err(dev, "%s: iommu width (%d) is not "
4779 "sufficient for the mapped address (%llx)\n",
4780 __func__, addr_width, dmar_domain->max_addr);
4783 dmar_domain->gaw = addr_width;
4786 * Knock out extra levels of page tables if necessary
4788 while (iommu->agaw < dmar_domain->agaw) {
4789 struct dma_pte *pte;
4791 pte = dmar_domain->pgd;
4792 if (dma_pte_present(pte)) {
4793 dmar_domain->pgd = (struct dma_pte *)
4794 phys_to_virt(dma_pte_addr(pte));
4795 free_pgtable_page(pte);
4797 dmar_domain->agaw--;
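/*
 * Sketch of the loop above: if the domain was built with a 4-level
 * table (48-bit width) but this iommu only supports 3 levels (39 bits),
 * one top level is peeled off per iteration by replacing pgd with the
 * table referenced by its first entry - the only entry that can be
 * populated, since max_addr was checked to fit the smaller width.
 */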
4803 static int intel_iommu_attach_device(struct iommu_domain *domain,
4808 if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
4809 device_is_rmrr_locked(dev)) {
4810 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4814 if (is_aux_domain(dev, domain))
4817 /* normally dev is not mapped */
4818 if (unlikely(domain_context_mapped(dev))) {
4819 struct dmar_domain *old_domain;
4821 old_domain = find_domain(dev);
4823 dmar_remove_one_dev_info(dev);
4826 ret = prepare_domain_attach_device(domain, dev);
4830 return domain_add_dev_info(to_dmar_domain(domain), dev);
4833 static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
4838 if (!is_aux_domain(dev, domain))
4841 ret = prepare_domain_attach_device(domain, dev);
4845 return aux_domain_add_dev(to_dmar_domain(domain), dev);
4848 static void intel_iommu_detach_device(struct iommu_domain *domain,
4851 dmar_remove_one_dev_info(dev);
4854 static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
4857 aux_domain_remove_dev(to_dmar_domain(domain), dev);
4860 #ifdef CONFIG_INTEL_IOMMU_SVM
4862 * 2D array for converting and sanitizing IOMMU generic TLB granularity to
4863 * VT-d granularity. Invalidation is typically included in the unmap operation
4864 * as a result of a DMA or VFIO unmap. However, for assigned devices the
4865 * guest owns the first-level page tables. Invalidations of translation caches in the
4866 * guest are trapped and passed down to the host.
4868 * vIOMMU in the guest will only expose first level page tables, therefore
4869 * we do not support IOTLB granularity for requests without PASID (second level).
4871 * For example, to find the VT-d granularity encoding for IOTLB
4872 * type and page selective granularity within PASID:
4873 * X: indexed by iommu cache type
4874 * Y: indexed by enum iommu_inv_granularity
4875 * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
4879 inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
4881 * PASID based IOTLB invalidation: PASID selective (per PASID),
4882 * page selective (address granularity)
4884 {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
4885 /* PASID based dev TLBs */
4886 {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
4888 {-EINVAL, -EINVAL, -EINVAL}
4891 static inline int to_vtd_granularity(int type, int granu)
4893 return inv_type_granu_table[type][granu];
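/*
 * Example lookup from the table above: type IOMMU_CACHE_INV_TYPE_IOTLB
 * with granularity IOMMU_INV_GRANU_ADDR (first row, third column)
 * selects QI_GRAN_PSI_PASID, i.e. a page-selective, PASID-based flush.
 */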
4896 static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
4898 u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
4900 /* VT-d size is encoded as 2^size of 4K pages: 0 for 4K, 9 for 2MB, etc.
4901 * The IOMMU cache invalidate API passes granu_size in bytes, and the
4902 * number of granules in contiguous memory.
4904 return order_base_2(nr_pages);
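/*
 * For example, granu_size = 4096 and nr_granules = 512 give
 * nr_pages = 512, so order_base_2(512) = 9 is returned - the 2MB
 * encoding mentioned above.
 */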
4908 intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
4909 struct iommu_cache_invalidate_info *inv_info)
4911 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4912 struct device_domain_info *info;
4913 struct intel_iommu *iommu;
4914 unsigned long flags;
4921 if (!inv_info || !dmar_domain)
4924 if (!dev || !dev_is_pci(dev))
4927 iommu = device_to_iommu(dev, &bus, &devfn);
4931 if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
4934 spin_lock_irqsave(&device_domain_lock, flags);
4935 spin_lock(&iommu->lock);
4936 info = get_domain_info(dev);
4941 did = dmar_domain->iommu_did[iommu->seq_id];
4942 sid = PCI_DEVID(bus, devfn);
4944 /* Size is only valid in address selective invalidation */
4945 if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
4946 size = to_vtd_size(inv_info->granu.addr_info.granule_size,
4947 inv_info->granu.addr_info.nb_granules);
4949 for_each_set_bit(cache_type,
4950 (unsigned long *)&inv_info->cache,
4951 IOMMU_CACHE_INV_TYPE_NR) {
4956 granu = to_vtd_granularity(cache_type, inv_info->granularity);
4957 if (granu == -EINVAL) {
4958 pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
4959 cache_type, inv_info->granularity);
4964 		 * PASID is stored in different locations based on the granularity.
4967 if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
4968 (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
4969 pasid = inv_info->granu.pasid_info.pasid;
4970 else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
4971 (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
4972 pasid = inv_info->granu.addr_info.pasid;
4974 switch (BIT(cache_type)) {
4975 case IOMMU_CACHE_INV_TYPE_IOTLB:
4976 /* HW will ignore LSB bits based on address mask */
4977 if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
4979 (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
4980 pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
4981 inv_info->granu.addr_info.addr, size);
4985 * If granu is PASID-selective, address is ignored.
4986 * We use npages = -1 to indicate that.
4988 qi_flush_piotlb(iommu, did, pasid,
4989 mm_to_dma_pfn(inv_info->granu.addr_info.addr),
4990 (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
4991 inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
4993 if (!info->ats_enabled)
4996 * Always flush device IOTLB if ATS is enabled. vIOMMU
4997 * in the guest may assume IOTLB flush is inclusive,
4998 * which is more efficient.
5001 case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
5003 * PASID based device TLB invalidation does not support
5004 * IOMMU_INV_GRANU_PASID granularity but only supports
5005 * IOMMU_INV_GRANU_ADDR.
5006 			 * The equivalent is to set the size to cover the entire
5007 			 * 64-bit address range. The user only provides PASID info
5008 			 * without address info, so we set addr to 0.
5010 if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
5011 size = 64 - VTD_PAGE_SHIFT;
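			/*
			 * Illustrative: with the usual VTD_PAGE_SHIFT of 12 this is a
			 * size order of 52, i.e. 2^52 4KiB pages covering the whole
			 * 64-bit address space.
			 */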
5013 } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
5014 addr = inv_info->granu.addr_info.addr;
5017 if (info->ats_enabled)
5018 qi_flush_dev_iotlb_pasid(iommu, sid,
5020 info->ats_qdep, addr,
5023 pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
5026 dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
5032 spin_unlock(&iommu->lock);
5033 spin_unlock_irqrestore(&device_domain_lock, flags);
5039 static int intel_iommu_map(struct iommu_domain *domain,
5040 unsigned long iova, phys_addr_t hpa,
5041 size_t size, int iommu_prot, gfp_t gfp)
5043 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5047 if (iommu_prot & IOMMU_READ)
5048 prot |= DMA_PTE_READ;
5049 if (iommu_prot & IOMMU_WRITE)
5050 prot |= DMA_PTE_WRITE;
5051 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5052 prot |= DMA_PTE_SNP;
5054 max_addr = iova + size;
5055 if (dmar_domain->max_addr < max_addr) {
5058 /* check if minimum agaw is sufficient for mapped address */
5059 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5060 if (end < max_addr) {
5061 			pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
5062 			       __func__, dmar_domain->gaw, max_addr);
5066 dmar_domain->max_addr = max_addr;
5068 /* Round up size to next multiple of PAGE_SIZE, if it and
5069 the low bits of hpa would take us onto the next page */
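	/*
	 * Illustrative: hpa = 0x1ffc with size = 8 straddles a page boundary,
	 * so aligned_nrpages() below returns 2 even though size itself is
	 * smaller than one page.
	 */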
5070 size = aligned_nrpages(hpa, size);
5071 return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5072 hpa >> VTD_PAGE_SHIFT, size, prot);
5075 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5076 unsigned long iova, size_t size,
5077 struct iommu_iotlb_gather *gather)
5079 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5080 unsigned long start_pfn, last_pfn;
5083 /* Cope with horrid API which requires us to unmap more than the
5084 size argument if it happens to be a large-page mapping. */
5085 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5087 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5088 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
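	/*
	 * Illustrative: if the IOVA is covered by a 2MiB superpage (level 2,
	 * 9 offset bits), a 4KiB unmap request is widened here to the full
	 * 2MiB mapping before start_pfn and last_pfn are computed below.
	 */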
5090 start_pfn = iova >> VTD_PAGE_SHIFT;
5091 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5093 gather->freelist = domain_unmap(dmar_domain, start_pfn,
5094 last_pfn, gather->freelist);
5096 if (dmar_domain->max_addr == iova + size)
5097 dmar_domain->max_addr = iova;
5099 iommu_iotlb_gather_add_page(domain, gather, iova, size);
5104 static void intel_iommu_tlb_sync(struct iommu_domain *domain,
5105 struct iommu_iotlb_gather *gather)
5107 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5108 unsigned long iova_pfn = IOVA_PFN(gather->start);
5109 size_t size = gather->end - gather->start;
5110 unsigned long start_pfn;
5111 unsigned long nrpages;
5114 nrpages = aligned_nrpages(gather->start, size);
5115 start_pfn = mm_to_dma_pfn(iova_pfn);
5117 for_each_domain_iommu(iommu_id, dmar_domain)
5118 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5119 start_pfn, nrpages, !gather->freelist, 0);
5121 dma_free_pagelist(gather->freelist);
5124 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5127 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5128 struct dma_pte *pte;
5132 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5133 if (pte && dma_pte_present(pte))
5134 phys = dma_pte_addr(pte) +
5135 (iova & (BIT_MASK(level_to_offset_bits(level) +
5136 VTD_PAGE_SHIFT) - 1));
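	/*
	 * Illustrative: for a PTE found at level 1 the mask keeps the low
	 * 12 bits of the IOVA (offset within a 4KiB page); for a 2MiB
	 * superpage at level 2 it keeps the low 21 bits.
	 */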
5141 static bool intel_iommu_capable(enum iommu_cap cap)
5143 if (cap == IOMMU_CAP_CACHE_COHERENCY)
5144 return domain_update_iommu_snooping(NULL) == 1;
5145 if (cap == IOMMU_CAP_INTR_REMAP)
5146 return irq_remapping_enabled == 1;
5151 static struct iommu_device *intel_iommu_probe_device(struct device *dev)
5153 struct intel_iommu *iommu;
5155 iommu = device_to_iommu(dev, NULL, NULL);
5157 return ERR_PTR(-ENODEV);
5159 if (translation_pre_enabled(iommu))
5160 dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
5162 return &iommu->iommu;
5165 static void intel_iommu_release_device(struct device *dev)
5167 struct intel_iommu *iommu;
5169 iommu = device_to_iommu(dev, NULL, NULL);
5173 dmar_remove_one_dev_info(dev);
5175 set_dma_ops(dev, NULL);
5178 static void intel_iommu_probe_finalize(struct device *dev)
5180 dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
5181 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
5182 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
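	/*
	 * Editorial note: IOVA_START_PFN is 1, so with the usual 4KiB
	 * VTD_PAGE_SHIFT base is 0x1000 and the DMA-API aperture handed to
	 * iommu_setup_dma_ops() below skips the first 4KiB of IOVA space.
	 */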
5184 if (domain && domain->type == IOMMU_DOMAIN_DMA)
5185 iommu_setup_dma_ops(dev, base,
5186 __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
5188 set_dma_ops(dev, NULL);
5191 static void intel_iommu_get_resv_regions(struct device *device,
5192 struct list_head *head)
5194 int prot = DMA_PTE_READ | DMA_PTE_WRITE;
5195 struct iommu_resv_region *reg;
5196 struct dmar_rmrr_unit *rmrr;
5197 struct device *i_dev;
5200 down_read(&dmar_global_lock);
5201 for_each_rmrr_units(rmrr) {
5202 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5204 struct iommu_resv_region *resv;
5205 enum iommu_resv_type type;
5208 if (i_dev != device &&
5209 !is_downstream_to_pci_bridge(device, i_dev))
5212 length = rmrr->end_address - rmrr->base_address + 1;
5214 type = device_rmrr_is_relaxable(device) ?
5215 IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
5217 resv = iommu_alloc_resv_region(rmrr->base_address,
5218 length, prot, type);
5222 list_add_tail(&resv->list, head);
5225 up_read(&dmar_global_lock);
5227 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
5228 if (dev_is_pci(device)) {
5229 struct pci_dev *pdev = to_pci_dev(device);
5231 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
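		/*
		 * Editorial note: for a legacy ISA bridge this reserves a
		 * relaxable direct mapping of the low 16MiB (1UL << 24 bytes),
		 * the range legacy ISA/floppy DMA can reach, as the
		 * CONFIG_INTEL_IOMMU_FLOPPY_WA name suggests.
		 */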
5232 reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
5233 IOMMU_RESV_DIRECT_RELAXABLE);
5235 				list_add_tail(&reg->list, head);
5238 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
5240 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5241 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5245 	list_add_tail(&reg->list, head);
5248 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
5250 struct device_domain_info *info;
5251 struct context_entry *context;
5252 struct dmar_domain *domain;
5253 unsigned long flags;
5257 domain = find_domain(dev);
5261 spin_lock_irqsave(&device_domain_lock, flags);
5262 spin_lock(&iommu->lock);
5265 info = get_domain_info(dev);
5266 if (!info || !info->pasid_supported)
5269 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5270 if (WARN_ON(!context))
5273 ctx_lo = context[0].lo;
5275 if (!(ctx_lo & CONTEXT_PASIDE)) {
5276 ctx_lo |= CONTEXT_PASIDE;
5277 context[0].lo = ctx_lo;
5279 iommu->flush.flush_context(iommu,
5280 domain->iommu_did[iommu->seq_id],
5281 PCI_DEVID(info->bus, info->devfn),
5282 DMA_CCMD_MASK_NOBIT,
5283 DMA_CCMD_DEVICE_INVL);
5286 /* Enable PASID support in the device, if it wasn't already */
5287 if (!info->pasid_enabled)
5288 iommu_enable_dev_iotlb(info);
5293 spin_unlock(&iommu->lock);
5294 spin_unlock_irqrestore(&device_domain_lock, flags);
5299 static struct iommu_group *intel_iommu_device_group(struct device *dev)
5301 if (dev_is_pci(dev))
5302 return pci_device_group(dev);
5303 return generic_device_group(dev);
5306 static int intel_iommu_enable_auxd(struct device *dev)
5308 struct device_domain_info *info;
5309 struct intel_iommu *iommu;
5310 unsigned long flags;
5313 iommu = device_to_iommu(dev, NULL, NULL);
5314 if (!iommu || dmar_disabled)
5317 if (!sm_supported(iommu) || !pasid_supported(iommu))
5320 ret = intel_iommu_enable_pasid(iommu, dev);
5324 spin_lock_irqsave(&device_domain_lock, flags);
5325 info = get_domain_info(dev);
5326 info->auxd_enabled = 1;
5327 spin_unlock_irqrestore(&device_domain_lock, flags);
5332 static int intel_iommu_disable_auxd(struct device *dev)
5334 struct device_domain_info *info;
5335 unsigned long flags;
5337 spin_lock_irqsave(&device_domain_lock, flags);
5338 info = get_domain_info(dev);
5339 if (!WARN_ON(!info))
5340 info->auxd_enabled = 0;
5341 spin_unlock_irqrestore(&device_domain_lock, flags);
5347  * A PCI Express Designated Vendor-Specific Extended Capability is defined
5348  * in section 3.7 of the Intel Scalable I/O Virtualization technical spec
5349  * so that system software and tools can detect endpoint devices supporting
5350  * Intel Scalable I/O Virtualization without a host driver dependency.
5352 * Returns the address of the matching extended capability structure within
5353  * the device's PCI configuration space or 0 if the device does not support it.
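/*
 * Layout as read by siov_find_pci_dvsec() below: each DVSEC instance
 * (extended capability ID 0x23) carries the DVSEC vendor ID at offset 4
 * and the DVSEC ID at offset 8 of the capability; a match is vendor
 * PCI_VENDOR_ID_INTEL with DVSEC ID 5.
 */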
5356 static int siov_find_pci_dvsec(struct pci_dev *pdev)
5361 pos = pci_find_next_ext_capability(pdev, 0, 0x23);
5363 pci_read_config_word(pdev, pos + 4, &vendor);
5364 pci_read_config_word(pdev, pos + 8, &id);
5365 if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
5368 pos = pci_find_next_ext_capability(pdev, pos, 0x23);
5375 intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
5377 struct device_domain_info *info = get_domain_info(dev);
5379 if (feat == IOMMU_DEV_FEAT_AUX) {
5382 if (!dev_is_pci(dev) || dmar_disabled ||
5383 !scalable_mode_support() || !pasid_mode_support())
5386 ret = pci_pasid_features(to_pci_dev(dev));
5390 return !!siov_find_pci_dvsec(to_pci_dev(dev));
5393 if (feat == IOMMU_DEV_FEAT_IOPF)
5394 return info && info->pri_supported;
5396 if (feat == IOMMU_DEV_FEAT_SVA)
5397 return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
5398 info->pasid_supported && info->pri_supported &&
5399 info->ats_supported;
5405 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
5407 if (feat == IOMMU_DEV_FEAT_AUX)
5408 return intel_iommu_enable_auxd(dev);
5410 if (feat == IOMMU_DEV_FEAT_IOPF)
5411 return intel_iommu_dev_has_feat(dev, feat) ? 0 : -ENODEV;
5413 if (feat == IOMMU_DEV_FEAT_SVA) {
5414 struct device_domain_info *info = get_domain_info(dev);
5419 if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
5422 if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
5430 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
5432 if (feat == IOMMU_DEV_FEAT_AUX)
5433 return intel_iommu_disable_auxd(dev);
5439 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
5441 struct device_domain_info *info = get_domain_info(dev);
5443 if (feat == IOMMU_DEV_FEAT_AUX)
5444 return scalable_mode_support() && info && info->auxd_enabled;
5450 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
5452 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5454 return dmar_domain->default_pasid > 0 ?
5455 dmar_domain->default_pasid : -EINVAL;
5458 static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
5461 return attach_deferred(dev);
5465 intel_iommu_enable_nesting(struct iommu_domain *domain)
5467 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5468 unsigned long flags;
5471 spin_lock_irqsave(&device_domain_lock, flags);
5472 if (nested_mode_support() && list_empty(&dmar_domain->devices)) {
5473 dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
5474 dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
5477 spin_unlock_irqrestore(&device_domain_lock, flags);
5483 * Check that the device does not live on an external facing PCI port that is
5484 * marked as untrusted. Such devices should not be able to apply quirks and
5485 * thus not be able to bypass the IOMMU restrictions.
5487 static bool risky_device(struct pci_dev *pdev)
5489 if (pdev->untrusted) {
5491 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
5492 pdev->vendor, pdev->device);
5493 pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
5499 static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
5500 unsigned long clf_pages)
5502 struct dma_pte *first_pte = NULL, *pte = NULL;
5503 unsigned long lvl_pages = 0;
5506 while (clf_pages > 0) {
5509 pte = pfn_to_dma_pte(domain, clf_pfn, &level);
5513 lvl_pages = lvl_to_nr_pages(level);
5516 if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
5519 clf_pages -= lvl_pages;
5520 clf_pfn += lvl_pages;
5523 if (!clf_pages || first_pte_in_page(pte) ||
5524 (level > 1 && clf_pages < lvl_pages)) {
5525 domain_flush_cache(domain, first_pte,
5526 (void *)pte - (void *)first_pte);
5532 static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
5533 unsigned long iova, size_t size)
5535 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5536 unsigned long pages = aligned_nrpages(iova, size);
5537 unsigned long pfn = iova >> VTD_PAGE_SHIFT;
5538 struct intel_iommu *iommu;
5541 if (!dmar_domain->iommu_coherency)
5542 clflush_sync_map(dmar_domain, pfn, pages);
5544 for_each_domain_iommu(iommu_id, dmar_domain) {
5545 iommu = g_iommus[iommu_id];
5546 __mapping_notify_one(iommu, dmar_domain, pfn, pages);
5550 const struct iommu_ops intel_iommu_ops = {
5551 .capable = intel_iommu_capable,
5552 .domain_alloc = intel_iommu_domain_alloc,
5553 .domain_free = intel_iommu_domain_free,
5554 .enable_nesting = intel_iommu_enable_nesting,
5555 .attach_dev = intel_iommu_attach_device,
5556 .detach_dev = intel_iommu_detach_device,
5557 .aux_attach_dev = intel_iommu_aux_attach_device,
5558 .aux_detach_dev = intel_iommu_aux_detach_device,
5559 .aux_get_pasid = intel_iommu_aux_get_pasid,
5560 .map = intel_iommu_map,
5561 .iotlb_sync_map = intel_iommu_iotlb_sync_map,
5562 .unmap = intel_iommu_unmap,
5563 .flush_iotlb_all = intel_flush_iotlb_all,
5564 .iotlb_sync = intel_iommu_tlb_sync,
5565 .iova_to_phys = intel_iommu_iova_to_phys,
5566 .probe_device = intel_iommu_probe_device,
5567 .probe_finalize = intel_iommu_probe_finalize,
5568 .release_device = intel_iommu_release_device,
5569 .get_resv_regions = intel_iommu_get_resv_regions,
5570 .put_resv_regions = generic_iommu_put_resv_regions,
5571 .device_group = intel_iommu_device_group,
5572 .dev_has_feat = intel_iommu_dev_has_feat,
5573 .dev_feat_enabled = intel_iommu_dev_feat_enabled,
5574 .dev_enable_feat = intel_iommu_dev_enable_feat,
5575 .dev_disable_feat = intel_iommu_dev_disable_feat,
5576 .is_attach_deferred = intel_iommu_is_attach_deferred,
5577 .def_domain_type = device_def_domain_type,
5578 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
5579 #ifdef CONFIG_INTEL_IOMMU_SVM
5580 .cache_invalidate = intel_iommu_sva_invalidate,
5581 .sva_bind_gpasid = intel_svm_bind_gpasid,
5582 .sva_unbind_gpasid = intel_svm_unbind_gpasid,
5583 .sva_bind = intel_svm_bind,
5584 .sva_unbind = intel_svm_unbind,
5585 .sva_get_pasid = intel_svm_get_pasid,
5586 .page_response = intel_svm_page_response,
5590 static void quirk_iommu_igfx(struct pci_dev *dev)
5592 if (risky_device(dev))
5595 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
5599 /* G4x/GM45 integrated gfx dmar support is totally busted. */
5600 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
5601 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
5602 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
5603 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
5604 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
5605 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
5606 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
5608 /* Broadwell igfx malfunctions with dmar */
5609 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
5610 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
5611 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
5612 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
5613 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
5614 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
5615 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
5616 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
5617 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
5618 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
5619 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
5620 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
5621 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
5622 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
5623 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
5624 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
5625 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
5626 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
5627 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
5628 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
5629 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
5630 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
5631 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
5632 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
5634 static void quirk_iommu_rwbf(struct pci_dev *dev)
5636 if (risky_device(dev))
5640 * Mobile 4 Series Chipset neglects to set RWBF capability,
5641 * but needs it. Same seems to hold for the desktop versions.
5643 pci_info(dev, "Forcing write-buffer flush capability\n");
5647 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5648 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5649 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5650 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5651 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5652 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5653 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5656 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
5657 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5658 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
5659 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
5660 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5661 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5662 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5663 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
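/*
 * Editorial note: these decode bits 11:8 of the GGC register read by
 * quirk_calpella_no_shadow_gtt() below; GGC_MEMORY_VT_ENABLED (bit 11)
 * indicates the BIOS allocated shadow GTT space for VT-d use.
 */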
5665 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5669 if (risky_device(dev))
5672 if (pci_read_config_word(dev, GGC, &ggc))
5675 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5676 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5678 } else if (dmar_map_gfx) {
5679 /* we have to ensure the gfx device is idle before we flush */
5680 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
5681 intel_iommu_strict = 1;
5684 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5685 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5686 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5687 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5689 static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
5693 if (!IS_GFX_DEVICE(dev))
5696 ver = (dev->device >> 8) & 0xff;
5697 if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
5698 ver != 0x4e && ver != 0x8a && ver != 0x98 &&
5702 if (risky_device(dev))
5705 pci_info(dev, "Skip IOMMU disabling for graphics\n");
5706 iommu_skip_te_disable = 1;
5708 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
5710 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5711 ISOCH DMAR unit for the Azalia sound device, but not give it any
5712 TLB entries, which causes it to deadlock. Check for that. We do
5713 this in a function called from init_dmars(), instead of in a PCI
5714 quirk, because we don't want to print the obnoxious "BIOS broken"
5715 message if VT-d is actually disabled.
5717 static void __init check_tylersburg_isoch(void)
5719 struct pci_dev *pdev;
5720 uint32_t vtisochctrl;
5722 /* If there's no Azalia in the system anyway, forget it. */
5723 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5727 if (risky_device(pdev)) {
5734 /* System Management Registers. Might be hidden, in which case
5735 we can't do the sanity check. But that's OK, because the
5736 known-broken BIOSes _don't_ actually hide it, so far. */
5737 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5741 if (risky_device(pdev)) {
5746 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5753 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5754 if (vtisochctrl & 1)
5757 /* Drop all bits other than the number of TLB entries */
5758 vtisochctrl &= 0x1c;
5760 /* If we have the recommended number of TLB entries (16), fine. */
5761 if (vtisochctrl == 0x10)
5764 /* Zero TLB entries? You get to ride the short bus to school. */
5766 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5767 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5768 dmi_get_system_info(DMI_BIOS_VENDOR),
5769 dmi_get_system_info(DMI_BIOS_VERSION),
5770 dmi_get_system_info(DMI_PRODUCT_VERSION));
5771 iommu_identity_mapping |= IDENTMAP_AZALIA;
5775 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",