GNU Linux-libre 5.13.14-gnu1
drivers/iommu/intel/iommu.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright © 2006-2014 Intel Corporation.
4  *
5  * Authors: David Woodhouse <dwmw2@infradead.org>,
6  *          Ashok Raj <ashok.raj@intel.com>,
7  *          Shaohua Li <shaohua.li@intel.com>,
8  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
9  *          Fenghua Yu <fenghua.yu@intel.com>
10  *          Joerg Roedel <jroedel@suse.de>
11  */
12
13 #define pr_fmt(fmt)     "DMAR: " fmt
14 #define dev_fmt(fmt)    pr_fmt(fmt)
15
16 #include <linux/init.h>
17 #include <linux/bitmap.h>
18 #include <linux/debugfs.h>
19 #include <linux/export.h>
20 #include <linux/slab.h>
21 #include <linux/irq.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/pci.h>
25 #include <linux/dmar.h>
26 #include <linux/dma-map-ops.h>
27 #include <linux/mempool.h>
28 #include <linux/memory.h>
29 #include <linux/cpu.h>
30 #include <linux/timer.h>
31 #include <linux/io.h>
32 #include <linux/iova.h>
33 #include <linux/iommu.h>
34 #include <linux/dma-iommu.h>
35 #include <linux/intel-iommu.h>
36 #include <linux/syscore_ops.h>
37 #include <linux/tboot.h>
38 #include <linux/dmi.h>
39 #include <linux/pci-ats.h>
40 #include <linux/memblock.h>
41 #include <linux/dma-direct.h>
42 #include <linux/crash_dump.h>
43 #include <linux/numa.h>
44 #include <asm/irq_remapping.h>
45 #include <asm/cacheflush.h>
46 #include <asm/iommu.h>
47
48 #include "../irq_remapping.h"
49 #include "pasid.h"
50 #include "cap_audit.h"
51
52 #define ROOT_SIZE               VTD_PAGE_SIZE
53 #define CONTEXT_SIZE            VTD_PAGE_SIZE
54
55 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
56 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
57 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
58 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
59
60 #define IOAPIC_RANGE_START      (0xfee00000)
61 #define IOAPIC_RANGE_END        (0xfeefffff)
62 #define IOVA_START_ADDR         (0x1000)
63
64 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
65
66 #define MAX_AGAW_WIDTH 64
67 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
68
69 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
70 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
71
72 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
73    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
74 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
75                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
76 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
77
78 /* IO virtual address start page frame number */
79 #define IOVA_START_PFN          (1)
80
81 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
82
83 /* page table handling */
84 #define LEVEL_STRIDE            (9)
85 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
86
87 /*
88  * This bitmap is used to advertise the page sizes our hardware supports
89  * to the IOMMU core, which will then use this information to split
90  * physically contiguous memory regions it is mapping into page sizes
91  * that we support.
92  *
93  * Traditionally the IOMMU core just handed us the mappings directly,
94  * after making sure the size was a power-of-two multiple of 4KiB and
95  * that the mapping had natural alignment.
96  *
97  * To retain this behavior, we currently advertise that we support
98  * all page sizes that are a power-of-two multiple of 4KiB.
99  *
100  * If at some point we'd like to utilize the IOMMU core's new behavior,
101  * we could change this to advertise the real page sizes we support.
102  */
103 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
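
/*
 * For illustration: ~0xFFFUL clears bits 0-11 and sets every higher bit,
 * so the bitmap above claims support for every power-of-two size from
 * 4KiB upward (4KiB, 8KiB, ..., 2MiB, 1GiB, ...), matching the behaviour
 * described in the comment above.
 */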
104
105 static inline int agaw_to_level(int agaw)
106 {
107         return agaw + 2;
108 }
109
110 static inline int agaw_to_width(int agaw)
111 {
112         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
113 }
114
115 static inline int width_to_agaw(int width)
116 {
117         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
118 }
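
/*
 * A worked example for the helpers above: a 4-level page table covers a
 * 48-bit address width, which corresponds to agaw 2 (width_to_agaw(48) == 2,
 * agaw_to_width(2) == 48, agaw_to_level(2) == 4), and
 * DEFAULT_DOMAIN_ADDRESS_WIDTH (57) likewise maps to agaw 3 and a
 * 5-level table.
 */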
119
120 static inline unsigned int level_to_offset_bits(int level)
121 {
122         return (level - 1) * LEVEL_STRIDE;
123 }
124
125 static inline int pfn_level_offset(u64 pfn, int level)
126 {
127         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
128 }
129
130 static inline u64 level_mask(int level)
131 {
132         return -1ULL << level_to_offset_bits(level);
133 }
134
135 static inline u64 level_size(int level)
136 {
137         return 1ULL << level_to_offset_bits(level);
138 }
139
140 static inline u64 align_to_level(u64 pfn, int level)
141 {
142         return (pfn + level_size(level) - 1) & level_mask(level);
143 }
144
145 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
146 {
147         return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
148 }
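
/*
 * Example of the level arithmetic above: level 1 entries map individual
 * 4KiB VT-d pages, a level-2 entry spans level_size(2) == 512 pages (2MiB)
 * and a level-3 entry spans level_size(3) == 512 * 512 pages (1GiB), the
 * super-page sizes considered by domain_update_iommu_superpage() below.
 */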
149
150 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
151    are never going to work. */
152 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
153 {
154         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
155 }
156
157 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
158 {
159         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
160 }
161 static inline unsigned long page_to_dma_pfn(struct page *pg)
162 {
163         return mm_to_dma_pfn(page_to_pfn(pg));
164 }
165 static inline unsigned long virt_to_dma_pfn(void *p)
166 {
167         return page_to_dma_pfn(virt_to_page(p));
168 }
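
/*
 * With 4KiB MM pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12, the usual x86
 * case) the shifts above are zero and mm and dma pfns are identical; with
 * larger MM pages a single mm pfn corresponds to several dma pfns.
 */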
169
170 /* global iommu list, set NULL for ignored DMAR units */
171 static struct intel_iommu **g_iommus;
172
173 static void __init check_tylersburg_isoch(void);
174 static int rwbf_quirk;
175
176 /*
177  * set to 1 to panic kernel if can't successfully enable VT-d
178  * (used when kernel is launched w/ TXT)
179  */
180 static int force_on = 0;
181 static int intel_iommu_tboot_noforce;
182 static int no_platform_optin;
183
184 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
185
186 /*
187  * Take a root_entry and return the Lower Context Table Pointer (LCTP)
188  * if marked present.
189  */
190 static phys_addr_t root_entry_lctp(struct root_entry *re)
191 {
192         if (!(re->lo & 1))
193                 return 0;
194
195         return re->lo & VTD_PAGE_MASK;
196 }
197
198 /*
199  * Take a root_entry and return the Upper Context Table Pointer (UCTP)
200  * if marked present.
201  */
202 static phys_addr_t root_entry_uctp(struct root_entry *re)
203 {
204         if (!(re->hi & 1))
205                 return 0;
206
207         return re->hi & VTD_PAGE_MASK;
208 }
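
/*
 * Each root entry is 16 bytes (lo + hi), so a 4KiB root table holds
 * ROOT_ENTRY_NR == 256 entries, one per bus.  In legacy mode only the
 * low half is used; in scalable mode the low and high halves point to
 * separate context tables covering devfn 0x00-0x7f and 0x80-0xff
 * respectively (see iommu_context_addr()).
 */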
209
210 static inline void context_clear_pasid_enable(struct context_entry *context)
211 {
212         context->lo &= ~(1ULL << 11);
213 }
214
215 static inline bool context_pasid_enabled(struct context_entry *context)
216 {
217         return !!(context->lo & (1ULL << 11));
218 }
219
220 static inline void context_set_copied(struct context_entry *context)
221 {
222         context->hi |= (1ull << 3);
223 }
224
225 static inline bool context_copied(struct context_entry *context)
226 {
227         return !!(context->hi & (1ULL << 3));
228 }
229
230 static inline bool __context_present(struct context_entry *context)
231 {
232         return (context->lo & 1);
233 }
234
235 bool context_present(struct context_entry *context)
236 {
237         return context_pasid_enabled(context) ?
238              __context_present(context) :
239              __context_present(context) && !context_copied(context);
240 }
241
242 static inline void context_set_present(struct context_entry *context)
243 {
244         context->lo |= 1;
245 }
246
247 static inline void context_set_fault_enable(struct context_entry *context)
248 {
249         context->lo &= (((u64)-1) << 2) | 1;
250 }
251
252 static inline void context_set_translation_type(struct context_entry *context,
253                                                 unsigned long value)
254 {
255         context->lo &= (((u64)-1) << 4) | 3;
256         context->lo |= (value & 3) << 2;
257 }
258
259 static inline void context_set_address_root(struct context_entry *context,
260                                             unsigned long value)
261 {
262         context->lo &= ~VTD_PAGE_MASK;
263         context->lo |= value & VTD_PAGE_MASK;
264 }
265
266 static inline void context_set_address_width(struct context_entry *context,
267                                              unsigned long value)
268 {
269         context->hi |= value & 7;
270 }
271
272 static inline void context_set_domain_id(struct context_entry *context,
273                                          unsigned long value)
274 {
275         context->hi |= (value & ((1 << 16) - 1)) << 8;
276 }
277
278 static inline int context_domain_id(struct context_entry *c)
279 {
280         return((c->hi >> 8) & 0xffff);
281 }
282
283 static inline void context_clear_entry(struct context_entry *context)
284 {
285         context->lo = 0;
286         context->hi = 0;
287 }
288
289 /*
290  * This domain is a statically identity mapping domain.
291  *      1. This domain creates a static 1:1 mapping to all usable memory.
292  *      2. It maps to each iommu if successful.
293  *      3. Each iommu maps to this domain if successful.
294  */
295 static struct dmar_domain *si_domain;
296 static int hw_pass_through = 1;
297
298 #define for_each_domain_iommu(idx, domain)                      \
299         for (idx = 0; idx < g_num_of_iommus; idx++)             \
300                 if (domain->iommu_refcnt[idx])
301
302 struct dmar_rmrr_unit {
303         struct list_head list;          /* list of rmrr units   */
304         struct acpi_dmar_header *hdr;   /* ACPI header          */
305         u64     base_address;           /* reserved base address*/
306         u64     end_address;            /* reserved end address */
307         struct dmar_dev_scope *devices; /* target devices */
308         int     devices_cnt;            /* target device count */
309 };
310
311 struct dmar_atsr_unit {
312         struct list_head list;          /* list of ATSR units */
313         struct acpi_dmar_header *hdr;   /* ACPI header */
314         struct dmar_dev_scope *devices; /* target devices */
315         int devices_cnt;                /* target device count */
316         u8 include_all:1;               /* include all ports */
317 };
318
319 struct dmar_satc_unit {
320         struct list_head list;          /* list of SATC units */
321         struct acpi_dmar_header *hdr;   /* ACPI header */
322         struct dmar_dev_scope *devices; /* target devices */
323         struct intel_iommu *iommu;      /* the corresponding iommu */
324         int devices_cnt;                /* target device count */
325         u8 atc_required:1;              /* ATS is required */
326 };
327
328 static LIST_HEAD(dmar_atsr_units);
329 static LIST_HEAD(dmar_rmrr_units);
330 static LIST_HEAD(dmar_satc_units);
331
332 #define for_each_rmrr_units(rmrr) \
333         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
334
335 /* number of registered intel_iommus; used to size and index g_iommus */
336 static int g_num_of_iommus;
337
338 static void domain_exit(struct dmar_domain *domain);
339 static void domain_remove_dev_info(struct dmar_domain *domain);
340 static void dmar_remove_one_dev_info(struct device *dev);
341 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
342 static int intel_iommu_attach_device(struct iommu_domain *domain,
343                                      struct device *dev);
344 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
345                                             dma_addr_t iova);
346
347 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
348 int dmar_disabled = 0;
349 #else
350 int dmar_disabled = 1;
351 #endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
352
353 #ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
354 int intel_iommu_sm = 1;
355 #else
356 int intel_iommu_sm;
357 #endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
358
359 int intel_iommu_enabled = 0;
360 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
361
362 static int dmar_map_gfx = 1;
363 static int intel_iommu_strict;
364 static int intel_iommu_superpage = 1;
365 static int iommu_identity_mapping;
366 static int iommu_skip_te_disable;
367
368 #define IDENTMAP_GFX            2
369 #define IDENTMAP_AZALIA         4
370
371 int intel_iommu_gfx_mapped;
372 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
373
374 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
375 struct device_domain_info *get_domain_info(struct device *dev)
376 {
377         struct device_domain_info *info;
378
379         if (!dev)
380                 return NULL;
381
382         info = dev_iommu_priv_get(dev);
383         if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
384                 return NULL;
385
386         return info;
387 }
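
/*
 * DEFER_DEVICE_DOMAIN_INFO is a sentinel stored in the per-device iommu
 * private data for devices whose domain attachment has been deferred;
 * get_domain_info() hides it from callers, while attach_deferred() below
 * tests for it explicitly.
 */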
388
389 DEFINE_SPINLOCK(device_domain_lock);
390 static LIST_HEAD(device_domain_list);
391
392 /*
393  * Iterate over elements in device_domain_list and call the specified
394  * callback @fn against each element.
395  */
396 int for_each_device_domain(int (*fn)(struct device_domain_info *info,
397                                      void *data), void *data)
398 {
399         int ret = 0;
400         unsigned long flags;
401         struct device_domain_info *info;
402
403         spin_lock_irqsave(&device_domain_lock, flags);
404         list_for_each_entry(info, &device_domain_list, global) {
405                 ret = fn(info, data);
406                 if (ret) {
407                         spin_unlock_irqrestore(&device_domain_lock, flags);
408                         return ret;
409                 }
410         }
411         spin_unlock_irqrestore(&device_domain_lock, flags);
412
413         return 0;
414 }
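
/*
 * Example usage with a hypothetical callback; a non-zero return from the
 * callback stops the walk and is propagated back to the caller:
 *
 *	static int count_one(struct device_domain_info *info, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	for_each_device_domain(count_one, &n);
 */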
415
416 const struct iommu_ops intel_iommu_ops;
417
418 static bool translation_pre_enabled(struct intel_iommu *iommu)
419 {
420         return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
421 }
422
423 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
424 {
425         iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
426 }
427
428 static void init_translation_status(struct intel_iommu *iommu)
429 {
430         u32 gsts;
431
432         gsts = readl(iommu->reg + DMAR_GSTS_REG);
433         if (gsts & DMA_GSTS_TES)
434                 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
435 }
436
437 static int __init intel_iommu_setup(char *str)
438 {
439         if (!str)
440                 return -EINVAL;
441         while (*str) {
442                 if (!strncmp(str, "on", 2)) {
443                         dmar_disabled = 0;
444                         pr_info("IOMMU enabled\n");
445                 } else if (!strncmp(str, "off", 3)) {
446                         dmar_disabled = 1;
447                         no_platform_optin = 1;
448                         pr_info("IOMMU disabled\n");
449                 } else if (!strncmp(str, "igfx_off", 8)) {
450                         dmar_map_gfx = 0;
451                         pr_info("Disable GFX device mapping\n");
452                 } else if (!strncmp(str, "forcedac", 8)) {
453                         pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
454                         iommu_dma_forcedac = true;
455                 } else if (!strncmp(str, "strict", 6)) {
456                         pr_info("Disable batched IOTLB flush\n");
457                         intel_iommu_strict = 1;
458                 } else if (!strncmp(str, "sp_off", 6)) {
459                         pr_info("Disable supported super page\n");
460                         intel_iommu_superpage = 0;
461                 } else if (!strncmp(str, "sm_on", 5)) {
462                         pr_info("Intel-IOMMU: scalable mode supported\n");
463                         intel_iommu_sm = 1;
464                 } else if (!strncmp(str, "tboot_noforce", 13)) {
465                         pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
466                         intel_iommu_tboot_noforce = 1;
467                 }
468
469                 str += strcspn(str, ",");
470                 while (*str == ',')
471                         str++;
472         }
473         return 0;
474 }
475 __setup("intel_iommu=", intel_iommu_setup);
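
/*
 * The options above are parsed as a comma-separated list, so for example
 * booting with "intel_iommu=on,sm_on,igfx_off" enables the IOMMU and
 * scalable mode and skips DMA remapping for the graphics device.
 */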
476
477 static struct kmem_cache *iommu_domain_cache;
478 static struct kmem_cache *iommu_devinfo_cache;
479
480 static struct dmar_domain *get_iommu_domain(struct intel_iommu *iommu, u16 did)
481 {
482         struct dmar_domain **domains;
483         int idx = did >> 8;
484
485         domains = iommu->domains[idx];
486         if (!domains)
487                 return NULL;
488
489         return domains[did & 0xff];
490 }
491
492 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
493                              struct dmar_domain *domain)
494 {
495         struct dmar_domain **domains;
496         int idx = did >> 8;
497
498         if (!iommu->domains[idx]) {
499                 size_t size = 256 * sizeof(struct dmar_domain *);
500                 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
501         }
502
503         domains = iommu->domains[idx];
504         if (WARN_ON(!domains))
505                 return;
506
507         domains[did & 0xff] = domain;
508 }
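
/*
 * Domain IDs are tracked in a two-level table: the high byte of the DID
 * selects one of iommu->domains[] and the low byte indexes into a lazily
 * allocated block of 256 domain pointers, so up to 65536 domain IDs can
 * be tracked without allocating the whole table up front.
 */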
509
510 void *alloc_pgtable_page(int node)
511 {
512         struct page *page;
513         void *vaddr = NULL;
514
515         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
516         if (page)
517                 vaddr = page_address(page);
518         return vaddr;
519 }
520
521 void free_pgtable_page(void *vaddr)
522 {
523         free_page((unsigned long)vaddr);
524 }
525
526 static inline void *alloc_domain_mem(void)
527 {
528         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
529 }
530
531 static void free_domain_mem(void *vaddr)
532 {
533         kmem_cache_free(iommu_domain_cache, vaddr);
534 }
535
536 static inline void *alloc_devinfo_mem(void)
537 {
538         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
539 }
540
541 static inline void free_devinfo_mem(void *vaddr)
542 {
543         kmem_cache_free(iommu_devinfo_cache, vaddr);
544 }
545
546 static inline int domain_type_is_si(struct dmar_domain *domain)
547 {
548         return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
549 }
550
551 static inline bool domain_use_first_level(struct dmar_domain *domain)
552 {
553         return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
554 }
555
556 static inline int domain_pfn_supported(struct dmar_domain *domain,
557                                        unsigned long pfn)
558 {
559         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
560
561         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
562 }
563
564 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
565 {
566         unsigned long sagaw;
567         int agaw = -1;
568
569         sagaw = cap_sagaw(iommu->cap);
570         for (agaw = width_to_agaw(max_gaw);
571              agaw >= 0; agaw--) {
572                 if (test_bit(agaw, &sagaw))
573                         break;
574         }
575
576         return agaw;
577 }
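
/*
 * Example: iommu_calculate_agaw() below calls this with
 * DEFAULT_DOMAIN_ADDRESS_WIDTH (57), so the loop starts at agaw 3
 * (57-bit, 5-level) and walks down until it finds an agaw bit set in the
 * unit's SAGAW capability field; hardware that only supports 4-level
 * tables thus ends up with agaw 2 (48-bit).
 */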
578
579 /*
580  * Calculate max SAGAW for each iommu.
581  */
582 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
583 {
584         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
585 }
586
587 /*
588  * Calculate agaw for each iommu.
589  * "SAGAW" may be different across iommus; use a default agaw and fall
590  * back to a smaller supported agaw for iommus that don't support the default.
591  */
592 int iommu_calculate_agaw(struct intel_iommu *iommu)
593 {
594         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
595 }
596
597 /* This function only returns a single iommu in a domain */
598 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
599 {
600         int iommu_id;
601
602         /* si_domain and vm domain should not get here. */
603         if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
604                 return NULL;
605
606         for_each_domain_iommu(iommu_id, domain)
607                 break;
608
609         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
610                 return NULL;
611
612         return g_iommus[iommu_id];
613 }
614
615 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
616 {
617         return sm_supported(iommu) ?
618                         ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
619 }
620
621 static void domain_update_iommu_coherency(struct dmar_domain *domain)
622 {
623         struct dmar_drhd_unit *drhd;
624         struct intel_iommu *iommu;
625         bool found = false;
626         int i;
627
628         domain->iommu_coherency = 1;
629
630         for_each_domain_iommu(i, domain) {
631                 found = true;
632                 if (!iommu_paging_structure_coherency(g_iommus[i])) {
633                         domain->iommu_coherency = 0;
634                         break;
635                 }
636         }
637         if (found)
638                 return;
639
640         /* No hardware attached; use lowest common denominator */
641         rcu_read_lock();
642         for_each_active_iommu(iommu, drhd) {
643                 if (!iommu_paging_structure_coherency(iommu)) {
644                         domain->iommu_coherency = 0;
645                         break;
646                 }
647         }
648         rcu_read_unlock();
649 }
650
651 static int domain_update_iommu_snooping(struct intel_iommu *skip)
652 {
653         struct dmar_drhd_unit *drhd;
654         struct intel_iommu *iommu;
655         int ret = 1;
656
657         rcu_read_lock();
658         for_each_active_iommu(iommu, drhd) {
659                 if (iommu != skip) {
660                         /*
661                          * If the hardware is operating in the scalable mode,
662                          * the snooping control is always supported since we
663                          * always set PASID-table-entry.PGSNP bit if the domain
664                          * is managed outside (UNMANAGED).
665                          */
666                         if (!sm_supported(iommu) &&
667                             !ecap_sc_support(iommu->ecap)) {
668                                 ret = 0;
669                                 break;
670                         }
671                 }
672         }
673         rcu_read_unlock();
674
675         return ret;
676 }
677
678 static int domain_update_iommu_superpage(struct dmar_domain *domain,
679                                          struct intel_iommu *skip)
680 {
681         struct dmar_drhd_unit *drhd;
682         struct intel_iommu *iommu;
683         int mask = 0x3;
684
685         if (!intel_iommu_superpage) {
686                 return 0;
687         }
688
689         /* set iommu_superpage to the smallest common denominator */
690         rcu_read_lock();
691         for_each_active_iommu(iommu, drhd) {
692                 if (iommu != skip) {
693                         if (domain && domain_use_first_level(domain)) {
694                                 if (!cap_fl1gp_support(iommu->cap))
695                                         mask = 0x1;
696                         } else {
697                                 mask &= cap_super_page_val(iommu->cap);
698                         }
699
700                         if (!mask)
701                                 break;
702                 }
703         }
704         rcu_read_unlock();
705
706         return fls(mask);
707 }
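
/*
 * The mask above starts with bits for the 2MiB and 1GiB super-page sizes
 * and is intersected with every unit's capability, so the fls() result
 * (0, 1 or 2) records how many super-page levels the whole domain can
 * safely use; domain_update_iommu_cap() stores it in
 * domain->iommu_superpage.
 */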
708
709 static int domain_update_device_node(struct dmar_domain *domain)
710 {
711         struct device_domain_info *info;
712         int nid = NUMA_NO_NODE;
713
714         assert_spin_locked(&device_domain_lock);
715
716         if (list_empty(&domain->devices))
717                 return NUMA_NO_NODE;
718
719         list_for_each_entry(info, &domain->devices, link) {
720                 if (!info->dev)
721                         continue;
722
723                 /*
724                  * There could possibly be multiple device numa nodes as devices
725                  * within the same domain may sit behind different IOMMUs. There
726                  * isn't a perfect answer in such a situation, so we select a first
727                  * come, first served policy.
728                  */
729                 nid = dev_to_node(info->dev);
730                 if (nid != NUMA_NO_NODE)
731                         break;
732         }
733
734         return nid;
735 }
736
737 static void domain_update_iotlb(struct dmar_domain *domain);
738
739 /* Some capabilities may be different across iommus */
740 static void domain_update_iommu_cap(struct dmar_domain *domain)
741 {
742         domain_update_iommu_coherency(domain);
743         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
744         domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
745
746         /*
747          * If RHSA is missing, we should default to the device numa domain
748          * as a fallback.
749          */
750         if (domain->nid == NUMA_NO_NODE)
751                 domain->nid = domain_update_device_node(domain);
752
753         /*
754          * First-level translation restricts the input-address to a
755          * canonical address (i.e., address bits 63:N have the same
756          * value as address bit [N-1], where N is 48-bits with 4-level
757          * paging and 57-bits with 5-level paging). Hence, skip bit
758          * [N-1].
759          */
760         if (domain_use_first_level(domain))
761                 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
762         else
763                 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
764
765         domain_update_iotlb(domain);
766 }
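
/*
 * A concrete instance of the aperture computation above: for a domain
 * with gaw == 48 using second-level translation the aperture ends at
 * __DOMAIN_MAX_ADDR(48) == (1ULL << 48) - 1, while with first-level
 * translation bit 47 is skipped and the aperture ends at
 * __DOMAIN_MAX_ADDR(47) == (1ULL << 47) - 1.
 */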
767
768 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
769                                          u8 devfn, int alloc)
770 {
771         struct root_entry *root = &iommu->root_entry[bus];
772         struct context_entry *context;
773         u64 *entry;
774
775         entry = &root->lo;
776         if (sm_supported(iommu)) {
777                 if (devfn >= 0x80) {
778                         devfn -= 0x80;
779                         entry = &root->hi;
780                 }
781                 devfn *= 2;
782         }
783         if (*entry & 1)
784                 context = phys_to_virt(*entry & VTD_PAGE_MASK);
785         else {
786                 unsigned long phy_addr;
787                 if (!alloc)
788                         return NULL;
789
790                 context = alloc_pgtable_page(iommu->node);
791                 if (!context)
792                         return NULL;
793
794                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
795                 phy_addr = virt_to_phys((void *)context);
796                 *entry = phy_addr | 1;
797                 __iommu_flush_cache(iommu, entry, sizeof(*entry));
798         }
799         return &context[devfn];
800 }
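
/*
 * Per the scalable-mode layout, a context entry is 32 bytes instead of
 * the legacy 16, so each half of the root entry covers 128 devfns with a
 * full 4KiB context table; the devfn *= 2 above makes the legacy-sized
 * array indexing land on the start of each 32-byte scalable-mode entry.
 */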
801
802 static bool attach_deferred(struct device *dev)
803 {
804         return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
805 }
806
807 /**
808  * is_downstream_to_pci_bridge - test if a device belongs to the PCI
809  *                               sub-hierarchy of a candidate PCI-PCI bridge
810  * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
811  * @bridge: the candidate PCI-PCI bridge
812  *
813  * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
814  */
815 static bool
816 is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
817 {
818         struct pci_dev *pdev, *pbridge;
819
820         if (!dev_is_pci(dev) || !dev_is_pci(bridge))
821                 return false;
822
823         pdev = to_pci_dev(dev);
824         pbridge = to_pci_dev(bridge);
825
826         if (pbridge->subordinate &&
827             pbridge->subordinate->number <= pdev->bus->number &&
828             pbridge->subordinate->busn_res.end >= pdev->bus->number)
829                 return true;
830
831         return false;
832 }
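
/*
 * Example: for a device on bus 5 behind a bridge whose subordinate bus
 * range is [4, 7], the check above sees 4 <= 5 <= 7 and reports the
 * device as part of the bridge's sub-hierarchy.
 */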
833
834 static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
835 {
836         struct dmar_drhd_unit *drhd;
837         u32 vtbar;
838         int rc;
839
840         /* We know that this device on this chipset has its own IOMMU.
841          * If we find it under a different IOMMU, then the BIOS is lying
842          * to us. Hope that the IOMMU for this device is actually
843          * disabled, and it needs no translation...
844          */
845         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
846         if (rc) {
847                 /* "can't" happen */
848                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
849                 return false;
850         }
851         vtbar &= 0xffff0000;
852
853         /* we know that this iommu should be at offset 0xa000 from vtbar */
854         drhd = dmar_find_matched_drhd_unit(pdev);
855         if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
856                 pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
857                 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
858                 return true;
859         }
860
861         return false;
862 }
863
864 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
865 {
866         if (!iommu || iommu->drhd->ignored)
867                 return true;
868
869         if (dev_is_pci(dev)) {
870                 struct pci_dev *pdev = to_pci_dev(dev);
871
872                 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
873                     pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
874                     quirk_ioat_snb_local_iommu(pdev))
875                         return true;
876         }
877
878         return false;
879 }
880
881 struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
882 {
883         struct dmar_drhd_unit *drhd = NULL;
884         struct pci_dev *pdev = NULL;
885         struct intel_iommu *iommu;
886         struct device *tmp;
887         u16 segment = 0;
888         int i;
889
890         if (!dev)
891                 return NULL;
892
893         if (dev_is_pci(dev)) {
894                 struct pci_dev *pf_pdev;
895
896                 pdev = pci_real_dma_dev(to_pci_dev(dev));
897
898                 /* VFs aren't listed in scope tables; we need to look up
899                  * the PF instead to find the IOMMU. */
900                 pf_pdev = pci_physfn(pdev);
901                 dev = &pf_pdev->dev;
902                 segment = pci_domain_nr(pdev->bus);
903         } else if (has_acpi_companion(dev))
904                 dev = &ACPI_COMPANION(dev)->dev;
905
906         rcu_read_lock();
907         for_each_iommu(iommu, drhd) {
908                 if (pdev && segment != drhd->segment)
909                         continue;
910
911                 for_each_active_dev_scope(drhd->devices,
912                                           drhd->devices_cnt, i, tmp) {
913                         if (tmp == dev) {
914                                 /* For a VF use its original BDF# not that of the PF
915                                  * which we used for the IOMMU lookup. Strictly speaking
916                                  * we could do this for all PCI devices; we only need to
917                                  * get the BDF# from the scope table for ACPI matches. */
918                                 if (pdev && pdev->is_virtfn)
919                                         goto got_pdev;
920
921                                 if (bus && devfn) {
922                                         *bus = drhd->devices[i].bus;
923                                         *devfn = drhd->devices[i].devfn;
924                                 }
925                                 goto out;
926                         }
927
928                         if (is_downstream_to_pci_bridge(dev, tmp))
929                                 goto got_pdev;
930                 }
931
932                 if (pdev && drhd->include_all) {
933                 got_pdev:
934                         if (bus && devfn) {
935                                 *bus = pdev->bus->number;
936                                 *devfn = pdev->devfn;
937                         }
938                         goto out;
939                 }
940         }
941         iommu = NULL;
942  out:
943         if (iommu_is_dummy(iommu, dev))
944                 iommu = NULL;
945
946         rcu_read_unlock();
947
948         return iommu;
949 }
950
951 static void domain_flush_cache(struct dmar_domain *domain,
952                                void *addr, int size)
953 {
954         if (!domain->iommu_coherency)
955                 clflush_cache_range(addr, size);
956 }
957
958 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
959 {
960         struct context_entry *context;
961         int ret = 0;
962         unsigned long flags;
963
964         spin_lock_irqsave(&iommu->lock, flags);
965         context = iommu_context_addr(iommu, bus, devfn, 0);
966         if (context)
967                 ret = context_present(context);
968         spin_unlock_irqrestore(&iommu->lock, flags);
969         return ret;
970 }
971
972 static void free_context_table(struct intel_iommu *iommu)
973 {
974         int i;
975         unsigned long flags;
976         struct context_entry *context;
977
978         spin_lock_irqsave(&iommu->lock, flags);
979         if (!iommu->root_entry)
980                 goto out;
981
982         for (i = 0; i < ROOT_ENTRY_NR; i++) {
983                 context = iommu_context_addr(iommu, i, 0, 0);
984                 if (context)
985                         free_pgtable_page(context);
986
987                 if (!sm_supported(iommu))
988                         continue;
989
990                 context = iommu_context_addr(iommu, i, 0x80, 0);
991                 if (context)
992                         free_pgtable_page(context);
993
994         }
995         free_pgtable_page(iommu->root_entry);
996         iommu->root_entry = NULL;
997 out:
998         spin_unlock_irqrestore(&iommu->lock, flags);
999 }
1000
1001 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
1002                                       unsigned long pfn, int *target_level)
1003 {
1004         struct dma_pte *parent, *pte;
1005         int level = agaw_to_level(domain->agaw);
1006         int offset;
1007
1008         BUG_ON(!domain->pgd);
1009
1010         if (!domain_pfn_supported(domain, pfn))
1011                 /* Address beyond IOMMU's addressing capabilities. */
1012                 return NULL;
1013
1014         parent = domain->pgd;
1015
1016         while (1) {
1017                 void *tmp_page;
1018
1019                 offset = pfn_level_offset(pfn, level);
1020                 pte = &parent[offset];
1021                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1022                         break;
1023                 if (level == *target_level)
1024                         break;
1025
1026                 if (!dma_pte_present(pte)) {
1027                         uint64_t pteval;
1028
1029                         tmp_page = alloc_pgtable_page(domain->nid);
1030
1031                         if (!tmp_page)
1032                                 return NULL;
1033
1034                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1035                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1036                         if (domain_use_first_level(domain)) {
1037                                 pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
1038                                 if (domain->domain.type == IOMMU_DOMAIN_DMA)
1039                                         pteval |= DMA_FL_PTE_ACCESS;
1040                         }
1041                         if (cmpxchg64(&pte->val, 0ULL, pteval))
1042                                 /* Someone else set it while we were thinking; use theirs. */
1043                                 free_pgtable_page(tmp_page);
1044                         else
1045                                 domain_flush_cache(domain, pte, sizeof(*pte));
1046                 }
1047                 if (level == 1)
1048                         break;
1049
1050                 parent = phys_to_virt(dma_pte_addr(pte));
1051                 level--;
1052         }
1053
1054         if (!*target_level)
1055                 *target_level = level;
1056
1057         return pte;
1058 }
1059
1060 /* return address's pte at specific level */
1061 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1062                                          unsigned long pfn,
1063                                          int level, int *large_page)
1064 {
1065         struct dma_pte *parent, *pte;
1066         int total = agaw_to_level(domain->agaw);
1067         int offset;
1068
1069         parent = domain->pgd;
1070         while (level <= total) {
1071                 offset = pfn_level_offset(pfn, total);
1072                 pte = &parent[offset];
1073                 if (level == total)
1074                         return pte;
1075
1076                 if (!dma_pte_present(pte)) {
1077                         *large_page = total;
1078                         break;
1079                 }
1080
1081                 if (dma_pte_superpage(pte)) {
1082                         *large_page = total;
1083                         return pte;
1084                 }
1085
1086                 parent = phys_to_virt(dma_pte_addr(pte));
1087                 total--;
1088         }
1089         return NULL;
1090 }
1091
1092 /* clear last level pte; a tlb flush should follow */
1093 static void dma_pte_clear_range(struct dmar_domain *domain,
1094                                 unsigned long start_pfn,
1095                                 unsigned long last_pfn)
1096 {
1097         unsigned int large_page;
1098         struct dma_pte *first_pte, *pte;
1099
1100         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1101         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1102         BUG_ON(start_pfn > last_pfn);
1103
1104         /* we don't need a lock here; nobody else touches the iova range */
1105         do {
1106                 large_page = 1;
1107                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1108                 if (!pte) {
1109                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1110                         continue;
1111                 }
1112                 do {
1113                         dma_clear_pte(pte);
1114                         start_pfn += lvl_to_nr_pages(large_page);
1115                         pte++;
1116                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1117
1118                 domain_flush_cache(domain, first_pte,
1119                                    (void *)pte - (void *)first_pte);
1120
1121         } while (start_pfn && start_pfn <= last_pfn);
1122 }
1123
1124 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1125                                int retain_level, struct dma_pte *pte,
1126                                unsigned long pfn, unsigned long start_pfn,
1127                                unsigned long last_pfn)
1128 {
1129         pfn = max(start_pfn, pfn);
1130         pte = &pte[pfn_level_offset(pfn, level)];
1131
1132         do {
1133                 unsigned long level_pfn;
1134                 struct dma_pte *level_pte;
1135
1136                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1137                         goto next;
1138
1139                 level_pfn = pfn & level_mask(level);
1140                 level_pte = phys_to_virt(dma_pte_addr(pte));
1141
1142                 if (level > 2) {
1143                         dma_pte_free_level(domain, level - 1, retain_level,
1144                                            level_pte, level_pfn, start_pfn,
1145                                            last_pfn);
1146                 }
1147
1148                 /*
1149                  * Free the page table if we're below the level we want to
1150                  * retain and the range covers the entire table.
1151                  */
1152                 if (level < retain_level && !(start_pfn > level_pfn ||
1153                       last_pfn < level_pfn + level_size(level) - 1)) {
1154                         dma_clear_pte(pte);
1155                         domain_flush_cache(domain, pte, sizeof(*pte));
1156                         free_pgtable_page(level_pte);
1157                 }
1158 next:
1159                 pfn += level_size(level);
1160         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1161 }
1162
1163 /*
1164  * clear last level (leaf) ptes and free page table pages below the
1165  * level we wish to keep intact.
1166  */
1167 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1168                                    unsigned long start_pfn,
1169                                    unsigned long last_pfn,
1170                                    int retain_level)
1171 {
1172         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1173         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1174         BUG_ON(start_pfn > last_pfn);
1175
1176         dma_pte_clear_range(domain, start_pfn, last_pfn);
1177
1178         /* We don't need a lock here; nobody else touches the iova range */
1179         dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1180                            domain->pgd, 0, start_pfn, last_pfn);
1181
1182         /* free pgd */
1183         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1184                 free_pgtable_page(domain->pgd);
1185                 domain->pgd = NULL;
1186         }
1187 }
1188
1189 /* When a page at a given level is being unlinked from its parent, we don't
1190    need to *modify* it at all. All we need to do is make a list of all the
1191    pages which can be freed just as soon as we've flushed the IOTLB and we
1192    know the hardware page-walk will no longer touch them.
1193    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1194    be freed. */
1195 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1196                                             int level, struct dma_pte *pte,
1197                                             struct page *freelist)
1198 {
1199         struct page *pg;
1200
1201         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1202         pg->freelist = freelist;
1203         freelist = pg;
1204
1205         if (level == 1)
1206                 return freelist;
1207
1208         pte = page_address(pg);
1209         do {
1210                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1211                         freelist = dma_pte_list_pagetables(domain, level - 1,
1212                                                            pte, freelist);
1213                 pte++;
1214         } while (!first_pte_in_page(pte));
1215
1216         return freelist;
1217 }
1218
1219 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1220                                         struct dma_pte *pte, unsigned long pfn,
1221                                         unsigned long start_pfn,
1222                                         unsigned long last_pfn,
1223                                         struct page *freelist)
1224 {
1225         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1226
1227         pfn = max(start_pfn, pfn);
1228         pte = &pte[pfn_level_offset(pfn, level)];
1229
1230         do {
1231                 unsigned long level_pfn;
1232
1233                 if (!dma_pte_present(pte))
1234                         goto next;
1235
1236                 level_pfn = pfn & level_mask(level);
1237
1238                 /* If range covers entire pagetable, free it */
1239                 if (start_pfn <= level_pfn &&
1240                     last_pfn >= level_pfn + level_size(level) - 1) {
1241                         /* These subordinate page tables are going away entirely. Don't
1242                            bother to clear them; we're just going to *free* them. */
1243                         if (level > 1 && !dma_pte_superpage(pte))
1244                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1245
1246                         dma_clear_pte(pte);
1247                         if (!first_pte)
1248                                 first_pte = pte;
1249                         last_pte = pte;
1250                 } else if (level > 1) {
1251                         /* Recurse down into a level that isn't *entirely* obsolete */
1252                         freelist = dma_pte_clear_level(domain, level - 1,
1253                                                        phys_to_virt(dma_pte_addr(pte)),
1254                                                        level_pfn, start_pfn, last_pfn,
1255                                                        freelist);
1256                 }
1257 next:
1258                 pfn += level_size(level);
1259         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1260
1261         if (first_pte)
1262                 domain_flush_cache(domain, first_pte,
1263                                    (void *)++last_pte - (void *)first_pte);
1264
1265         return freelist;
1266 }
1267
1268 /* We can't just free the pages because the IOMMU may still be walking
1269    the page tables, and may have cached the intermediate levels. The
1270    pages can only be freed after the IOTLB flush has been done. */
1271 static struct page *domain_unmap(struct dmar_domain *domain,
1272                                  unsigned long start_pfn,
1273                                  unsigned long last_pfn,
1274                                  struct page *freelist)
1275 {
1276         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1277         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1278         BUG_ON(start_pfn > last_pfn);
1279
1280         /* we don't need a lock here; nobody else touches the iova range */
1281         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1282                                        domain->pgd, 0, start_pfn, last_pfn,
1283                                        freelist);
1284
1285         /* free pgd */
1286         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1287                 struct page *pgd_page = virt_to_page(domain->pgd);
1288                 pgd_page->freelist = freelist;
1289                 freelist = pgd_page;
1290
1291                 domain->pgd = NULL;
1292         }
1293
1294         return freelist;
1295 }
1296
1297 static void dma_free_pagelist(struct page *freelist)
1298 {
1299         struct page *pg;
1300
1301         while ((pg = freelist)) {
1302                 freelist = pg->freelist;
1303                 free_pgtable_page(page_address(pg));
1304         }
1305 }
1306
1307 /* iommu handling */
1308 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1309 {
1310         struct root_entry *root;
1311         unsigned long flags;
1312
1313         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1314         if (!root) {
1315                 pr_err("Allocating root entry for %s failed\n",
1316                         iommu->name);
1317                 return -ENOMEM;
1318         }
1319
1320         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1321
1322         spin_lock_irqsave(&iommu->lock, flags);
1323         iommu->root_entry = root;
1324         spin_unlock_irqrestore(&iommu->lock, flags);
1325
1326         return 0;
1327 }
1328
1329 static void iommu_set_root_entry(struct intel_iommu *iommu)
1330 {
1331         u64 addr;
1332         u32 sts;
1333         unsigned long flag;
1334
1335         addr = virt_to_phys(iommu->root_entry);
1336         if (sm_supported(iommu))
1337                 addr |= DMA_RTADDR_SMT;
1338
1339         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1340         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1341
1342         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1343
1344         /* Make sure hardware complete it */
1345         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1346                       readl, (sts & DMA_GSTS_RTPS), sts);
1347
1348         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1349
1350         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
1351         if (sm_supported(iommu))
1352                 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
1353         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1354 }
1355
1356 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1357 {
1358         u32 val;
1359         unsigned long flag;
1360
1361         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1362                 return;
1363
1364         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1365         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1366
1367         /* Make sure hardware complete it */
1368         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1369                       readl, (!(val & DMA_GSTS_WBFS)), val);
1370
1371         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1372 }
1373
1374 /* return value determines if we need a write buffer flush */
1375 static void __iommu_flush_context(struct intel_iommu *iommu,
1376                                   u16 did, u16 source_id, u8 function_mask,
1377                                   u64 type)
1378 {
1379         u64 val = 0;
1380         unsigned long flag;
1381
1382         switch (type) {
1383         case DMA_CCMD_GLOBAL_INVL:
1384                 val = DMA_CCMD_GLOBAL_INVL;
1385                 break;
1386         case DMA_CCMD_DOMAIN_INVL:
1387                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1388                 break;
1389         case DMA_CCMD_DEVICE_INVL:
1390                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1391                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1392                 break;
1393         default:
1394                 BUG();
1395         }
1396         val |= DMA_CCMD_ICC;
1397
1398         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1399         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1400
1401         /* Make sure hardware complete it */
1402         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1403                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1404
1405         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1406 }
1407
1408 /* return value determines if we need a write buffer flush */
1409 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1410                                 u64 addr, unsigned int size_order, u64 type)
1411 {
1412         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1413         u64 val = 0, val_iva = 0;
1414         unsigned long flag;
1415
1416         switch (type) {
1417         case DMA_TLB_GLOBAL_FLUSH:
1418                 /* global flush doesn't need to set IVA_REG */
1419                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1420                 break;
1421         case DMA_TLB_DSI_FLUSH:
1422                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1423                 break;
1424         case DMA_TLB_PSI_FLUSH:
1425                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1426                 /* IH bit is passed in as part of address */
1427                 val_iva = size_order | addr;
1428                 break;
1429         default:
1430                 BUG();
1431         }
1432         /* Note: set drain read/write */
1433 #if 0
1434         /*
1435          * This is probably to be super secure.. Looks like we can
1436          * This is probably just to be extra safe. Looks like we can
1437          */
1438         if (cap_read_drain(iommu->cap))
1439                 val |= DMA_TLB_READ_DRAIN;
1440 #endif
1441         if (cap_write_drain(iommu->cap))
1442                 val |= DMA_TLB_WRITE_DRAIN;
1443
1444         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1445         /* Note: Only uses first TLB reg currently */
1446         if (val_iva)
1447                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1448         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1449
1450         /* Make sure hardware complete it */
1451         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1452                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1453
1454         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1455
1456         /* check IOTLB invalidation granularity */
1457         if (DMA_TLB_IAIG(val) == 0)
1458                 pr_err("Flush IOTLB failed\n");
1459         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1460                 pr_debug("TLB flush request %Lx, actual %Lx\n",
1461                         (unsigned long long)DMA_TLB_IIRG(type),
1462                         (unsigned long long)DMA_TLB_IAIG(val));
1463 }
1464
1465 static struct device_domain_info *
1466 iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1467                          u8 bus, u8 devfn)
1468 {
1469         struct device_domain_info *info;
1470
1471         assert_spin_locked(&device_domain_lock);
1472
1473         if (!iommu->qi)
1474                 return NULL;
1475
1476         list_for_each_entry(info, &domain->devices, link)
1477                 if (info->iommu == iommu && info->bus == bus &&
1478                     info->devfn == devfn) {
1479                         if (info->ats_supported && info->dev)
1480                                 return info;
1481                         break;
1482                 }
1483
1484         return NULL;
1485 }
1486
1487 static void domain_update_iotlb(struct dmar_domain *domain)
1488 {
1489         struct device_domain_info *info;
1490         bool has_iotlb_device = false;
1491
1492         assert_spin_locked(&device_domain_lock);
1493
1494         list_for_each_entry(info, &domain->devices, link)
1495                 if (info->ats_enabled) {
1496                         has_iotlb_device = true;
1497                         break;
1498                 }
1499
1500         if (!has_iotlb_device) {
1501                 struct subdev_domain_info *sinfo;
1502
1503                 list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
1504                         info = get_domain_info(sinfo->pdev);
1505                         if (info && info->ats_enabled) {
1506                                 has_iotlb_device = true;
1507                                 break;
1508                         }
1509                 }
1510         }
1511
1512         domain->has_iotlb_device = has_iotlb_device;
1513 }
1514
1515 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1516 {
1517         struct pci_dev *pdev;
1518
1519         assert_spin_locked(&device_domain_lock);
1520
1521         if (!info || !dev_is_pci(info->dev))
1522                 return;
1523
1524         pdev = to_pci_dev(info->dev);
1525         /* For an IOMMU that supports device IOTLB throttling (DIT), we assign
1526          * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
1527          * queue depth at PF level. If DIT is not set, PFSID will be treated as
1528          * reserved, which should be set to 0.
1529          */
1530         if (!ecap_dit(info->iommu->ecap))
1531                 info->pfsid = 0;
1532         else {
1533                 struct pci_dev *pf_pdev;
1534
1535                 /* pdev will be returned if device is not a vf */
1536                 pf_pdev = pci_physfn(pdev);
1537                 info->pfsid = pci_dev_id(pf_pdev);
1538         }
1539
1540 #ifdef CONFIG_INTEL_IOMMU_SVM
1541         /* The PCIe spec, in its wisdom, declares that the behaviour of
1542            the device is undefined if you enable PASID support after ATS
1543            support. So always enable PASID support on devices which
1544            have it, even if we can't yet know if we're ever going to
1545            use it. */
1546         if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1547                 info->pasid_enabled = 1;
1548
1549         if (info->pri_supported &&
1550             (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1)  &&
1551             !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1552                 info->pri_enabled = 1;
1553 #endif
1554         if (info->ats_supported && pci_ats_page_aligned(pdev) &&
1555             !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1556                 info->ats_enabled = 1;
1557                 domain_update_iotlb(info->domain);
1558                 info->ats_qdep = pci_ats_queue_depth(pdev);
1559         }
1560 }
1561
1562 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1563 {
1564         struct pci_dev *pdev;
1565
1566         assert_spin_locked(&device_domain_lock);
1567
1568         if (!dev_is_pci(info->dev))
1569                 return;
1570
1571         pdev = to_pci_dev(info->dev);
1572
1573         if (info->ats_enabled) {
1574                 pci_disable_ats(pdev);
1575                 info->ats_enabled = 0;
1576                 domain_update_iotlb(info->domain);
1577         }
1578 #ifdef CONFIG_INTEL_IOMMU_SVM
1579         if (info->pri_enabled) {
1580                 pci_disable_pri(pdev);
1581                 info->pri_enabled = 0;
1582         }
1583         if (info->pasid_enabled) {
1584                 pci_disable_pasid(pdev);
1585                 info->pasid_enabled = 0;
1586         }
1587 #endif
1588 }
1589
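     /*
      * Issue a device-IOTLB (ATS) invalidation for a single device. The
      * source-id is the requester ID (bus in bits 15:8, devfn in bits 7:0)
      * and qdep is the device's reported invalidation queue depth.
      */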
1590 static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
1591                                     u64 addr, unsigned int mask)
1592 {
1593         u16 sid, qdep;
1594
1595         if (!info || !info->ats_enabled)
1596                 return;
1597
1598         sid = info->bus << 8 | info->devfn;
1599         qdep = info->ats_qdep;
1600         qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1601                            qdep, addr, mask);
1602 }
1603
1604 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1605                                   u64 addr, unsigned mask)
1606 {
1607         unsigned long flags;
1608         struct device_domain_info *info;
1609         struct subdev_domain_info *sinfo;
1610
1611         if (!domain->has_iotlb_device)
1612                 return;
1613
1614         spin_lock_irqsave(&device_domain_lock, flags);
1615         list_for_each_entry(info, &domain->devices, link)
1616                 __iommu_flush_dev_iotlb(info, addr, mask);
1617
1618         list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
1619                 info = get_domain_info(sinfo->pdev);
1620                 __iommu_flush_dev_iotlb(info, addr, mask);
1621         }
1622         spin_unlock_irqrestore(&device_domain_lock, flags);
1623 }
1624
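     /*
      * First-level (scalable mode) translations are tagged by PASID, so
      * flush per PASID: the auxiliary default_pasid if one is set, and
      * PASID_RID2PASID (used for DMA requests without a PASID) if any
      * device is attached to the domain.
      */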
1625 static void domain_flush_piotlb(struct intel_iommu *iommu,
1626                                 struct dmar_domain *domain,
1627                                 u64 addr, unsigned long npages, bool ih)
1628 {
1629         u16 did = domain->iommu_did[iommu->seq_id];
1630
1631         if (domain->default_pasid)
1632                 qi_flush_piotlb(iommu, did, domain->default_pasid,
1633                                 addr, npages, ih);
1634
1635         if (!list_empty(&domain->devices))
1636                 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
1637 }
1638
1639 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1640                                   struct dmar_domain *domain,
1641                                   unsigned long pfn, unsigned int pages,
1642                                   int ih, int map)
1643 {
1644         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1645         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1646         u16 did = domain->iommu_did[iommu->seq_id];
1647
1648         BUG_ON(pages == 0);
1649
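             /*
              * Bit 6 of the invalidation address is the invalidation hint
              * (IH): the caller indicates that only leaf PTEs changed, so
              * cached non-leaf page-structure entries need not be
              * invalidated.
              */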
1650         if (ih)
1651                 ih = 1 << 6;
1652
1653         if (domain_use_first_level(domain)) {
1654                 domain_flush_piotlb(iommu, domain, addr, pages, ih);
1655         } else {
1656                 /*
1657                  * Fall back to a domain-selective flush if there is no PSI
1658                  * support or the size is too big. PSI requires the page size
1659                  * to be 2^x and the base address to be naturally aligned to it.
1660                  */
1661                 if (!cap_pgsel_inv(iommu->cap) ||
1662                     mask > cap_max_amask_val(iommu->cap))
1663                         iommu->flush.flush_iotlb(iommu, did, 0, 0,
1664                                                         DMA_TLB_DSI_FLUSH);
1665                 else
1666                         iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1667                                                         DMA_TLB_PSI_FLUSH);
1668         }
1669
1670         /*
1671          * In caching mode, changing a page from non-present to present requires
1672          * a flush. However, the device IOTLB does not need to be flushed here.
1673          */
1674         if (!cap_caching_mode(iommu->cap) || !map)
1675                 iommu_flush_dev_iotlb(domain, addr, mask);
1676 }
1677
1678 /* Notification for newly created mappings */
1679 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1680                                         struct dmar_domain *domain,
1681                                         unsigned long pfn, unsigned int pages)
1682 {
1683         /*
1684          * It's a non-present to present mapping. Only flush if caching mode
1685          * is set and the domain uses second-level translation.
1686          */
1687         if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
1688                 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1689         else
1690                 iommu_flush_write_buffer(iommu);
1691 }
1692
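     /*
      * Flush the IOTLB on every IOMMU this domain is attached to: a
      * PASID-based flush for first-level domains, a domain-selective
      * flush otherwise. Device IOTLBs are flushed here only when the
      * IOMMU is not in caching mode.
      */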
1693 static void intel_flush_iotlb_all(struct iommu_domain *domain)
1694 {
1695         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
1696         int idx;
1697
1698         for_each_domain_iommu(idx, dmar_domain) {
1699                 struct intel_iommu *iommu = g_iommus[idx];
1700                 u16 did = dmar_domain->iommu_did[iommu->seq_id];
1701
1702                 if (domain_use_first_level(dmar_domain))
1703                         domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0);
1704                 else
1705                         iommu->flush.flush_iotlb(iommu, did, 0, 0,
1706                                                  DMA_TLB_DSI_FLUSH);
1707
1708                 if (!cap_caching_mode(iommu->cap))
1709                         iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1710                                               0, MAX_AGAW_PFN_WIDTH);
1711         }
1712 }
1713
1714 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1715 {
1716         u32 pmen;
1717         unsigned long flags;
1718
1719         if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1720                 return;
1721
1722         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1723         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1724         pmen &= ~DMA_PMEN_EPM;
1725         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1726
1727         /* wait for the protected region status bit to clear */
1728         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1729                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1730
1731         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1732 }
1733
1734 static void iommu_enable_translation(struct intel_iommu *iommu)
1735 {
1736         u32 sts;
1737         unsigned long flags;
1738
1739         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1740         iommu->gcmd |= DMA_GCMD_TE;
1741         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1742
1743         /* Make sure hardware completes it */
1744         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1745                       readl, (sts & DMA_GSTS_TES), sts);
1746
1747         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1748 }
1749
1750 static void iommu_disable_translation(struct intel_iommu *iommu)
1751 {
1752         u32 sts;
1753         unsigned long flag;
1754
1755         if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
1756             (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
1757                 return;
1758
1759         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1760         iommu->gcmd &= ~DMA_GCMD_TE;
1761         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1762
1763         /* Make sure hardware completes it */
1764         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1765                       readl, (!(sts & DMA_GSTS_TES)), sts);
1766
1767         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1768 }
1769
1770 static int iommu_init_domains(struct intel_iommu *iommu)
1771 {
1772         u32 ndomains, nlongs;
1773         size_t size;
1774
1775         ndomains = cap_ndoms(iommu->cap);
1776         pr_debug("%s: Number of Domains supported <%d>\n",
1777                  iommu->name, ndomains);
1778         nlongs = BITS_TO_LONGS(ndomains);
1779
1780         spin_lock_init(&iommu->lock);
1781
1782         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1783         if (!iommu->domain_ids) {
1784                 pr_err("%s: Allocating domain id array failed\n",
1785                        iommu->name);
1786                 return -ENOMEM;
1787         }
1788
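             /*
              * The domain pointer table is two-level and indexed as
              * domains[did >> 8][did & 0xff]. Only the first-level array
              * and its first 256-entry block are allocated up front;
              * further blocks are allocated on demand.
              */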
1789         size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1790         iommu->domains = kzalloc(size, GFP_KERNEL);
1791
1792         if (iommu->domains) {
1793                 size = 256 * sizeof(struct dmar_domain *);
1794                 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1795         }
1796
1797         if (!iommu->domains || !iommu->domains[0]) {
1798                 pr_err("%s: Allocating domain array failed\n",
1799                        iommu->name);
1800                 kfree(iommu->domain_ids);
1801                 kfree(iommu->domains);
1802                 iommu->domain_ids = NULL;
1803                 iommu->domains    = NULL;
1804                 return -ENOMEM;
1805         }
1806
1807         /*
1808          * If Caching mode is set, then invalid translations are tagged
1809          * with domain-id 0, hence we need to pre-allocate it. We also
1810          * use domain-id 0 as a marker for non-allocated domain-id, so
1811          * make sure it is not used for a real domain.
1812          */
1813         set_bit(0, iommu->domain_ids);
1814
1815         /*
1816          * VT-d spec rev 3.0 (section 6.2.3.1) requires that each PASID
1817          * entry for first-level or pass-through translation modes should
1818          * be programmed with a domain id different from those used for
1819          * second-level or nested translation. We reserve a domain id for
1820          * this purpose.
1821          */
1822         if (sm_supported(iommu))
1823                 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
1824
1825         return 0;
1826 }
1827
1828 static void disable_dmar_iommu(struct intel_iommu *iommu)
1829 {
1830         struct device_domain_info *info, *tmp;
1831         unsigned long flags;
1832
1833         if (!iommu->domains || !iommu->domain_ids)
1834                 return;
1835
1836         spin_lock_irqsave(&device_domain_lock, flags);
1837         list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1838                 if (info->iommu != iommu)
1839                         continue;
1840
1841                 if (!info->dev || !info->domain)
1842                         continue;
1843
1844                 __dmar_remove_one_dev_info(info);
1845         }
1846         spin_unlock_irqrestore(&device_domain_lock, flags);
1847
1848         if (iommu->gcmd & DMA_GCMD_TE)
1849                 iommu_disable_translation(iommu);
1850 }
1851
1852 static void free_dmar_iommu(struct intel_iommu *iommu)
1853 {
1854         if ((iommu->domains) && (iommu->domain_ids)) {
1855                 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1856                 int i;
1857
1858                 for (i = 0; i < elems; i++)
1859                         kfree(iommu->domains[i]);
1860                 kfree(iommu->domains);
1861                 kfree(iommu->domain_ids);
1862                 iommu->domains = NULL;
1863                 iommu->domain_ids = NULL;
1864         }
1865
1866         g_iommus[iommu->seq_id] = NULL;
1867
1868         /* free context mapping */
1869         free_context_table(iommu);
1870
1871 #ifdef CONFIG_INTEL_IOMMU_SVM
1872         if (pasid_supported(iommu)) {
1873                 if (ecap_prs(iommu->ecap))
1874                         intel_svm_finish_prq(iommu);
1875         }
1876         if (vccap_pasid(iommu->vccap))
1877                 ioasid_unregister_allocator(&iommu->pasid_allocator);
1878
1879 #endif
1880 }
1881
1882 /*
1883  * Check and return whether first-level translation is used by default
1884  * for DMA.
1885  */
1886 static bool first_level_by_default(void)
1887 {
1888         return scalable_mode_support() && intel_cap_flts_sanity();
1889 }
1890
1891 static struct dmar_domain *alloc_domain(int flags)
1892 {
1893         struct dmar_domain *domain;
1894
1895         domain = alloc_domain_mem();
1896         if (!domain)
1897                 return NULL;
1898
1899         memset(domain, 0, sizeof(*domain));
1900         domain->nid = NUMA_NO_NODE;
1901         domain->flags = flags;
1902         if (first_level_by_default())
1903                 domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
1904         domain->has_iotlb_device = false;
1905         INIT_LIST_HEAD(&domain->devices);
1906         INIT_LIST_HEAD(&domain->subdevices);
1907
1908         return domain;
1909 }
1910
1911 /* Must be called with iommu->lock */
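     /*
      * On the first attachment of @domain to @iommu, allocate a domain id
      * (DID) from the iommu's domain_ids bitmap and record it in
      * domain->iommu_did[]; later attachments only bump the refcount.
      */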
1912 static int domain_attach_iommu(struct dmar_domain *domain,
1913                                struct intel_iommu *iommu)
1914 {
1915         unsigned long ndomains;
1916         int num;
1917
1918         assert_spin_locked(&device_domain_lock);
1919         assert_spin_locked(&iommu->lock);
1920
1921         domain->iommu_refcnt[iommu->seq_id] += 1;
1922         domain->iommu_count += 1;
1923         if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1924                 ndomains = cap_ndoms(iommu->cap);
1925                 num      = find_first_zero_bit(iommu->domain_ids, ndomains);
1926
1927                 if (num >= ndomains) {
1928                         pr_err("%s: No free domain ids\n", iommu->name);
1929                         domain->iommu_refcnt[iommu->seq_id] -= 1;
1930                         domain->iommu_count -= 1;
1931                         return -ENOSPC;
1932                 }
1933
1934                 set_bit(num, iommu->domain_ids);
1935                 set_iommu_domain(iommu, num, domain);
1936
1937                 domain->iommu_did[iommu->seq_id] = num;
1938                 domain->nid                      = iommu->node;
1939
1940                 domain_update_iommu_cap(domain);
1941         }
1942
1943         return 0;
1944 }
1945
1946 static int domain_detach_iommu(struct dmar_domain *domain,
1947                                struct intel_iommu *iommu)
1948 {
1949         int num, count;
1950
1951         assert_spin_locked(&device_domain_lock);
1952         assert_spin_locked(&iommu->lock);
1953
1954         domain->iommu_refcnt[iommu->seq_id] -= 1;
1955         count = --domain->iommu_count;
1956         if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1957                 num = domain->iommu_did[iommu->seq_id];
1958                 clear_bit(num, iommu->domain_ids);
1959                 set_iommu_domain(iommu, num, NULL);
1960
1961                 domain_update_iommu_cap(domain);
1962                 domain->iommu_did[iommu->seq_id] = 0;
1963         }
1964
1965         return count;
1966 }
1967
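     /*
      * Round a guest address width up to the next adjusted guest address
      * width supported by the page-table layout (12 + 9 * n bits), capped
      * at 64. For example, gaw 48 stays 48, while gaw 50 rounds up to 57.
      */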
1968 static inline int guestwidth_to_adjustwidth(int gaw)
1969 {
1970         int agaw;
1971         int r = (gaw - 12) % 9;
1972
1973         if (r == 0)
1974                 agaw = gaw;
1975         else
1976                 agaw = gaw + 9 - r;
1977         if (agaw > 64)
1978                 agaw = 64;
1979         return agaw;
1980 }
1981
1982 static void domain_exit(struct dmar_domain *domain)
1983 {
1984
1985         /* Remove associated devices and clear attached or cached domains */
1986         domain_remove_dev_info(domain);
1987
1988         /* destroy iovas */
1989         if (domain->domain.type == IOMMU_DOMAIN_DMA)
1990                 iommu_put_dma_cookie(&domain->domain);
1991
1992         if (domain->pgd) {
1993                 struct page *freelist;
1994
1995                 freelist = domain_unmap(domain, 0,
1996                                         DOMAIN_MAX_PFN(domain->gaw), NULL);
1997                 dma_free_pagelist(freelist);
1998         }
1999
2000         free_domain_mem(domain);
2001 }
2002
2003 /*
2004  * Get the PASID directory size for scalable mode context entry.
2005  * Value of X in the PDTS field of a scalable mode context entry
2006  * indicates a PASID directory with 2^(X + 7) entries.
2007  */
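     /*
      * For example, assuming 64 PASID entries per directory entry
      * (PASID_PDE_SHIFT == 6): with a 20-bit PASID space
      * (max_pasid == 1 << 20), max_pde == 1 << 14, so pds is 14 and the
      * function returns 7, i.e. a directory of 2^(7+7) entries.
      */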
2008 static inline unsigned long context_get_sm_pds(struct pasid_table *table)
2009 {
2010         int pds, max_pde;
2011
2012         max_pde = table->max_pasid >> PASID_PDE_SHIFT;
2013         pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
2014         if (pds < 7)
2015                 return 0;
2016
2017         return pds - 7;
2018 }
2019
2020 /*
2021  * Set the RID_PASID field of a scalable mode context entry. The
2022  * IOMMU hardware will use the PASID value set in this field for
2023  * DMA translations of DMA requests without PASID.
2024  */
2025 static inline void
2026 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
2027 {
2028         context->hi |= pasid & ((1 << 20) - 1);
2029 }
2030
2031 /*
2032  * Set the DTE(Device-TLB Enable) field of a scalable mode context
2033  * entry.
2034  */
2035 static inline void context_set_sm_dte(struct context_entry *context)
2036 {
2037         context->lo |= (1 << 2);
2038 }
2039
2040 /*
2041  * Set the PRE(Page Request Enable) field of a scalable mode context
2042  * entry.
2043  */
2044 static inline void context_set_sm_pre(struct context_entry *context)
2045 {
2046         context->lo |= (1 << 4);
2047 }
2048
2049 /* Convert value to context PASID directory size field coding. */
2050 #define context_pdts(pds)       (((pds) & 0x7) << 9)
2051
2052 static int domain_context_mapping_one(struct dmar_domain *domain,
2053                                       struct intel_iommu *iommu,
2054                                       struct pasid_table *table,
2055                                       u8 bus, u8 devfn)
2056 {
2057         u16 did = domain->iommu_did[iommu->seq_id];
2058         int translation = CONTEXT_TT_MULTI_LEVEL;
2059         struct device_domain_info *info = NULL;
2060         struct context_entry *context;
2061         unsigned long flags;
2062         int ret;
2063
2064         WARN_ON(did == 0);
2065
2066         if (hw_pass_through && domain_type_is_si(domain))
2067                 translation = CONTEXT_TT_PASS_THROUGH;
2068
2069         pr_debug("Set context mapping for %02x:%02x.%d\n",
2070                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2071
2072         BUG_ON(!domain->pgd);
2073
2074         spin_lock_irqsave(&device_domain_lock, flags);
2075         spin_lock(&iommu->lock);
2076
2077         ret = -ENOMEM;
2078         context = iommu_context_addr(iommu, bus, devfn, 1);
2079         if (!context)
2080                 goto out_unlock;
2081
2082         ret = 0;
2083         if (context_present(context))
2084                 goto out_unlock;
2085
2086         /*
2087          * For kdump cases, old valid entries may be cached due to the
2088          * in-flight DMA and copied pgtable, but there is no unmapping
2089          * behaviour for them, thus we need an explicit cache flush for
2090          * the newly-mapped device. For kdump, at this point, the device
2091          * is supposed to have finished its reset at driver probe stage,
2092          * so no in-flight DMA will exist, and we don't need to worry
2093          * about it hereafter.
2094          */
2095         if (context_copied(context)) {
2096                 u16 did_old = context_domain_id(context);
2097
2098                 if (did_old < cap_ndoms(iommu->cap)) {
2099                         iommu->flush.flush_context(iommu, did_old,
2100                                                    (((u16)bus) << 8) | devfn,
2101                                                    DMA_CCMD_MASK_NOBIT,
2102                                                    DMA_CCMD_DEVICE_INVL);
2103                         iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2104                                                  DMA_TLB_DSI_FLUSH);
2105                 }
2106         }
2107
2108         context_clear_entry(context);
2109
2110         if (sm_supported(iommu)) {
2111                 unsigned long pds;
2112
2113                 WARN_ON(!table);
2114
2115                 /* Setup the PASID DIR pointer: */
2116                 pds = context_get_sm_pds(table);
2117                 context->lo = (u64)virt_to_phys(table->table) |
2118                                 context_pdts(pds);
2119
2120                 /* Setup the RID_PASID field: */
2121                 context_set_sm_rid2pasid(context, PASID_RID2PASID);
2122
2123                 /*
2124                  * Setup the Device-TLB enable bit and Page request
2125                  * Enable bit:
2126                  */
2127                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2128                 if (info && info->ats_supported)
2129                         context_set_sm_dte(context);
2130                 if (info && info->pri_supported)
2131                         context_set_sm_pre(context);
2132         } else {
2133                 struct dma_pte *pgd = domain->pgd;
2134                 int agaw;
2135
2136                 context_set_domain_id(context, did);
2137
2138                 if (translation != CONTEXT_TT_PASS_THROUGH) {
2139                         /*
2140                          * Skip top levels of page tables for an IOMMU whose
2141                          * AGAW is smaller than the default. Unnecessary for PT mode.
2142                          */
2143                         for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2144                                 ret = -ENOMEM;
2145                                 pgd = phys_to_virt(dma_pte_addr(pgd));
2146                                 if (!dma_pte_present(pgd))
2147                                         goto out_unlock;
2148                         }
2149
2150                         info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2151                         if (info && info->ats_supported)
2152                                 translation = CONTEXT_TT_DEV_IOTLB;
2153                         else
2154                                 translation = CONTEXT_TT_MULTI_LEVEL;
2155
2156                         context_set_address_root(context, virt_to_phys(pgd));
2157                         context_set_address_width(context, agaw);
2158                 } else {
2159                         /*
2160                          * In pass-through mode, AW must be programmed to
2161                          * indicate the largest AGAW value supported by the
2162                          * hardware, and ASR is ignored by the hardware.
2163                          */
2164                         context_set_address_width(context, iommu->msagaw);
2165                 }
2166
2167                 context_set_translation_type(context, translation);
2168         }
2169
2170         context_set_fault_enable(context);
2171         context_set_present(context);
2172         if (!ecap_coherent(iommu->ecap))
2173                 clflush_cache_range(context, sizeof(*context));
2174
2175         /*
2176          * It's a non-present to present mapping. If hardware doesn't cache
2177          * non-present entries, we only need to flush the write-buffer. If it
2178          * _does_ cache non-present entries, then it does so in the special
2179          * domain #0, which we have to flush:
2180          */
2181         if (cap_caching_mode(iommu->cap)) {
2182                 iommu->flush.flush_context(iommu, 0,
2183                                            (((u16)bus) << 8) | devfn,
2184                                            DMA_CCMD_MASK_NOBIT,
2185                                            DMA_CCMD_DEVICE_INVL);
2186                 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2187         } else {
2188                 iommu_flush_write_buffer(iommu);
2189         }
2190         iommu_enable_dev_iotlb(info);
2191
2192         ret = 0;
2193
2194 out_unlock:
2195         spin_unlock(&iommu->lock);
2196         spin_unlock_irqrestore(&device_domain_lock, flags);
2197
2198         return ret;
2199 }
2200
2201 struct domain_context_mapping_data {
2202         struct dmar_domain *domain;
2203         struct intel_iommu *iommu;
2204         struct pasid_table *table;
2205 };
2206
2207 static int domain_context_mapping_cb(struct pci_dev *pdev,
2208                                      u16 alias, void *opaque)
2209 {
2210         struct domain_context_mapping_data *data = opaque;
2211
2212         return domain_context_mapping_one(data->domain, data->iommu,
2213                                           data->table, PCI_BUS_NUM(alias),
2214                                           alias & 0xff);
2215 }
2216
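     /*
      * Program a context entry for every DMA alias of @dev (for example,
      * a conventional-PCI bridge takes ownership of requests from its
      * secondary bus), not just for the device's own bus/devfn.
      */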
2217 static int
2218 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2219 {
2220         struct domain_context_mapping_data data;
2221         struct pasid_table *table;
2222         struct intel_iommu *iommu;
2223         u8 bus, devfn;
2224
2225         iommu = device_to_iommu(dev, &bus, &devfn);
2226         if (!iommu)
2227                 return -ENODEV;
2228
2229         table = intel_pasid_get_table(dev);
2230
2231         if (!dev_is_pci(dev))
2232                 return domain_context_mapping_one(domain, iommu, table,
2233                                                   bus, devfn);
2234
2235         data.domain = domain;
2236         data.iommu = iommu;
2237         data.table = table;
2238
2239         return pci_for_each_dma_alias(to_pci_dev(dev),
2240                                       &domain_context_mapping_cb, &data);
2241 }
2242
2243 static int domain_context_mapped_cb(struct pci_dev *pdev,
2244                                     u16 alias, void *opaque)
2245 {
2246         struct intel_iommu *iommu = opaque;
2247
2248         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2249 }
2250
2251 static int domain_context_mapped(struct device *dev)
2252 {
2253         struct intel_iommu *iommu;
2254         u8 bus, devfn;
2255
2256         iommu = device_to_iommu(dev, &bus, &devfn);
2257         if (!iommu)
2258                 return -ENODEV;
2259
2260         if (!dev_is_pci(dev))
2261                 return device_context_mapped(iommu, bus, devfn);
2262
2263         return !pci_for_each_dma_alias(to_pci_dev(dev),
2264                                        domain_context_mapped_cb, iommu);
2265 }
2266
2267 /* Return the number of VT-d pages needed, rounded up to the MM page size */
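     /*
      * For example, with 4 KiB pages: host_addr == 0x1234 and size ==
      * 0x2000 give an in-page offset of 0x234; PAGE_ALIGN(0x2234) is
      * 0x3000, i.e. three VT-d pages.
      */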
2268 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2269                                             size_t size)
2270 {
2271         host_addr &= ~PAGE_MASK;
2272         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2273 }
2274
2275 /* Return largest possible superpage level for a given mapping */
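     /*
      * Level 1 is a 4 KiB page, level 2 a 2 MiB superpage, level 3 a 1 GiB
      * superpage. For example, if iov_pfn and phy_pfn are both 512-page
      * (2 MiB) aligned, at least 512 pages remain to be mapped and the
      * hardware supports superpages, a level of at least 2 is returned.
      */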
2276 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2277                                           unsigned long iov_pfn,
2278                                           unsigned long phy_pfn,
2279                                           unsigned long pages)
2280 {
2281         int support, level = 1;
2282         unsigned long pfnmerge;
2283
2284         support = domain->iommu_superpage;
2285
2286         /* To use a large page, the virtual *and* physical addresses
2287            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2288            of them will mean we have to use smaller pages. So just
2289            merge them and check both at once. */
2290         pfnmerge = iov_pfn | phy_pfn;
2291
2292         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2293                 pages >>= VTD_STRIDE_SHIFT;
2294                 if (!pages)
2295                         break;
2296                 pfnmerge >>= VTD_STRIDE_SHIFT;
2297                 level++;
2298                 support--;
2299         }
2300         return level;
2301 }
2302
2303 /*
2304  * Ensure that old small page tables are removed to make room for superpage(s).
2305  * We're going to add new large pages, so make sure we don't remove their parent
2306  * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
2307  */
2308 static void switch_to_super_page(struct dmar_domain *domain,
2309                                  unsigned long start_pfn,
2310                                  unsigned long end_pfn, int level)
2311 {
2312         unsigned long lvl_pages = lvl_to_nr_pages(level);
2313         struct dma_pte *pte = NULL;
2314         int i;
2315
2316         while (start_pfn <= end_pfn) {
2317                 if (!pte)
2318                         pte = pfn_to_dma_pte(domain, start_pfn, &level);
2319
2320                 if (dma_pte_present(pte)) {
2321                         dma_pte_free_pagetable(domain, start_pfn,
2322                                                start_pfn + lvl_pages - 1,
2323                                                level + 1);
2324
2325                         for_each_domain_iommu(i, domain)
2326                                 iommu_flush_iotlb_psi(g_iommus[i], domain,
2327                                                       start_pfn, lvl_pages,
2328                                                       0, 0);
2329                 }
2330
2331                 pte++;
2332                 start_pfn += lvl_pages;
2333                 if (first_pte_in_page(pte))
2334                         pte = NULL;
2335         }
2336 }
2337
2338 static int
2339 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2340                  unsigned long phys_pfn, unsigned long nr_pages, int prot)
2341 {
2342         unsigned int largepage_lvl = 0;
2343         unsigned long lvl_pages = 0;
2344         struct dma_pte *pte = NULL;
2345         phys_addr_t pteval;
2346         u64 attr;
2347
2348         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2349
2350         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2351                 return -EINVAL;
2352
2353         attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
2354         attr |= DMA_FL_PTE_PRESENT;
2355         if (domain_use_first_level(domain)) {
2356                 attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
2357
2358                 if (domain->domain.type == IOMMU_DOMAIN_DMA) {
2359                         attr |= DMA_FL_PTE_ACCESS;
2360                         if (prot & DMA_PTE_WRITE)
2361                                 attr |= DMA_FL_PTE_DIRTY;
2362                 }
2363         }
2364
2365         pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
2366
2367         while (nr_pages > 0) {
2368                 uint64_t tmp;
2369
2370                 if (!pte) {
2371                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
2372                                         phys_pfn, nr_pages);
2373
2374                         pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2375                         if (!pte)
2376                                 return -ENOMEM;
2377                         /* It is a large page */
2378                         if (largepage_lvl > 1) {
2379                                 unsigned long end_pfn;
2380
2381                                 pteval |= DMA_PTE_LARGE_PAGE;
2382                                 end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
2383                                 switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
2384                         } else {
2385                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2386                         }
2387
2388                 }
2389                 /* We don't need a lock here; nobody else
2390                  * touches this IOVA range.
2391                  */
2392                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2393                 if (tmp) {
2394                         static int dumps = 5;
2395                         pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2396                                 iov_pfn, tmp, (unsigned long long)pteval);
2397                         if (dumps) {
2398                                 dumps--;
2399                                 debug_dma_dump_mappings(NULL);
2400                         }
2401                         WARN_ON(1);
2402                 }
2403
2404                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2405
2406                 BUG_ON(nr_pages < lvl_pages);
2407
2408                 nr_pages -= lvl_pages;
2409                 iov_pfn += lvl_pages;
2410                 phys_pfn += lvl_pages;
2411                 pteval += lvl_pages * VTD_PAGE_SIZE;
2412
2413                 /* If the next PTE would be the first in a new page, then we
2414                  * need to flush the cache on the entries we've just written.
2415                  * And then we'll need to recalculate 'pte', so clear it and
2416                  * let it get set again in the if (!pte) block above.
2417                  *
2418                  * If we're done (!nr_pages) we need to flush the cache too.
2419                  *
2420                  * Also if we've been setting superpages, we may need to
2421                  * recalculate 'pte' and switch back to smaller pages for the
2422                  * end of the mapping, if the trailing size is not enough to
2423                  * use another superpage (i.e. nr_pages < lvl_pages).
2424                  *
2425                  * We leave clflush for the leaf pte changes to iotlb_sync_map()
2426                  * callback.
2427                  */
2428                 pte++;
2429                 if (!nr_pages || first_pte_in_page(pte) ||
2430                     (largepage_lvl > 1 && nr_pages < lvl_pages))
2431                         pte = NULL;
2432         }
2433
2434         return 0;
2435 }
2436
2437 static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
2438 {
2439         struct intel_iommu *iommu = info->iommu;
2440         struct context_entry *context;
2441         unsigned long flags;
2442         u16 did_old;
2443
2444         if (!iommu)
2445                 return;
2446
2447         spin_lock_irqsave(&iommu->lock, flags);
2448         context = iommu_context_addr(iommu, bus, devfn, 0);
2449         if (!context) {
2450                 spin_unlock_irqrestore(&iommu->lock, flags);
2451                 return;
2452         }
2453
2454         if (sm_supported(iommu)) {
2455                 if (hw_pass_through && domain_type_is_si(info->domain))
2456                         did_old = FLPT_DEFAULT_DID;
2457                 else
2458                         did_old = info->domain->iommu_did[iommu->seq_id];
2459         } else {
2460                 did_old = context_domain_id(context);
2461         }
2462
2463         context_clear_entry(context);
2464         __iommu_flush_cache(iommu, context, sizeof(*context));
2465         spin_unlock_irqrestore(&iommu->lock, flags);
2466         iommu->flush.flush_context(iommu,
2467                                    did_old,
2468                                    (((u16)bus) << 8) | devfn,
2469                                    DMA_CCMD_MASK_NOBIT,
2470                                    DMA_CCMD_DEVICE_INVL);
2471
2472         if (sm_supported(iommu))
2473                 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
2474
2475         iommu->flush.flush_iotlb(iommu,
2476                                  did_old,
2477                                  0,
2478                                  0,
2479                                  DMA_TLB_DSI_FLUSH);
2480
2481         __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
2482 }
2483
2484 static inline void unlink_domain_info(struct device_domain_info *info)
2485 {
2486         assert_spin_locked(&device_domain_lock);
2487         list_del(&info->link);
2488         list_del(&info->global);
2489         if (info->dev)
2490                 dev_iommu_priv_set(info->dev, NULL);
2491 }
2492
2493 static void domain_remove_dev_info(struct dmar_domain *domain)
2494 {
2495         struct device_domain_info *info, *tmp;
2496         unsigned long flags;
2497
2498         spin_lock_irqsave(&device_domain_lock, flags);
2499         list_for_each_entry_safe(info, tmp, &domain->devices, link)
2500                 __dmar_remove_one_dev_info(info);
2501         spin_unlock_irqrestore(&device_domain_lock, flags);
2502 }
2503
2504 struct dmar_domain *find_domain(struct device *dev)
2505 {
2506         struct device_domain_info *info;
2507
2508         if (unlikely(!dev || !dev->iommu))
2509                 return NULL;
2510
2511         if (unlikely(attach_deferred(dev)))
2512                 return NULL;
2513
2514         /* No lock here, assumes no domain exit in normal case */
2515         info = get_domain_info(dev);
2516         if (likely(info))
2517                 return info->domain;
2518
2519         return NULL;
2520 }
2521
2522 static inline struct device_domain_info *
2523 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2524 {
2525         struct device_domain_info *info;
2526
2527         list_for_each_entry(info, &device_domain_list, global)
2528                 if (info->segment == segment && info->bus == bus &&
2529                     info->devfn == devfn)
2530                         return info;
2531
2532         return NULL;
2533 }
2534
2535 static int domain_setup_first_level(struct intel_iommu *iommu,
2536                                     struct dmar_domain *domain,
2537                                     struct device *dev,
2538                                     u32 pasid)
2539 {
2540         struct dma_pte *pgd = domain->pgd;
2541         int agaw, level;
2542         int flags = 0;
2543
2544         /*
2545          * Skip top levels of page tables for an IOMMU whose AGAW is
2546          * smaller than the default. Unnecessary for PT mode.
2547          */
2548         for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2549                 pgd = phys_to_virt(dma_pte_addr(pgd));
2550                 if (!dma_pte_present(pgd))
2551                         return -ENOMEM;
2552         }
2553
2554         level = agaw_to_level(agaw);
2555         if (level != 4 && level != 5)
2556                 return -EINVAL;
2557
2558         if (pasid != PASID_RID2PASID)
2559                 flags |= PASID_FLAG_SUPERVISOR_MODE;
2560         if (level == 5)
2561                 flags |= PASID_FLAG_FL5LP;
2562
2563         if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
2564                 flags |= PASID_FLAG_PAGE_SNOOP;
2565
2566         return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
2567                                              domain->iommu_did[iommu->seq_id],
2568                                              flags);
2569 }
2570
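     /*
      * True when DMA for this device is actually issued by a different
      * "real DMA" PCI device, i.e. pci_real_dma_dev() does not return the
      * device itself.
      */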
2571 static bool dev_is_real_dma_subdevice(struct device *dev)
2572 {
2573         return dev && dev_is_pci(dev) &&
2574                pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
2575 }
2576
2577 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2578                                                     int bus, int devfn,
2579                                                     struct device *dev,
2580                                                     struct dmar_domain *domain)
2581 {
2582         struct dmar_domain *found = NULL;
2583         struct device_domain_info *info;
2584         unsigned long flags;
2585         int ret;
2586
2587         info = alloc_devinfo_mem();
2588         if (!info)
2589                 return NULL;
2590
2591         if (!dev_is_real_dma_subdevice(dev)) {
2592                 info->bus = bus;
2593                 info->devfn = devfn;
2594                 info->segment = iommu->segment;
2595         } else {
2596                 struct pci_dev *pdev = to_pci_dev(dev);
2597
2598                 info->bus = pdev->bus->number;
2599                 info->devfn = pdev->devfn;
2600                 info->segment = pci_domain_nr(pdev->bus);
2601         }
2602
2603         info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2604         info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2605         info->ats_qdep = 0;
2606         info->dev = dev;
2607         info->domain = domain;
2608         info->iommu = iommu;
2609         info->pasid_table = NULL;
2610         info->auxd_enabled = 0;
2611         INIT_LIST_HEAD(&info->subdevices);
2612
2613         if (dev && dev_is_pci(dev)) {
2614                 struct pci_dev *pdev = to_pci_dev(info->dev);
2615
2616                 if (ecap_dev_iotlb_support(iommu->ecap) &&
2617                     pci_ats_supported(pdev) &&
2618                     dmar_find_matched_atsr_unit(pdev))
2619                         info->ats_supported = 1;
2620
2621                 if (sm_supported(iommu)) {
2622                         if (pasid_supported(iommu)) {
2623                                 int features = pci_pasid_features(pdev);
2624                                 if (features >= 0)
2625                                         info->pasid_supported = features | 1;
2626                         }
2627
2628                         if (info->ats_supported && ecap_prs(iommu->ecap) &&
2629                             pci_pri_supported(pdev))
2630                                 info->pri_supported = 1;
2631                 }
2632         }
2633
2634         spin_lock_irqsave(&device_domain_lock, flags);
2635         if (dev)
2636                 found = find_domain(dev);
2637
2638         if (!found) {
2639                 struct device_domain_info *info2;
2640                 info2 = dmar_search_domain_by_dev_info(info->segment, info->bus,
2641                                                        info->devfn);
2642                 if (info2) {
2643                         found      = info2->domain;
2644                         info2->dev = dev;
2645                 }
2646         }
2647
2648         if (found) {
2649                 spin_unlock_irqrestore(&device_domain_lock, flags);
2650                 free_devinfo_mem(info);
2651                 /* Caller must free the original domain */
2652                 return found;
2653         }
2654
2655         spin_lock(&iommu->lock);
2656         ret = domain_attach_iommu(domain, iommu);
2657         spin_unlock(&iommu->lock);
2658
2659         if (ret) {
2660                 spin_unlock_irqrestore(&device_domain_lock, flags);
2661                 free_devinfo_mem(info);
2662                 return NULL;
2663         }
2664
2665         list_add(&info->link, &domain->devices);
2666         list_add(&info->global, &device_domain_list);
2667         if (dev)
2668                 dev_iommu_priv_set(dev, info);
2669         spin_unlock_irqrestore(&device_domain_lock, flags);
2670
2671         /* PASID table is mandatory for a PCI device in scalable mode. */
2672         if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2673                 ret = intel_pasid_alloc_table(dev);
2674                 if (ret) {
2675                         dev_err(dev, "PASID table allocation failed\n");
2676                         dmar_remove_one_dev_info(dev);
2677                         return NULL;
2678                 }
2679
2680                 /* Setup the PASID entry for requests without PASID: */
2681                 spin_lock_irqsave(&iommu->lock, flags);
2682                 if (hw_pass_through && domain_type_is_si(domain))
2683                         ret = intel_pasid_setup_pass_through(iommu, domain,
2684                                         dev, PASID_RID2PASID);
2685                 else if (domain_use_first_level(domain))
2686                         ret = domain_setup_first_level(iommu, domain, dev,
2687                                         PASID_RID2PASID);
2688                 else
2689                         ret = intel_pasid_setup_second_level(iommu, domain,
2690                                         dev, PASID_RID2PASID);
2691                 spin_unlock_irqrestore(&iommu->lock, flags);
2692                 if (ret) {
2693                         dev_err(dev, "Setup RID2PASID failed\n");
2694                         dmar_remove_one_dev_info(dev);
2695                         return NULL;
2696                 }
2697         }
2698
2699         if (dev && domain_context_mapping(domain, dev)) {
2700                 dev_err(dev, "Domain context map failed\n");
2701                 dmar_remove_one_dev_info(dev);
2702                 return NULL;
2703         }
2704
2705         return domain;
2706 }
2707
2708 static int iommu_domain_identity_map(struct dmar_domain *domain,
2709                                      unsigned long first_vpfn,
2710                                      unsigned long last_vpfn)
2711 {
2712         /*
2713          * The RMRR range might overlap with a physical memory range,
2714          * so clear it first.
2715          */
2716         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2717
2718         return __domain_mapping(domain, first_vpfn,
2719                                 first_vpfn, last_vpfn - first_vpfn + 1,
2720                                 DMA_PTE_READ|DMA_PTE_WRITE);
2721 }
2722
2723 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2724
2725 static int __init si_domain_init(int hw)
2726 {
2727         struct dmar_rmrr_unit *rmrr;
2728         struct device *dev;
2729         int i, nid, ret;
2730
2731         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2732         if (!si_domain)
2733                 return -EFAULT;
2734
2735         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2736                 domain_exit(si_domain);
2737                 return -EFAULT;
2738         }
2739
2740         if (hw)
2741                 return 0;
2742
2743         for_each_online_node(nid) {
2744                 unsigned long start_pfn, end_pfn;
2745                 int i;
2746
2747                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2748                         ret = iommu_domain_identity_map(si_domain,
2749                                         mm_to_dma_pfn(start_pfn),
2750                                         mm_to_dma_pfn(end_pfn));
2751                         if (ret)
2752                                 return ret;
2753                 }
2754         }
2755
2756         /*
2757          * Identity map the RMRRs so that devices with RMRRs can also use
2758          * the si_domain.
2759          */
2760         for_each_rmrr_units(rmrr) {
2761                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2762                                           i, dev) {
2763                         unsigned long long start = rmrr->base_address;
2764                         unsigned long long end = rmrr->end_address;
2765
2766                         if (WARN_ON(end < start ||
2767                                     end >> agaw_to_width(si_domain->agaw)))
2768                                 continue;
2769
2770                         ret = iommu_domain_identity_map(si_domain,
2771                                         mm_to_dma_pfn(start >> PAGE_SHIFT),
2772                                         mm_to_dma_pfn(end >> PAGE_SHIFT));
2773                         if (ret)
2774                                 return ret;
2775                 }
2776         }
2777
2778         return 0;
2779 }
2780
2781 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2782 {
2783         struct dmar_domain *ndomain;
2784         struct intel_iommu *iommu;
2785         u8 bus, devfn;
2786
2787         iommu = device_to_iommu(dev, &bus, &devfn);
2788         if (!iommu)
2789                 return -ENODEV;
2790
2791         ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2792         if (ndomain != domain)
2793                 return -EBUSY;
2794
2795         return 0;
2796 }
2797
2798 static bool device_has_rmrr(struct device *dev)
2799 {
2800         struct dmar_rmrr_unit *rmrr;
2801         struct device *tmp;
2802         int i;
2803
2804         rcu_read_lock();
2805         for_each_rmrr_units(rmrr) {
2806                 /*
2807                  * Return TRUE if this RMRR contains the device that
2808                  * is passed in.
2809                  */
2810                 for_each_active_dev_scope(rmrr->devices,
2811                                           rmrr->devices_cnt, i, tmp)
2812                         if (tmp == dev ||
2813                             is_downstream_to_pci_bridge(dev, tmp)) {
2814                                 rcu_read_unlock();
2815                                 return true;
2816                         }
2817         }
2818         rcu_read_unlock();
2819         return false;
2820 }
2821
2822 /**
2823  * device_rmrr_is_relaxable - Test whether the RMRR of this device
2824  * is relaxable (i.e. is allowed to be left unenforced under some conditions)
2825  * @dev: device handle
2826  *
2827  * We assume that PCI USB devices with RMRRs have them largely
2828  * for historical reasons and that the RMRR space is not actively used post
2829  * boot.  This exclusion may change if vendors begin to abuse it.
2830  *
2831  * The same exception is made for graphics devices, with the requirement that
2832  * any use of the RMRR regions will be torn down before assigning the device
2833  * to a guest.
2834  *
2835  * Return: true if the RMRR is relaxable, false otherwise
2836  */
2837 static bool device_rmrr_is_relaxable(struct device *dev)
2838 {
2839         struct pci_dev *pdev;
2840
2841         if (!dev_is_pci(dev))
2842                 return false;
2843
2844         pdev = to_pci_dev(dev);
2845         if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2846                 return true;
2847         else
2848                 return false;
2849 }
2850
2851 /*
2852  * There are a couple of cases where we need to restrict the functionality of
2853  * devices associated with RMRRs.  The first is when evaluating a device for
2854  * identity mapping because problems exist when devices are moved in and out
2855  * of domains and their respective RMRR information is lost.  This means that
2856  * a device with associated RMRRs will never be in a "passthrough" domain.
2857  * The second is use of the device through the IOMMU API.  This interface
2858  * expects to have full control of the IOVA space for the device.  We cannot
2859  * satisfy both the requirement that RMRR access is maintained and have an
2860  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2861  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2862  * We therefore prevent devices associated with an RMRR from participating in
2863  * the IOMMU API, which eliminates them from device assignment.
2864  *
2865  * In both cases, devices which have relaxable RMRRs are not concerned by this
2866  * restriction. See device_rmrr_is_relaxable comment.
2867  */
2868 static bool device_is_rmrr_locked(struct device *dev)
2869 {
2870         if (!device_has_rmrr(dev))
2871                 return false;
2872
2873         if (device_rmrr_is_relaxable(dev))
2874                 return false;
2875
2876         return true;
2877 }
2878
2879 /*
2880  * Return the required default domain type for a specific device.
2881  *
2882  * @dev: the device in query
2884  *
2885  * Returns:
2886  *  - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
2887  *  - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
2888  *  - 0: both identity and dynamic domains work for this device
2889  */
2890 static int device_def_domain_type(struct device *dev)
2891 {
2892         if (dev_is_pci(dev)) {
2893                 struct pci_dev *pdev = to_pci_dev(dev);
2894
2895                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2896                         return IOMMU_DOMAIN_IDENTITY;
2897
2898                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2899                         return IOMMU_DOMAIN_IDENTITY;
2900         }
2901
2902         return 0;
2903 }
2904
2905 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2906 {
2907         /*
2908          * Start from a sane iommu hardware state.
2909          * If queued invalidation was already initialized by us
2910          * (for example, while enabling interrupt remapping), then
2911          * things are already rolling from a sane state.
2912          */
2913         if (!iommu->qi) {
2914                 /*
2915                  * Clear any previous faults.
2916                  */
2917                 dmar_fault(-1, iommu);
2918                 /*
2919                  * Disable queued invalidation if supported and already enabled
2920                  * before OS handover.
2921                  */
2922                 dmar_disable_qi(iommu);
2923         }
2924
2925         if (dmar_enable_qi(iommu)) {
2926                 /*
2927                  * Queued invalidation is not enabled, use register-based invalidation
2928                  */
2929                 iommu->flush.flush_context = __iommu_flush_context;
2930                 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2931                 pr_info("%s: Using Register based invalidation\n",
2932                         iommu->name);
2933         } else {
2934                 iommu->flush.flush_context = qi_flush_context;
2935                 iommu->flush.flush_iotlb = qi_flush_iotlb;
2936                 pr_info("%s: Using Queued invalidation\n", iommu->name);
2937         }
2938 }
2939
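     /*
      * Copy one bus worth of context entries from the old kernel's tables.
      * With extended context entries (ext), each entry is twice as large,
      * so a bus spans two pages: devfns 0x00-0x7f come from the lower
      * context-table pointer and 0x80-0xff from the upper one, hence the
      * "* 2" indexing below.
      */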
2940 static int copy_context_table(struct intel_iommu *iommu,
2941                               struct root_entry *old_re,
2942                               struct context_entry **tbl,
2943                               int bus, bool ext)
2944 {
2945         int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2946         struct context_entry *new_ce = NULL, ce;
2947         struct context_entry *old_ce = NULL;
2948         struct root_entry re;
2949         phys_addr_t old_ce_phys;
2950
2951         tbl_idx = ext ? bus * 2 : bus;
2952         memcpy(&re, old_re, sizeof(re));
2953
2954         for (devfn = 0; devfn < 256; devfn++) {
2955                 /* First calculate the correct index */
2956                 idx = (ext ? devfn * 2 : devfn) % 256;
2957
2958                 if (idx == 0) {
2959                         /* First save what we may have and clean up */
2960                         if (new_ce) {
2961                                 tbl[tbl_idx] = new_ce;
2962                                 __iommu_flush_cache(iommu, new_ce,
2963                                                     VTD_PAGE_SIZE);
2964                                 pos = 1;
2965                         }
2966
2967                         if (old_ce)
2968                                 memunmap(old_ce);
2969
2970                         ret = 0;
2971                         if (devfn < 0x80)
2972                                 old_ce_phys = root_entry_lctp(&re);
2973                         else
2974                                 old_ce_phys = root_entry_uctp(&re);
2975
2976                         if (!old_ce_phys) {
2977                                 if (ext && devfn == 0) {
2978                                         /* No LCTP, try UCTP */
2979                                         devfn = 0x7f;
2980                                         continue;
2981                                 } else {
2982                                         goto out;
2983                                 }
2984                         }
2985
2986                         ret = -ENOMEM;
2987                         old_ce = memremap(old_ce_phys, PAGE_SIZE,
2988                                         MEMREMAP_WB);
2989                         if (!old_ce)
2990                                 goto out;
2991
2992                         new_ce = alloc_pgtable_page(iommu->node);
2993                         if (!new_ce)
2994                                 goto out_unmap;
2995
2996                         ret = 0;
2997                 }
2998
2999                 /* Now copy the context entry */
3000                 memcpy(&ce, old_ce + idx, sizeof(ce));
3001
3002                 if (!__context_present(&ce))
3003                         continue;
3004
3005                 did = context_domain_id(&ce);
3006                 if (did >= 0 && did < cap_ndoms(iommu->cap))
3007                         set_bit(did, iommu->domain_ids);
3008
3009                 /*
3010                  * We need a marker for copied context entries. This
3011                  * marker needs to work for the old format as well as
3012                  * for extended context entries.
3013                  *
3014                  * Bit 67 of the context entry is used. In the old
3015                  * format this bit is available to software, in the
3016                  * extended format it is the PGE bit, but PGE is ignored
3017                  * by HW if PASIDs are disabled (and thus still
3018                  * available).
3019                  *
3020                  * So disable PASIDs first and then mark the entry
3021                  * copied. This means that we don't copy PASID
3022                  * translations from the old kernel, but this is fine as
3023                  * faults there are not fatal.
3024                  */
3025                 context_clear_pasid_enable(&ce);
3026                 context_set_copied(&ce);
3027
3028                 new_ce[idx] = ce;
3029         }
3030
3031         tbl[tbl_idx + pos] = new_ce;
3032
3033         __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3034
3035 out_unmap:
3036         memunmap(old_ce);
3037
3038 out:
3039         return ret;
3040 }
3041
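/*
 * Copy the root and context tables programmed by the previous kernel
 * (e.g. when booting a kdump kernel) so that DMA set up by the old
 * kernel keeps working until the devices are taken over.
 */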
3042 static int copy_translation_tables(struct intel_iommu *iommu)
3043 {
3044         struct context_entry **ctxt_tbls;
3045         struct root_entry *old_rt;
3046         phys_addr_t old_rt_phys;
3047         int ctxt_table_entries;
3048         unsigned long flags;
3049         u64 rtaddr_reg;
3050         int bus, ret;
3051         bool new_ext, ext;
3052
3053         rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3054         ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
3055         new_ext    = !!ecap_ecs(iommu->ecap);
3056
3057         /*
3058          * The RTT bit can only be changed when translation is disabled,
3059          * but disabling translation opens a window for data corruption.
3060          * So bail out and don't copy anything if we would have to change
3061          * the bit.
3062          */
3063         if (new_ext != ext)
3064                 return -EINVAL;
3065
3066         old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3067         if (!old_rt_phys)
3068                 return -EINVAL;
3069
3070         old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3071         if (!old_rt)
3072                 return -ENOMEM;
3073
3074         /* This is too big for the stack - allocate it from slab */
3075         ctxt_table_entries = ext ? 512 : 256;
3076         ret = -ENOMEM;
3077         ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3078         if (!ctxt_tbls)
3079                 goto out_unmap;
3080
3081         for (bus = 0; bus < 256; bus++) {
3082                 ret = copy_context_table(iommu, &old_rt[bus],
3083                                          ctxt_tbls, bus, ext);
3084                 if (ret) {
3085                         pr_err("%s: Failed to copy context table for bus %d\n",
3086                                 iommu->name, bus);
3087                         continue;
3088                 }
3089         }
3090
3091         spin_lock_irqsave(&iommu->lock, flags);
3092
3093         /* Context tables are copied, now write them to the root_entry table */
3094         for (bus = 0; bus < 256; bus++) {
3095                 int idx = ext ? bus * 2 : bus;
3096                 u64 val;
3097
3098                 if (ctxt_tbls[idx]) {
3099                         val = virt_to_phys(ctxt_tbls[idx]) | 1;
3100                         iommu->root_entry[bus].lo = val;
3101                 }
3102
3103                 if (!ext || !ctxt_tbls[idx + 1])
3104                         continue;
3105
3106                 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3107                 iommu->root_entry[bus].hi = val;
3108         }
3109
3110         spin_unlock_irqrestore(&iommu->lock, flags);
3111
3112         kfree(ctxt_tbls);
3113
3114         __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3115
3116         ret = 0;
3117
3118 out_unmap:
3119         memunmap(old_rt);
3120
3121         return ret;
3122 }
3123
3124 #ifdef CONFIG_INTEL_IOMMU_SVM
3125 static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
3126 {
3127         struct intel_iommu *iommu = data;
3128         ioasid_t ioasid;
3129
3130         if (!iommu)
3131                 return INVALID_IOASID;
3132         /*
3133          * The VT-d virtual command interface always uses the full 20-bit
3134          * PASID range. The host can partition the guest PASID range based
3135          * on its policies, but that is out of the guest's control.
3136          */
3137         if (min < PASID_MIN || max > intel_pasid_max_id)
3138                 return INVALID_IOASID;
3139
3140         if (vcmd_alloc_pasid(iommu, &ioasid))
3141                 return INVALID_IOASID;
3142
3143         return ioasid;
3144 }
3145
3146 static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
3147 {
3148         struct intel_iommu *iommu = data;
3149
3150         if (!iommu)
3151                 return;
3152         /*
3153          * Sanity checking of the IOASID owner is done at the upper layer,
3154          * e.g. VFIO. We can only free the PASID when all devices are unbound.
3155          */
3156         if (ioasid_find(NULL, ioasid, NULL)) {
3157                 pr_alert("Cannot free active IOASID %d\n", ioasid);
3158                 return;
3159         }
3160         vcmd_free_pasid(iommu, ioasid);
3161 }
3162
3163 static void register_pasid_allocator(struct intel_iommu *iommu)
3164 {
3165         /*
3166          * If we are running in the host, there is no need for a custom
3167          * allocator since PASIDs are allocated system-wide by the host.
3168          */
3169         if (!cap_caching_mode(iommu->cap))
3170                 return;
3171
3172         if (!sm_supported(iommu)) {
3173                 pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
3174                 return;
3175         }
3176
3177         /*
3178          * Register a custom PASID allocator if we are running in a guest;
3179          * guest PASIDs must be obtained via the virtual command interface.
3180          * There can be multiple vIOMMUs in each guest but only one allocator
3181          * is active. All vIOMMU allocators will eventually call the same
3182          * host allocator.
3183          */
3184         if (!vccap_pasid(iommu->vccap))
3185                 return;
3186
3187         pr_info("Register custom PASID allocator\n");
3188         iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
3189         iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
3190         iommu->pasid_allocator.pdata = (void *)iommu;
3191         if (ioasid_register_allocator(&iommu->pasid_allocator)) {
3192                 pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
3193                 /*
3194                  * Disable scalable mode on this IOMMU if there is no
3195                  * custom allocator. Mixing SM-capable and non-SM vIOMMUs
3196                  * is not supported.
3197                  */
3198                 intel_iommu_sm = 0;
3199         }
3200 }
3201 #endif
3202
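/*
 * Boot-time initialization of all DMAR units: allocate the global iommu
 * array, set up invalidation, domain IDs and root entries for each IOMMU
 * (copying translation tables from a previous kernel where applicable),
 * initialize the static identity domain and enable fault reporting.
 */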
3203 static int __init init_dmars(void)
3204 {
3205         struct dmar_drhd_unit *drhd;
3206         struct intel_iommu *iommu;
3207         int ret;
3208
3209         /*
3210          * for each drhd
3211          *    allocate root
3212          *    initialize and program root entry to not present
3213          * endfor
3214          */
3215         for_each_drhd_unit(drhd) {
3216                 /*
3217                  * No lock needed: this is only incremented in the
3218                  * single-threaded kernel __init code path; all other
3219                  * accesses are read-only.
3220                  */
3221                 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3222                         g_num_of_iommus++;
3223                         continue;
3224                 }
3225                 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3226         }
3227
3228         /* Preallocate enough resources for IOMMU hot-addition */
3229         if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3230                 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3231
3232         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3233                         GFP_KERNEL);
3234         if (!g_iommus) {
3235                 pr_err("Allocating global iommu array failed\n");
3236                 ret = -ENOMEM;
3237                 goto error;
3238         }
3239
3240         ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
3241         if (ret)
3242                 goto free_iommu;
3243
3244         for_each_iommu(iommu, drhd) {
3245                 if (drhd->ignored) {
3246                         iommu_disable_translation(iommu);
3247                         continue;
3248                 }
3249
3250                 /*
3251                  * Find the maximum PASID size supported by all IOMMUs in
3252                  * the system. The system PASID table must be no bigger
3253                  * than the smallest supported size.
3254                  */
3255                 if (pasid_supported(iommu)) {
3256                         u32 temp = 2 << ecap_pss(iommu->ecap);
3257
3258                         intel_pasid_max_id = min_t(u32, temp,
3259                                                    intel_pasid_max_id);
3260                 }
3261
3262                 g_iommus[iommu->seq_id] = iommu;
3263
3264                 intel_iommu_init_qi(iommu);
3265
3266                 ret = iommu_init_domains(iommu);
3267                 if (ret)
3268                         goto free_iommu;
3269
3270                 init_translation_status(iommu);
3271
3272                 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3273                         iommu_disable_translation(iommu);
3274                         clear_translation_pre_enabled(iommu);
3275                         pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3276                                 iommu->name);
3277                 }
3278
3279                 /*
3280                  * TBD:
3281                  * We could share the same root & context tables
3282                  * among all IOMMUs. Needs to be split out later.
3283                  */
3284                 ret = iommu_alloc_root_entry(iommu);
3285                 if (ret)
3286                         goto free_iommu;
3287
3288                 if (translation_pre_enabled(iommu)) {
3289                         pr_info("Translation already enabled - trying to copy translation structures\n");
3290
3291                         ret = copy_translation_tables(iommu);
3292                         if (ret) {
3293                                 /*
3294                                  * We found the IOMMU with translation
3295                                  * enabled - but failed to copy over the
3296                                  * old root-entry table. Try to proceed
3297                                  * by disabling translation now and
3298                                  * allocating a clean root-entry table.
3299                                  * This might cause DMAR faults, but
3300                                  * probably the dump will still succeed.
3301                                  */
3302                                 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3303                                        iommu->name);
3304                                 iommu_disable_translation(iommu);
3305                                 clear_translation_pre_enabled(iommu);
3306                         } else {
3307                                 pr_info("Copied translation tables from previous kernel for %s\n",
3308                                         iommu->name);
3309                         }
3310                 }
3311
3312                 if (!ecap_pass_through(iommu->ecap))
3313                         hw_pass_through = 0;
3314                 intel_svm_check(iommu);
3315         }
3316
3317         /*
3318          * Now that qi is enabled on all iommus, set the root entry and flush
3319          * caches. This is required on some Intel X58 chipsets; otherwise the
3320          * flush_context function will loop forever and the boot hangs.
3321          */
3322         for_each_active_iommu(iommu, drhd) {
3323                 iommu_flush_write_buffer(iommu);
3324 #ifdef CONFIG_INTEL_IOMMU_SVM
3325                 register_pasid_allocator(iommu);
3326 #endif
3327                 iommu_set_root_entry(iommu);
3328         }
3329
3330 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3331         dmar_map_gfx = 0;
3332 #endif
3333
3334         if (!dmar_map_gfx)
3335                 iommu_identity_mapping |= IDENTMAP_GFX;
3336
3337         check_tylersburg_isoch();
3338
3339         ret = si_domain_init(hw_pass_through);
3340         if (ret)
3341                 goto free_iommu;
3342
3343         /*
3344          * for each drhd
3345          *   enable fault log
3346          *   global invalidate context cache
3347          *   global invalidate iotlb
3348          *   enable translation
3349          */
3350         for_each_iommu(iommu, drhd) {
3351                 if (drhd->ignored) {
3352                         /*
3353                          * we always have to disable PMRs or DMA may fail on
3354                          * this device
3355                          */
3356                         if (force_on)
3357                                 iommu_disable_protect_mem_regions(iommu);
3358                         continue;
3359                 }
3360
3361                 iommu_flush_write_buffer(iommu);
3362
3363 #ifdef CONFIG_INTEL_IOMMU_SVM
3364                 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3365                         /*
3366                          * Calling dmar_alloc_hwirq() with dmar_global_lock held
3367                          * could cause a lock race condition, so drop the lock here.
3368                          */
3369                         up_write(&dmar_global_lock);
3370                         ret = intel_svm_enable_prq(iommu);
3371                         down_write(&dmar_global_lock);
3372                         if (ret)
3373                                 goto free_iommu;
3374                 }
3375 #endif
3376                 ret = dmar_set_interrupt(iommu);
3377                 if (ret)
3378                         goto free_iommu;
3379         }
3380
3381         return 0;
3382
3383 free_iommu:
3384         for_each_active_iommu(iommu, drhd) {
3385                 disable_dmar_iommu(iommu);
3386                 free_dmar_iommu(iommu);
3387         }
3388
3389         kfree(g_iommus);
3390
3391 error:
3392         return ret;
3393 }
3394
3395 static inline int iommu_domain_cache_init(void)
3396 {
3397         int ret = 0;
3398
3399         iommu_domain_cache = kmem_cache_create("iommu_domain",
3400                                          sizeof(struct dmar_domain),
3401                                          0,
3402                                          SLAB_HWCACHE_ALIGN,
3403                                          NULL);
3404
3405         if (!iommu_domain_cache) {
3406                 pr_err("Couldn't create iommu_domain cache\n");
3407                 ret = -ENOMEM;
3408         }
3409
3410         return ret;
3411 }
3412
3413 static inline int iommu_devinfo_cache_init(void)
3414 {
3415         int ret = 0;
3416
3417         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3418                                          sizeof(struct device_domain_info),
3419                                          0,
3420                                          SLAB_HWCACHE_ALIGN,
3421                                          NULL);
3422         if (!iommu_devinfo_cache) {
3423                 pr_err("Couldn't create devinfo cache\n");
3424                 ret = -ENOMEM;
3425         }
3426
3427         return ret;
3428 }
3429
3430 static int __init iommu_init_mempool(void)
3431 {
3432         int ret;
3433         ret = iova_cache_get();
3434         if (ret)
3435                 return ret;
3436
3437         ret = iommu_domain_cache_init();
3438         if (ret)
3439                 goto domain_error;
3440
3441         ret = iommu_devinfo_cache_init();
3442         if (!ret)
3443                 return ret;
3444
3445         kmem_cache_destroy(iommu_domain_cache);
3446 domain_error:
3447         iova_cache_put();
3448
3449         return -ENOMEM;
3450 }
3451
3452 static void __init iommu_exit_mempool(void)
3453 {
3454         kmem_cache_destroy(iommu_devinfo_cache);
3455         kmem_cache_destroy(iommu_domain_cache);
3456         iova_cache_put();
3457 }
3458
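/*
 * Mark DRHD units whose device scope is empty as ignored, and flag
 * units that cover only graphics devices, ignoring those as well when
 * graphics mapping is disabled.
 */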
3459 static void __init init_no_remapping_devices(void)
3460 {
3461         struct dmar_drhd_unit *drhd;
3462         struct device *dev;
3463         int i;
3464
3465         for_each_drhd_unit(drhd) {
3466                 if (!drhd->include_all) {
3467                         for_each_active_dev_scope(drhd->devices,
3468                                                   drhd->devices_cnt, i, dev)
3469                                 break;
3470                         /* ignore DMAR unit if no devices exist */
3471                         if (i == drhd->devices_cnt)
3472                                 drhd->ignored = 1;
3473                 }
3474         }
3475
3476         for_each_active_drhd_unit(drhd) {
3477                 if (drhd->include_all)
3478                         continue;
3479
3480                 for_each_active_dev_scope(drhd->devices,
3481                                           drhd->devices_cnt, i, dev)
3482                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3483                                 break;
3484                 if (i < drhd->devices_cnt)
3485                         continue;
3486
3487                 /* This IOMMU has *only* gfx devices. Either bypass it or
3488                    mark it as dedicated to graphics, as appropriate. */
3489                 drhd->gfx_dedicated = 1;
3490                 if (!dmar_map_gfx)
3491                         drhd->ignored = 1;
3492         }
3493 }
3494
3495 #ifdef CONFIG_SUSPEND
3496 static int init_iommu_hw(void)
3497 {
3498         struct dmar_drhd_unit *drhd;
3499         struct intel_iommu *iommu = NULL;
3500
3501         for_each_active_iommu(iommu, drhd)
3502                 if (iommu->qi)
3503                         dmar_reenable_qi(iommu);
3504
3505         for_each_iommu(iommu, drhd) {
3506                 if (drhd->ignored) {
3507                         /*
3508                          * we always have to disable PMRs or DMA may fail on
3509                          * this device
3510                          */
3511                         if (force_on)
3512                                 iommu_disable_protect_mem_regions(iommu);
3513                         continue;
3514                 }
3515
3516                 iommu_flush_write_buffer(iommu);
3517                 iommu_set_root_entry(iommu);
3518                 iommu_enable_translation(iommu);
3519                 iommu_disable_protect_mem_regions(iommu);
3520         }
3521
3522         return 0;
3523 }
3524
3525 static void iommu_flush_all(void)
3526 {
3527         struct dmar_drhd_unit *drhd;
3528         struct intel_iommu *iommu;
3529
3530         for_each_active_iommu(iommu, drhd) {
3531                 iommu->flush.flush_context(iommu, 0, 0, 0,
3532                                            DMA_CCMD_GLOBAL_INVL);
3533                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3534                                          DMA_TLB_GLOBAL_FLUSH);
3535         }
3536 }
3537
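/*
 * Save the fault-event registers and disable translation on every
 * active IOMMU before entering system suspend.
 */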
3538 static int iommu_suspend(void)
3539 {
3540         struct dmar_drhd_unit *drhd;
3541         struct intel_iommu *iommu = NULL;
3542         unsigned long flag;
3543
3544         for_each_active_iommu(iommu, drhd) {
3545                 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
3546                                              GFP_KERNEL);
3547                 if (!iommu->iommu_state)
3548                         goto nomem;
3549         }
3550
3551         iommu_flush_all();
3552
3553         for_each_active_iommu(iommu, drhd) {
3554                 iommu_disable_translation(iommu);
3555
3556                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3557
3558                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3559                         readl(iommu->reg + DMAR_FECTL_REG);
3560                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3561                         readl(iommu->reg + DMAR_FEDATA_REG);
3562                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3563                         readl(iommu->reg + DMAR_FEADDR_REG);
3564                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3565                         readl(iommu->reg + DMAR_FEUADDR_REG);
3566
3567                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3568         }
3569         return 0;
3570
3571 nomem:
3572         for_each_active_iommu(iommu, drhd)
3573                 kfree(iommu->iommu_state);
3574
3575         return -ENOMEM;
3576 }
3577
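/*
 * Re-initialize the IOMMU hardware and restore the saved fault-event
 * registers when resuming from system suspend.
 */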
3578 static void iommu_resume(void)
3579 {
3580         struct dmar_drhd_unit *drhd;
3581         struct intel_iommu *iommu = NULL;
3582         unsigned long flag;
3583
3584         if (init_iommu_hw()) {
3585                 if (force_on)
3586                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3587                 else
3588                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3589                 return;
3590         }
3591
3592         for_each_active_iommu(iommu, drhd) {
3593
3594                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3595
3596                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3597                         iommu->reg + DMAR_FECTL_REG);
3598                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3599                         iommu->reg + DMAR_FEDATA_REG);
3600                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3601                         iommu->reg + DMAR_FEADDR_REG);
3602                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3603                         iommu->reg + DMAR_FEUADDR_REG);
3604
3605                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3606         }
3607
3608         for_each_active_iommu(iommu, drhd)
3609                 kfree(iommu->iommu_state);
3610 }
3611
3612 static struct syscore_ops iommu_syscore_ops = {
3613         .resume         = iommu_resume,
3614         .suspend        = iommu_suspend,
3615 };
3616
3617 static void __init init_iommu_pm_ops(void)
3618 {
3619         register_syscore_ops(&iommu_syscore_ops);
3620 }
3621
3622 #else
3623 static inline void init_iommu_pm_ops(void) {}
3624 #endif  /* CONFIG_SUSPEND */
3625
3626 static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
3627 {
3628         if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
3629             !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
3630             rmrr->end_address <= rmrr->base_address ||
3631             arch_rmrr_sanity_check(rmrr))
3632                 return -EINVAL;
3633
3634         return 0;
3635 }
3636
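/*
 * Parse one RMRR (Reserved Memory Region Reporting) entry from the DMAR
 * table, warn about firmware bugs, and add it to the global RMRR list.
 */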
3637 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3638 {
3639         struct acpi_dmar_reserved_memory *rmrr;
3640         struct dmar_rmrr_unit *rmrru;
3641
3642         rmrr = (struct acpi_dmar_reserved_memory *)header;
3643         if (rmrr_sanity_check(rmrr)) {
3644                 pr_warn(FW_BUG
3645                            "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
3646                            "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
3647                            rmrr->base_address, rmrr->end_address,
3648                            dmi_get_system_info(DMI_BIOS_VENDOR),
3649                            dmi_get_system_info(DMI_BIOS_VERSION),
3650                            dmi_get_system_info(DMI_PRODUCT_VERSION));
3651                 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
3652         }
3653
3654         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3655         if (!rmrru)
3656                 goto out;
3657
3658         rmrru->hdr = header;
3659
3660         rmrru->base_address = rmrr->base_address;
3661         rmrru->end_address = rmrr->end_address;
3662
3663         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3664                                 ((void *)rmrr) + rmrr->header.length,
3665                                 &rmrru->devices_cnt);
3666         if (rmrru->devices_cnt && rmrru->devices == NULL)
3667                 goto free_rmrru;
3668
3669         list_add(&rmrru->list, &dmar_rmrr_units);
3670
3671         return 0;
3672 free_rmrru:
3673         kfree(rmrru);
3674 out:
3675         return -ENOMEM;
3676 }
3677
3678 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3679 {
3680         struct dmar_atsr_unit *atsru;
3681         struct acpi_dmar_atsr *tmp;
3682
3683         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
3684                                 dmar_rcu_check()) {
3685                 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3686                 if (atsr->segment != tmp->segment)
3687                         continue;
3688                 if (atsr->header.length != tmp->header.length)
3689                         continue;
3690                 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3691                         return atsru;
3692         }
3693
3694         return NULL;
3695 }
3696
3697 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3698 {
3699         struct acpi_dmar_atsr *atsr;
3700         struct dmar_atsr_unit *atsru;
3701
3702         if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
3703                 return 0;
3704
3705         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3706         atsru = dmar_find_atsr(atsr);
3707         if (atsru)
3708                 return 0;
3709
3710         atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
3711         if (!atsru)
3712                 return -ENOMEM;
3713
3714         /*
3715          * If memory is allocated from slab by ACPI _DSM method, we need to
3716          * copy the memory content because the memory buffer will be freed
3717          * on return.
3718          */
3719         atsru->hdr = (void *)(atsru + 1);
3720         memcpy(atsru->hdr, hdr, hdr->length);
3721         atsru->include_all = atsr->flags & 0x1;
3722         if (!atsru->include_all) {
3723                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3724                                 (void *)atsr + atsr->header.length,
3725                                 &atsru->devices_cnt);
3726                 if (atsru->devices_cnt && atsru->devices == NULL) {
3727                         kfree(atsru);
3728                         return -ENOMEM;
3729                 }
3730         }
3731
3732         list_add_rcu(&atsru->list, &dmar_atsr_units);
3733
3734         return 0;
3735 }
3736
3737 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3738 {
3739         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3740         kfree(atsru);
3741 }
3742
3743 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3744 {
3745         struct acpi_dmar_atsr *atsr;
3746         struct dmar_atsr_unit *atsru;
3747
3748         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3749         atsru = dmar_find_atsr(atsr);
3750         if (atsru) {
3751                 list_del_rcu(&atsru->list);
3752                 synchronize_rcu();
3753                 intel_iommu_free_atsr(atsru);
3754         }
3755
3756         return 0;
3757 }
3758
3759 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3760 {
3761         int i;
3762         struct device *dev;
3763         struct acpi_dmar_atsr *atsr;
3764         struct dmar_atsr_unit *atsru;
3765
3766         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3767         atsru = dmar_find_atsr(atsr);
3768         if (!atsru)
3769                 return 0;
3770
3771         if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
3772                 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3773                                           i, dev)
3774                         return -EBUSY;
3775         }
3776
3777         return 0;
3778 }
3779
3780 static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
3781 {
3782         struct dmar_satc_unit *satcu;
3783         struct acpi_dmar_satc *tmp;
3784
3785         list_for_each_entry_rcu(satcu, &dmar_satc_units, list,
3786                                 dmar_rcu_check()) {
3787                 tmp = (struct acpi_dmar_satc *)satcu->hdr;
3788                 if (satc->segment != tmp->segment)
3789                         continue;
3790                 if (satc->header.length != tmp->header.length)
3791                         continue;
3792                 if (memcmp(satc, tmp, satc->header.length) == 0)
3793                         return satcu;
3794         }
3795
3796         return NULL;
3797 }
3798
3799 int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
3800 {
3801         struct acpi_dmar_satc *satc;
3802         struct dmar_satc_unit *satcu;
3803
3804         if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
3805                 return 0;
3806
3807         satc = container_of(hdr, struct acpi_dmar_satc, header);
3808         satcu = dmar_find_satc(satc);
3809         if (satcu)
3810                 return 0;
3811
3812         satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL);
3813         if (!satcu)
3814                 return -ENOMEM;
3815
3816         satcu->hdr = (void *)(satcu + 1);
3817         memcpy(satcu->hdr, hdr, hdr->length);
3818         satcu->atc_required = satc->flags & 0x1;
3819         satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1),
3820                                               (void *)satc + satc->header.length,
3821                                               &satcu->devices_cnt);
3822         if (satcu->devices_cnt && !satcu->devices) {
3823                 kfree(satcu);
3824                 return -ENOMEM;
3825         }
3826         list_add_rcu(&satcu->list, &dmar_satc_units);
3827
3828         return 0;
3829 }
3830
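/*
 * Bring up a hot-added DMAR unit: audit its capabilities against the
 * running configuration, allocate domain IDs and a root entry, and
 * enable invalidation, interrupts and translation unless the unit is
 * ignored.
 */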
3831 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3832 {
3833         int sp, ret;
3834         struct intel_iommu *iommu = dmaru->iommu;
3835
3836         if (g_iommus[iommu->seq_id])
3837                 return 0;
3838
3839         ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
3840         if (ret)
3841                 goto out;
3842
3843         if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3844                 pr_warn("%s: Doesn't support hardware pass through.\n",
3845                         iommu->name);
3846                 return -ENXIO;
3847         }
3848         if (!ecap_sc_support(iommu->ecap) &&
3849             domain_update_iommu_snooping(iommu)) {
3850                 pr_warn("%s: Doesn't support snooping.\n",
3851                         iommu->name);
3852                 return -ENXIO;
3853         }
3854         sp = domain_update_iommu_superpage(NULL, iommu) - 1;
3855         if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3856                 pr_warn("%s: Doesn't support large page.\n",
3857                         iommu->name);
3858                 return -ENXIO;
3859         }
3860
3861         /*
3862          * Disable translation if already enabled prior to OS handover.
3863          */
3864         if (iommu->gcmd & DMA_GCMD_TE)
3865                 iommu_disable_translation(iommu);
3866
3867         g_iommus[iommu->seq_id] = iommu;
3868         ret = iommu_init_domains(iommu);
3869         if (ret == 0)
3870                 ret = iommu_alloc_root_entry(iommu);
3871         if (ret)
3872                 goto out;
3873
3874         intel_svm_check(iommu);
3875
3876         if (dmaru->ignored) {
3877                 /*
3878                  * we always have to disable PMRs or DMA may fail on this device
3879                  */
3880                 if (force_on)
3881                         iommu_disable_protect_mem_regions(iommu);
3882                 return 0;
3883         }
3884
3885         intel_iommu_init_qi(iommu);
3886         iommu_flush_write_buffer(iommu);
3887
3888 #ifdef CONFIG_INTEL_IOMMU_SVM
3889         if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3890                 ret = intel_svm_enable_prq(iommu);
3891                 if (ret)
3892                         goto disable_iommu;
3893         }
3894 #endif
3895         ret = dmar_set_interrupt(iommu);
3896         if (ret)
3897                 goto disable_iommu;
3898
3899         iommu_set_root_entry(iommu);
3900         iommu_enable_translation(iommu);
3901
3902         iommu_disable_protect_mem_regions(iommu);
3903         return 0;
3904
3905 disable_iommu:
3906         disable_dmar_iommu(iommu);
3907 out:
3908         free_dmar_iommu(iommu);
3909         return ret;
3910 }
3911
3912 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3913 {
3914         int ret = 0;
3915         struct intel_iommu *iommu = dmaru->iommu;
3916
3917         if (!intel_iommu_enabled)
3918                 return 0;
3919         if (iommu == NULL)
3920                 return -EINVAL;
3921
3922         if (insert) {
3923                 ret = intel_iommu_add(dmaru);
3924         } else {
3925                 disable_dmar_iommu(iommu);
3926                 free_dmar_iommu(iommu);
3927         }
3928
3929         return ret;
3930 }
3931
3932 static void intel_iommu_free_dmars(void)
3933 {
3934         struct dmar_rmrr_unit *rmrru, *rmrr_n;
3935         struct dmar_atsr_unit *atsru, *atsr_n;
3936         struct dmar_satc_unit *satcu, *satc_n;
3937
3938         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3939                 list_del(&rmrru->list);
3940                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3941                 kfree(rmrru);
3942         }
3943
3944         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3945                 list_del(&atsru->list);
3946                 intel_iommu_free_atsr(atsru);
3947         }
3948         list_for_each_entry_safe(satcu, satc_n, &dmar_satc_units, list) {
3949                 list_del(&satcu->list);
3950                 dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt);
3951                 kfree(satcu);
3952         }
3953 }
3954
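/*
 * Decide whether ATS is allowed for a device: walk up to the PCIe root
 * port and report whether it is covered by an ATSR unit. Integrated
 * devices are always allowed, non-PCIe paths never are.
 */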
3955 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3956 {
3957         int i, ret = 1;
3958         struct pci_bus *bus;
3959         struct pci_dev *bridge = NULL;
3960         struct device *tmp;
3961         struct acpi_dmar_atsr *atsr;
3962         struct dmar_atsr_unit *atsru;
3963
3964         dev = pci_physfn(dev);
3965         for (bus = dev->bus; bus; bus = bus->parent) {
3966                 bridge = bus->self;
3967                 /* If it's an integrated device, allow ATS */
3968                 if (!bridge)
3969                         return 1;
3970                 /* Connected via non-PCIe: no ATS */
3971                 if (!pci_is_pcie(bridge) ||
3972                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3973                         return 0;
3974                 /* If we found the root port, look it up in the ATSR */
3975                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3976                         break;
3977         }
3978
3979         rcu_read_lock();
3980         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3981                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3982                 if (atsr->segment != pci_domain_nr(dev->bus))
3983                         continue;
3984
3985                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3986                         if (tmp == &bridge->dev)
3987                                 goto out;
3988
3989                 if (atsru->include_all)
3990                         goto out;
3991         }
3992         ret = 0;
3993 out:
3994         rcu_read_unlock();
3995
3996         return ret;
3997 }
3998
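/*
 * PCI bus notification helper: keep the RMRR, ATSR and SATC device
 * scopes in sync as devices are added to or removed from the system.
 */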
3999 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4000 {
4001         int ret;
4002         struct dmar_rmrr_unit *rmrru;
4003         struct dmar_atsr_unit *atsru;
4004         struct dmar_satc_unit *satcu;
4005         struct acpi_dmar_atsr *atsr;
4006         struct acpi_dmar_reserved_memory *rmrr;
4007         struct acpi_dmar_satc *satc;
4008
4009         if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4010                 return 0;
4011
4012         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4013                 rmrr = container_of(rmrru->hdr,
4014                                     struct acpi_dmar_reserved_memory, header);
4015                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4016                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4017                                 ((void *)rmrr) + rmrr->header.length,
4018                                 rmrr->segment, rmrru->devices,
4019                                 rmrru->devices_cnt);
4020                         if (ret < 0)
4021                                 return ret;
4022                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4023                         dmar_remove_dev_scope(info, rmrr->segment,
4024                                 rmrru->devices, rmrru->devices_cnt);
4025                 }
4026         }
4027
4028         list_for_each_entry(atsru, &dmar_atsr_units, list) {
4029                 if (atsru->include_all)
4030                         continue;
4031
4032                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4033                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4034                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4035                                         (void *)atsr + atsr->header.length,
4036                                         atsr->segment, atsru->devices,
4037                                         atsru->devices_cnt);
4038                         if (ret > 0)
4039                                 break;
4040                         else if (ret < 0)
4041                                 return ret;
4042                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4043                         if (dmar_remove_dev_scope(info, atsr->segment,
4044                                         atsru->devices, atsru->devices_cnt))
4045                                 break;
4046                 }
4047         }
4048         list_for_each_entry(satcu, &dmar_satc_units, list) {
4049                 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
4050                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4051                         ret = dmar_insert_dev_scope(info, (void *)(satc + 1),
4052                                         (void *)satc + satc->header.length,
4053                                         satc->segment, satcu->devices,
4054                                         satcu->devices_cnt);
4055                         if (ret > 0)
4056                                 break;
4057                         else if (ret < 0)
4058                                 return ret;
4059                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4060                         if (dmar_remove_dev_scope(info, satc->segment,
4061                                         satcu->devices, satcu->devices_cnt))
4062                                 break;
4063                 }
4064         }
4065
4066         return 0;
4067 }
4068
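/*
 * Memory hotplug notifier: extend the static identity domain when memory
 * goes online and tear the mappings down again when it goes offline or
 * the online operation is cancelled.
 */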
4069 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4070                                        unsigned long val, void *v)
4071 {
4072         struct memory_notify *mhp = v;
4073         unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4074         unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
4075                         mhp->nr_pages - 1);
4076
4077         switch (val) {
4078         case MEM_GOING_ONLINE:
4079                 if (iommu_domain_identity_map(si_domain,
4080                                               start_vpfn, last_vpfn)) {
4081                         pr_warn("Failed to build identity map for [%lx-%lx]\n",
4082                                 start_vpfn, last_vpfn);
4083                         return NOTIFY_BAD;
4084                 }
4085                 break;
4086
4087         case MEM_OFFLINE:
4088         case MEM_CANCEL_ONLINE:
4089                 {
4090                         struct dmar_drhd_unit *drhd;
4091                         struct intel_iommu *iommu;
4092                         struct page *freelist;
4093
4094                         freelist = domain_unmap(si_domain,
4095                                                 start_vpfn, last_vpfn,
4096                                                 NULL);
4097
4098                         rcu_read_lock();
4099                         for_each_active_iommu(iommu, drhd)
4100                                 iommu_flush_iotlb_psi(iommu, si_domain,
4101                                         start_vpfn, mhp->nr_pages,
4102                                         !freelist, 0);
4103                         rcu_read_unlock();
4104                         dma_free_pagelist(freelist);
4105                 }
4106                 break;
4107         }
4108
4109         return NOTIFY_OK;
4110 }
4111
4112 static struct notifier_block intel_iommu_memory_nb = {
4113         .notifier_call = intel_iommu_memory_notifier,
4114         .priority = 0
4115 };
4116
4117 static void intel_disable_iommus(void)
4118 {
4119         struct intel_iommu *iommu = NULL;
4120         struct dmar_drhd_unit *drhd;
4121
4122         for_each_iommu(iommu, drhd)
4123                 iommu_disable_translation(iommu);
4124 }
4125
4126 void intel_iommu_shutdown(void)
4127 {
4128         struct dmar_drhd_unit *drhd;
4129         struct intel_iommu *iommu = NULL;
4130
4131         if (no_iommu || dmar_disabled)
4132                 return;
4133
4134         down_write(&dmar_global_lock);
4135
4136         /* Disable PMRs explicitly here. */
4137         for_each_iommu(iommu, drhd)
4138                 iommu_disable_protect_mem_regions(iommu);
4139
4140         /* Make sure the IOMMUs are switched off */
4141         intel_disable_iommus();
4142
4143         up_write(&dmar_global_lock);
4144 }
4145
4146 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4147 {
4148         struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4149
4150         return container_of(iommu_dev, struct intel_iommu, iommu);
4151 }
4152
4153 static ssize_t intel_iommu_show_version(struct device *dev,
4154                                         struct device_attribute *attr,
4155                                         char *buf)
4156 {
4157         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4158         u32 ver = readl(iommu->reg + DMAR_VER_REG);
4159         return sprintf(buf, "%d:%d\n",
4160                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4161 }
4162 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4163
4164 static ssize_t intel_iommu_show_address(struct device *dev,
4165                                         struct device_attribute *attr,
4166                                         char *buf)
4167 {
4168         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4169         return sprintf(buf, "%llx\n", iommu->reg_phys);
4170 }
4171 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4172
4173 static ssize_t intel_iommu_show_cap(struct device *dev,
4174                                     struct device_attribute *attr,
4175                                     char *buf)
4176 {
4177         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4178         return sprintf(buf, "%llx\n", iommu->cap);
4179 }
4180 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4181
4182 static ssize_t intel_iommu_show_ecap(struct device *dev,
4183                                     struct device_attribute *attr,
4184                                     char *buf)
4185 {
4186         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4187         return sprintf(buf, "%llx\n", iommu->ecap);
4188 }
4189 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4190
4191 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4192                                       struct device_attribute *attr,
4193                                       char *buf)
4194 {
4195         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4196         return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4197 }
4198 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4199
4200 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4201                                            struct device_attribute *attr,
4202                                            char *buf)
4203 {
4204         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4205         return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4206                                                   cap_ndoms(iommu->cap)));
4207 }
4208 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4209
4210 static struct attribute *intel_iommu_attrs[] = {
4211         &dev_attr_version.attr,
4212         &dev_attr_address.attr,
4213         &dev_attr_cap.attr,
4214         &dev_attr_ecap.attr,
4215         &dev_attr_domains_supported.attr,
4216         &dev_attr_domains_used.attr,
4217         NULL,
4218 };
4219
4220 static struct attribute_group intel_iommu_group = {
4221         .name = "intel-iommu",
4222         .attrs = intel_iommu_attrs,
4223 };
4224
4225 const struct attribute_group *intel_iommu_groups[] = {
4226         &intel_iommu_group,
4227         NULL,
4228 };
4229
4230 static inline bool has_external_pci(void)
4231 {
4232         struct pci_dev *pdev = NULL;
4233
4234         for_each_pci_dev(pdev)
4235                 if (pdev->external_facing)
4236                         return true;
4237
4238         return false;
4239 }
4240
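/*
 * Honor the platform's DMA protection opt-in: if the firmware requests
 * it and an external-facing PCI device is present, override a
 * command-line or build-time disable and force the IOMMU on.
 */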
4241 static int __init platform_optin_force_iommu(void)
4242 {
4243         if (!dmar_platform_optin() || no_platform_optin || !has_external_pci())
4244                 return 0;
4245
4246         if (no_iommu || dmar_disabled)
4247                 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4248
4249         /*
4250          * If Intel-IOMMU is disabled by default, we will apply identity
4251          * map for all devices except those marked as being untrusted.
4252          */
4253         if (dmar_disabled)
4254                 iommu_set_default_passthrough(false);
4255
4256         dmar_disabled = 0;
4257         no_iommu = 0;
4258
4259         return 1;
4260 }
4261
4262 static int __init probe_acpi_namespace_devices(void)
4263 {
4264         struct dmar_drhd_unit *drhd;
4265         /* To avoid a -Wunused-but-set-variable warning. */
4266         struct intel_iommu *iommu __maybe_unused;
4267         struct device *dev;
4268         int i, ret = 0;
4269
4270         for_each_active_iommu(iommu, drhd) {
4271                 for_each_active_dev_scope(drhd->devices,
4272                                           drhd->devices_cnt, i, dev) {
4273                         struct acpi_device_physical_node *pn;
4274                         struct iommu_group *group;
4275                         struct acpi_device *adev;
4276
4277                         if (dev->bus != &acpi_bus_type)
4278                                 continue;
4279
4280                         adev = to_acpi_device(dev);
4281                         mutex_lock(&adev->physical_node_lock);
4282                         list_for_each_entry(pn,
4283                                             &adev->physical_node_list, node) {
4284                                 group = iommu_group_get(pn->dev);
4285                                 if (group) {
4286                                         iommu_group_put(group);
4287                                         continue;
4288                                 }
4289
4290                                 pn->dev->bus->iommu_ops = &intel_iommu_ops;
4291                                 ret = iommu_probe_device(pn->dev);
4292                                 if (ret)
4293                                         break;
4294                         }
4295                         mutex_unlock(&adev->physical_node_lock);
4296
4297                         if (ret)
4298                                 return ret;
4299                 }
4300         }
4301
4302         return 0;
4303 }
4304
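/*
 * Main entry point for VT-d initialization: parse the DMAR table,
 * initialize all remapping units via init_dmars(), register sysfs
 * attributes and notifiers, and finally enable translation.
 */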
4305 int __init intel_iommu_init(void)
4306 {
4307         int ret = -ENODEV;
4308         struct dmar_drhd_unit *drhd;
4309         struct intel_iommu *iommu;
4310
4311         /*
4312          * Intel IOMMU is required for a TXT/tboot launch or platform
4313          * opt in, so enforce that.
4314          */
4315         force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
4316                     platform_optin_force_iommu();
4317
4318         if (iommu_init_mempool()) {
4319                 if (force_on)
4320                         panic("tboot: Failed to initialize iommu memory\n");
4321                 return -ENOMEM;
4322         }
4323
4324         down_write(&dmar_global_lock);
4325         if (dmar_table_init()) {
4326                 if (force_on)
4327                         panic("tboot: Failed to initialize DMAR table\n");
4328                 goto out_free_dmar;
4329         }
4330
4331         if (dmar_dev_scope_init() < 0) {
4332                 if (force_on)
4333                         panic("tboot: Failed to initialize DMAR device scope\n");
4334                 goto out_free_dmar;
4335         }
4336
4337         up_write(&dmar_global_lock);
4338
4339         /*
4340          * The bus notifier takes the dmar_global_lock, so lockdep will
4341          * complain later when we register it under the lock.
4342          */
4343         dmar_register_bus_notifier();
4344
4345         down_write(&dmar_global_lock);
4346
4347         if (!no_iommu)
4348                 intel_iommu_debugfs_init();
4349
4350         if (no_iommu || dmar_disabled) {
4351                 /*
4352                  * We exit the function here to ensure the IOMMU's remapping and
4353                  * mempool aren't set up, which means the IOMMU's PMRs won't be
4354                  * disabled via the call to init_dmars(). So disable them
4355                  * explicitly here. The PMRs were set up by tboot prior to
4356                  * calling SENTER, but the kernel is expected to reset/tear
4357                  * down the PMRs.
4358                  */
4359                 if (intel_iommu_tboot_noforce) {
4360                         for_each_iommu(iommu, drhd)
4361                                 iommu_disable_protect_mem_regions(iommu);
4362                 }
4363
4364                 /*
4365                  * Make sure the IOMMUs are switched off, even when we
4366                  * boot into a kexec kernel and the previous kernel left
4367                  * them enabled
4368                  */
4369                 intel_disable_iommus();
4370                 goto out_free_dmar;
4371         }
4372
4373         if (list_empty(&dmar_rmrr_units))
4374                 pr_info("No RMRR found\n");
4375
4376         if (list_empty(&dmar_atsr_units))
4377                 pr_info("No ATSR found\n");
4378
4379         if (list_empty(&dmar_satc_units))
4380                 pr_info("No SATC found\n");
4381
4382         if (dmar_map_gfx)
4383                 intel_iommu_gfx_mapped = 1;
4384
4385         init_no_remapping_devices();
4386
4387         ret = init_dmars();
4388         if (ret) {
4389                 if (force_on)
4390                         panic("tboot: Failed to initialize DMARs\n");
4391                 pr_err("Initialization failed\n");
4392                 goto out_free_dmar;
4393         }
4394         up_write(&dmar_global_lock);
4395
4396         init_iommu_pm_ops();
4397
4398         down_read(&dmar_global_lock);
4399         for_each_active_iommu(iommu, drhd) {
4400                 /*
4401                  * The flush queue implementation does not perform
4402                  * page-selective invalidations that are required for efficient
4403                  * TLB flushes in virtual environments.  The benefit of batching
4404                  * is likely to be much lower than the overhead of synchronizing
4405                  * the virtual and physical IOMMU page-tables.
4406                  */
4407                 if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
4408                         pr_warn("IOMMU batching is disabled due to virtualization\n");
4409                         intel_iommu_strict = 1;
4410                 }
4411                 iommu_device_sysfs_add(&iommu->iommu, NULL,
4412                                        intel_iommu_groups,
4413                                        "%s", iommu->name);
4414                 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
4415         }
4416         up_read(&dmar_global_lock);
4417
4418         iommu_set_dma_strict(intel_iommu_strict);
4419         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4420         if (si_domain && !hw_pass_through)
4421                 register_memory_notifier(&intel_iommu_memory_nb);
4422
4423         down_read(&dmar_global_lock);
4424         if (probe_acpi_namespace_devices())
4425                 pr_warn("ACPI name space devices didn't probe correctly\n");
4426
4427         /* Finally, we enable the DMA remapping hardware. */
4428         for_each_iommu(iommu, drhd) {
4429                 if (!drhd->ignored && !translation_pre_enabled(iommu))
4430                         iommu_enable_translation(iommu);
4431
4432                 iommu_disable_protect_mem_regions(iommu);
4433         }
4434         up_read(&dmar_global_lock);
4435
4436         pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4437
4438         intel_iommu_enabled = 1;
4439
4440         return 0;
4441
4442 out_free_dmar:
4443         intel_iommu_free_dmars();
4444         up_write(&dmar_global_lock);
4445         iommu_exit_mempool();
4446         return ret;
4447 }
4448
4449 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4450 {
4451         struct device_domain_info *info = opaque;
4452
4453         domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff);
4454         return 0;
4455 }
4456
4457 /*
4458  * NB - intel-iommu lacks any sort of reference counting for the users of
4459  * dependent devices.  If multiple endpoints have intersecting dependent
4460  * devices, unbinding the driver from any one of them will possibly leave
4461  * the others unable to operate.
4462  */
4463 static void domain_context_clear(struct device_domain_info *info)
4464 {
4465         if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
4466                 return;
4467
4468         pci_for_each_dma_alias(to_pci_dev(info->dev),
4469                                &domain_context_clear_one_cb, info);
4470 }
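
/*
 * Informational example (assumed topology, not executed code): for a
 * conventional PCI device behind a PCIe-to-PCI bridge, DMA may be tagged
 * with the bridge's requester ID rather than the device's own, so
 * pci_for_each_dma_alias() invokes domain_context_clear_one_cb() once for
 * the device's own bus/devfn and again for each such alias.  This is also
 * why the note above warns about intersecting dependent devices: two
 * endpoints behind the same bridge share those alias context entries.
 */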
4471
4472 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4473 {
4474         struct dmar_domain *domain;
4475         struct intel_iommu *iommu;
4476         unsigned long flags;
4477
4478         assert_spin_locked(&device_domain_lock);
4479
4480         if (WARN_ON(!info))
4481                 return;
4482
4483         iommu = info->iommu;
4484         domain = info->domain;
4485
4486         if (info->dev && !dev_is_real_dma_subdevice(info->dev)) {
4487                 if (dev_is_pci(info->dev) && sm_supported(iommu))
4488                         intel_pasid_tear_down_entry(iommu, info->dev,
4489                                         PASID_RID2PASID, false);
4490
4491                 iommu_disable_dev_iotlb(info);
4492                 domain_context_clear(info);
4493                 intel_pasid_free_table(info->dev);
4494         }
4495
4496         unlink_domain_info(info);
4497
4498         spin_lock_irqsave(&iommu->lock, flags);
4499         domain_detach_iommu(domain, iommu);
4500         spin_unlock_irqrestore(&iommu->lock, flags);
4501
4502         free_devinfo_mem(info);
4503 }
4504
4505 static void dmar_remove_one_dev_info(struct device *dev)
4506 {
4507         struct device_domain_info *info;
4508         unsigned long flags;
4509
4510         spin_lock_irqsave(&device_domain_lock, flags);
4511         info = get_domain_info(dev);
4512         if (info)
4513                 __dmar_remove_one_dev_info(info);
4514         spin_unlock_irqrestore(&device_domain_lock, flags);
4515 }
4516
4517 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4518 {
4519         int adjust_width;
4520
4521         /* calculate AGAW */
4522         domain->gaw = guest_width;
4523         adjust_width = guestwidth_to_adjustwidth(guest_width);
4524         domain->agaw = width_to_agaw(adjust_width);
4525
4526         domain->iommu_coherency = 0;
4527         domain->iommu_snooping = 0;
4528         domain->iommu_superpage = 0;
4529         domain->max_addr = 0;
4530
4531         /* always allocate the top pgd */
4532         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4533         if (!domain->pgd)
4534                 return -ENOMEM;
4535         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4536         return 0;
4537 }
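
/*
 * Worked example (informational only, assuming the usual rounding of the
 * helpers used above): with guest_width = 48, guestwidth_to_adjustwidth()
 * keeps 48 because (48 - 12) is already a multiple of the 9-bit level
 * stride, and width_to_agaw() gives DIV_ROUND_UP(48 - 30, 9) = 2, i.e. a
 * 4-level page table.  The 57-bit default width used by
 * intel_iommu_domain_alloc() maps to agaw 3 (5-level) the same way.
 */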
4538
4539 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4540 {
4541         struct dmar_domain *dmar_domain;
4542         struct iommu_domain *domain;
4543
4544         switch (type) {
4545         case IOMMU_DOMAIN_DMA:
4546         case IOMMU_DOMAIN_UNMANAGED:
4547                 dmar_domain = alloc_domain(0);
4548                 if (!dmar_domain) {
4549                         pr_err("Can't allocate dmar_domain\n");
4550                         return NULL;
4551                 }
4552                 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4553                         pr_err("Domain initialization failed\n");
4554                         domain_exit(dmar_domain);
4555                         return NULL;
4556                 }
4557
4558                 if (type == IOMMU_DOMAIN_DMA &&
4559                     iommu_get_dma_cookie(&dmar_domain->domain))
4560                         return NULL;
4561
4562                 domain = &dmar_domain->domain;
4563                 domain->geometry.aperture_start = 0;
4564                 domain->geometry.aperture_end   =
4565                                 __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4566                 domain->geometry.force_aperture = true;
4567
4568                 return domain;
4569         case IOMMU_DOMAIN_IDENTITY:
4570                 return &si_domain->domain;
4571         default:
4572                 return NULL;
4573         }
4574
4575         return NULL;
4576 }
4577
4578 static void intel_iommu_domain_free(struct iommu_domain *domain)
4579 {
4580         if (domain != &si_domain->domain)
4581                 domain_exit(to_dmar_domain(domain));
4582 }
4583
4584 /*
4585  * Check whether a @domain could be attached to the @dev through the
4586  * aux-domain attach/detach APIs.
4587  */
4588 static inline bool
4589 is_aux_domain(struct device *dev, struct iommu_domain *domain)
4590 {
4591         struct device_domain_info *info = get_domain_info(dev);
4592
4593         return info && info->auxd_enabled &&
4594                         domain->type == IOMMU_DOMAIN_UNMANAGED;
4595 }
4596
4597 static inline struct subdev_domain_info *
4598 lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
4599 {
4600         struct subdev_domain_info *sinfo;
4601
4602         if (!list_empty(&domain->subdevices)) {
4603                 list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
4604                         if (sinfo->pdev == dev)
4605                                 return sinfo;
4606                 }
4607         }
4608
4609         return NULL;
4610 }
4611
4612 static int auxiliary_link_device(struct dmar_domain *domain,
4613                                  struct device *dev)
4614 {
4615         struct device_domain_info *info = get_domain_info(dev);
4616         struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
4617
4618         assert_spin_locked(&device_domain_lock);
4619         if (WARN_ON(!info))
4620                 return -EINVAL;
4621
4622         if (!sinfo) {
4623                 sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
4624                 if (!sinfo)
4625                         return -ENOMEM;
4626                 sinfo->domain = domain;
4627                 sinfo->pdev = dev;
4628                 list_add(&sinfo->link_phys, &info->subdevices);
4629                 list_add(&sinfo->link_domain, &domain->subdevices);
4630         }
4631
4632         return ++sinfo->users;
4633 }
4634
4635 static int auxiliary_unlink_device(struct dmar_domain *domain,
4636                                    struct device *dev)
4637 {
4638         struct device_domain_info *info = get_domain_info(dev);
4639         struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
4640         int ret;
4641
4642         assert_spin_locked(&device_domain_lock);
4643         if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
4644                 return -EINVAL;
4645
4646         ret = --sinfo->users;
4647         if (!ret) {
4648                 list_del(&sinfo->link_phys);
4649                 list_del(&sinfo->link_domain);
4650                 kfree(sinfo);
4651         }
4652
4653         return ret;
4654 }
4655
4656 static int aux_domain_add_dev(struct dmar_domain *domain,
4657                               struct device *dev)
4658 {
4659         int ret;
4660         unsigned long flags;
4661         struct intel_iommu *iommu;
4662
4663         iommu = device_to_iommu(dev, NULL, NULL);
4664         if (!iommu)
4665                 return -ENODEV;
4666
4667         if (domain->default_pasid <= 0) {
4668                 u32 pasid;
4669
4670                 /* No private data needed for the default pasid */
4671                 pasid = ioasid_alloc(NULL, PASID_MIN,
4672                                      pci_max_pasids(to_pci_dev(dev)) - 1,
4673                                      NULL);
4674                 if (pasid == INVALID_IOASID) {
4675                         pr_err("Can't allocate default pasid\n");
4676                         return -ENODEV;
4677                 }
4678                 domain->default_pasid = pasid;
4679         }
4680
4681         spin_lock_irqsave(&device_domain_lock, flags);
4682         ret = auxiliary_link_device(domain, dev);
4683         if (ret <= 0)
4684                 goto link_failed;
4685
4686         /*
4687          * Subdevices from the same physical device can be attached to the
4688          * same domain. For such cases, only the first subdevice attachment
4689          * needs to go through the full steps in this function. So if ret >
4690          * 1, just goto out.
4691          */
4692         if (ret > 1)
4693                 goto out;
4694
4695         /*
4696          * iommu->lock must be held to attach domain to iommu and setup the
4697          * pasid entry for second level translation.
4698          */
4699         spin_lock(&iommu->lock);
4700         ret = domain_attach_iommu(domain, iommu);
4701         if (ret)
4702                 goto attach_failed;
4703
4704         /* Setup the PASID entry for mediated devices: */
4705         if (domain_use_first_level(domain))
4706                 ret = domain_setup_first_level(iommu, domain, dev,
4707                                                domain->default_pasid);
4708         else
4709                 ret = intel_pasid_setup_second_level(iommu, domain, dev,
4710                                                      domain->default_pasid);
4711         if (ret)
4712                 goto table_failed;
4713
4714         spin_unlock(&iommu->lock);
4715 out:
4716         spin_unlock_irqrestore(&device_domain_lock, flags);
4717
4718         return 0;
4719
4720 table_failed:
4721         domain_detach_iommu(domain, iommu);
4722 attach_failed:
4723         spin_unlock(&iommu->lock);
4724         auxiliary_unlink_device(domain, dev);
4725 link_failed:
4726         spin_unlock_irqrestore(&device_domain_lock, flags);
4727         if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
4728                 ioasid_put(domain->default_pasid);
4729
4730         return ret;
4731 }
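
/*
 * Example scenario (not executed code): two mediated devices created from
 * the same parent PCI device attach to the same domain.  The first call to
 * aux_domain_add_dev() sees auxiliary_link_device() return 1, so it
 * attaches the domain to the IOMMU and programs the default-PASID entry.
 * The second call gets 2 back from auxiliary_link_device() and takes the
 * "ret > 1" shortcut above, reusing the existing PASID setup.  Detach is
 * symmetric: only the last auxiliary_unlink_device() tears the entry down.
 */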
4732
4733 static void aux_domain_remove_dev(struct dmar_domain *domain,
4734                                   struct device *dev)
4735 {
4736         struct device_domain_info *info;
4737         struct intel_iommu *iommu;
4738         unsigned long flags;
4739
4740         if (!is_aux_domain(dev, &domain->domain))
4741                 return;
4742
4743         spin_lock_irqsave(&device_domain_lock, flags);
4744         info = get_domain_info(dev);
4745         iommu = info->iommu;
4746
4747         if (!auxiliary_unlink_device(domain, dev)) {
4748                 spin_lock(&iommu->lock);
4749                 intel_pasid_tear_down_entry(iommu, dev,
4750                                             domain->default_pasid, false);
4751                 domain_detach_iommu(domain, iommu);
4752                 spin_unlock(&iommu->lock);
4753         }
4754
4755         spin_unlock_irqrestore(&device_domain_lock, flags);
4756
4757         if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
4758                 ioasid_put(domain->default_pasid);
4759 }
4760
4761 static int prepare_domain_attach_device(struct iommu_domain *domain,
4762                                         struct device *dev)
4763 {
4764         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4765         struct intel_iommu *iommu;
4766         int addr_width;
4767
4768         iommu = device_to_iommu(dev, NULL, NULL);
4769         if (!iommu)
4770                 return -ENODEV;
4771
4772         /* check if this iommu agaw is sufficient for max mapped address */
4773         addr_width = agaw_to_width(iommu->agaw);
4774         if (addr_width > cap_mgaw(iommu->cap))
4775                 addr_width = cap_mgaw(iommu->cap);
4776
4777         if (dmar_domain->max_addr > (1LL << addr_width)) {
4778                 dev_err(dev, "%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
4779                         __func__, addr_width, dmar_domain->max_addr);
4781                 return -EFAULT;
4782         }
4783         dmar_domain->gaw = addr_width;
4784
4785         /*
4786          * Knock out extra levels of page tables if necessary
4787          */
4788         while (iommu->agaw < dmar_domain->agaw) {
4789                 struct dma_pte *pte;
4790
4791                 pte = dmar_domain->pgd;
4792                 if (dma_pte_present(pte)) {
4793                         dmar_domain->pgd = (struct dma_pte *)
4794                                 phys_to_virt(dma_pte_addr(pte));
4795                         free_pgtable_page(pte);
4796                 }
4797                 dmar_domain->agaw--;
4798         }
4799
4800         return 0;
4801 }
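
/*
 * Worked example (informational only): a domain initialized with the
 * 57-bit default width has agaw 3 (5-level).  If it is then attached to an
 * IOMMU whose hardware agaw is 2 (4-level), the loop above pops one level:
 * the old top-level pgd page is freed and dmar_domain->pgd becomes the
 * table that its first (and, for a narrower domain, only populated)
 * entry pointed to, leaving a 4-level table the hardware can walk.
 */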
4802
4803 static int intel_iommu_attach_device(struct iommu_domain *domain,
4804                                      struct device *dev)
4805 {
4806         int ret;
4807
4808         if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
4809             device_is_rmrr_locked(dev)) {
4810                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
4811                 return -EPERM;
4812         }
4813
4814         if (is_aux_domain(dev, domain))
4815                 return -EPERM;
4816
4817         /* normally dev is not mapped */
4818         if (unlikely(domain_context_mapped(dev))) {
4819                 struct dmar_domain *old_domain;
4820
4821                 old_domain = find_domain(dev);
4822                 if (old_domain)
4823                         dmar_remove_one_dev_info(dev);
4824         }
4825
4826         ret = prepare_domain_attach_device(domain, dev);
4827         if (ret)
4828                 return ret;
4829
4830         return domain_add_dev_info(to_dmar_domain(domain), dev);
4831 }
4832
4833 static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
4834                                          struct device *dev)
4835 {
4836         int ret;
4837
4838         if (!is_aux_domain(dev, domain))
4839                 return -EPERM;
4840
4841         ret = prepare_domain_attach_device(domain, dev);
4842         if (ret)
4843                 return ret;
4844
4845         return aux_domain_add_dev(to_dmar_domain(domain), dev);
4846 }
4847
4848 static void intel_iommu_detach_device(struct iommu_domain *domain,
4849                                       struct device *dev)
4850 {
4851         dmar_remove_one_dev_info(dev);
4852 }
4853
4854 static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
4855                                           struct device *dev)
4856 {
4857         aux_domain_remove_dev(to_dmar_domain(domain), dev);
4858 }
4859
4860 #ifdef CONFIG_INTEL_IOMMU_SVM
4861 /*
4862  * 2D array for converting and sanitizing IOMMU generic TLB granularity to
4863  * VT-d granularity. Invalidation is typically included in the unmap operation
4864  * as a result of DMA or VFIO unmap. However, for assigned devices the guest
4865  * owns the first-level page tables. Invalidations of translation caches in the
4866  * guest are trapped and passed down to the host.
4867  *
4868  * The vIOMMU in the guest exposes only first-level page tables, therefore
4869  * we do not support IOTLB granularity for requests without a PASID (second level).
4870  *
4871  * For example, to find the VT-d granularity encoding for IOTLB
4872  * type and page selective granularity within PASID:
4873  * X: indexed by iommu cache type
4874  * Y: indexed by enum iommu_inv_granularity
4875  * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
4876  */
4877
4878 static const int
4879 inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
4880         /*
4881          * PASID based IOTLB invalidation: PASID selective (per PASID),
4882          * page selective (address granularity)
4883          */
4884         {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
4885         /* PASID based dev TLBs */
4886         {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
4887         /* PASID cache */
4888         {-EINVAL, -EINVAL, -EINVAL}
4889 };
4890
4891 static inline int to_vtd_granularity(int type, int granu)
4892 {
4893         return inv_type_granu_table[type][granu];
4894 }
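
/*
 * Example lookups in the table above: an IOTLB invalidation with
 * IOMMU_INV_GRANU_ADDR granularity maps to QI_GRAN_PSI_PASID, while a
 * device-IOTLB invalidation with IOMMU_INV_GRANU_PASID granularity maps to
 * -EINVAL, which intel_iommu_sva_invalidate() rejects with a ratelimited
 * error.
 */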
4895
4896 static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
4897 {
4898         u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
4899
4900         /* VT-d encodes the size as 2^size 4K pages: 0 for 4K, 9 for 2MB, etc.
4901          * The IOMMU cache invalidate API passes granu_size in bytes and the
4902          * number of contiguous granules of that size.
4903          */
4904         return order_base_2(nr_pages);
4905 }
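
/*
 * Worked example (informational only): granu_size = 4K and
 * nr_granules = 512 describe a 2MB range, so nr_pages = 512 and
 * order_base_2(512) = 9, matching the "9 for 2MB" encoding in the comment
 * above.  A single 4K page gives order_base_2(1) = 0.
 */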
4906
4907 static int
4908 intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
4909                            struct iommu_cache_invalidate_info *inv_info)
4910 {
4911         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4912         struct device_domain_info *info;
4913         struct intel_iommu *iommu;
4914         unsigned long flags;
4915         int cache_type;
4916         u8 bus, devfn;
4917         u16 did, sid;
4918         int ret = 0;
4919         u64 size = 0;
4920
4921         if (!inv_info || !dmar_domain)
4922                 return -EINVAL;
4923
4924         if (!dev || !dev_is_pci(dev))
4925                 return -ENODEV;
4926
4927         iommu = device_to_iommu(dev, &bus, &devfn);
4928         if (!iommu)
4929                 return -ENODEV;
4930
4931         if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
4932                 return -EINVAL;
4933
4934         spin_lock_irqsave(&device_domain_lock, flags);
4935         spin_lock(&iommu->lock);
4936         info = get_domain_info(dev);
4937         if (!info) {
4938                 ret = -EINVAL;
4939                 goto out_unlock;
4940         }
4941         did = dmar_domain->iommu_did[iommu->seq_id];
4942         sid = PCI_DEVID(bus, devfn);
4943
4944         /* Size is only valid in address selective invalidation */
4945         if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
4946                 size = to_vtd_size(inv_info->granu.addr_info.granule_size,
4947                                    inv_info->granu.addr_info.nb_granules);
4948
4949         for_each_set_bit(cache_type,
4950                          (unsigned long *)&inv_info->cache,
4951                          IOMMU_CACHE_INV_TYPE_NR) {
4952                 int granu = 0;
4953                 u64 pasid = 0;
4954                 u64 addr = 0;
4955
4956                 granu = to_vtd_granularity(cache_type, inv_info->granularity);
4957                 if (granu == -EINVAL) {
4958                         pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
4959                                            cache_type, inv_info->granularity);
4960                         break;
4961                 }
4962
4963                 /*
4964                  * PASID is stored in different locations based on the
4965                  * granularity.
4966                  */
4967                 if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
4968                     (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
4969                         pasid = inv_info->granu.pasid_info.pasid;
4970                 else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
4971                          (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
4972                         pasid = inv_info->granu.addr_info.pasid;
4973
4974                 switch (BIT(cache_type)) {
4975                 case IOMMU_CACHE_INV_TYPE_IOTLB:
4976                         /* HW ignores the low-order address bits covered by the mask */
4977                         if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
4978                             size &&
4979                             (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
4980                                 pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
4981                                                    inv_info->granu.addr_info.addr, size);
4982                         }
4983
4984                         /*
4985                          * If granu is PASID-selective, address is ignored.
4986                          * We use npages = -1 to indicate that.
4987                          */
4988                         qi_flush_piotlb(iommu, did, pasid,
4989                                         mm_to_dma_pfn(inv_info->granu.addr_info.addr),
4990                                         (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
4991                                         inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
4992
4993                         if (!info->ats_enabled)
4994                                 break;
4995                         /*
4996                          * Always flush device IOTLB if ATS is enabled. vIOMMU
4997                          * in the guest may assume IOTLB flush is inclusive,
4998                          * which is more efficient.
4999                          */
5000                         fallthrough;
5001                 case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
5002                         /*
5003                          * PASID-based device TLB invalidation does not support
5004                          * IOMMU_INV_GRANU_PASID granularity, only
5005                          * IOMMU_INV_GRANU_ADDR. The equivalent here is to set
5006                          * the size to cover the entire 64-bit range; the user
5007                          * provides only PASID info without address info, so we
5008                          * set addr to 0.
5009                          */
5010                         if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
5011                                 size = 64 - VTD_PAGE_SHIFT;
5012                                 addr = 0;
5013                         } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
5014                                 addr = inv_info->granu.addr_info.addr;
5015                         }
5016
5017                         if (info->ats_enabled)
5018                                 qi_flush_dev_iotlb_pasid(iommu, sid,
5019                                                 info->pfsid, pasid,
5020                                                 info->ats_qdep, addr,
5021                                                 size);
5022                         else
5023                                 pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
5024                         break;
5025                 default:
5026                         dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
5027                                             cache_type);
5028                         ret = -EINVAL;
5029                 }
5030         }
5031 out_unlock:
5032         spin_unlock(&iommu->lock);
5033         spin_unlock_irqrestore(&device_domain_lock, flags);
5034
5035         return ret;
5036 }
5037 #endif
5038
5039 static int intel_iommu_map(struct iommu_domain *domain,
5040                            unsigned long iova, phys_addr_t hpa,
5041                            size_t size, int iommu_prot, gfp_t gfp)
5042 {
5043         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5044         u64 max_addr;
5045         int prot = 0;
5046
5047         if (iommu_prot & IOMMU_READ)
5048                 prot |= DMA_PTE_READ;
5049         if (iommu_prot & IOMMU_WRITE)
5050                 prot |= DMA_PTE_WRITE;
5051         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5052                 prot |= DMA_PTE_SNP;
5053
5054         max_addr = iova + size;
5055         if (dmar_domain->max_addr < max_addr) {
5056                 u64 end;
5057
5058                 /* check if minimum agaw is sufficient for mapped address */
5059                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5060                 if (end < max_addr) {
5061                         pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
5062                                __func__, dmar_domain->gaw, max_addr);
5064                         return -EFAULT;
5065                 }
5066                 dmar_domain->max_addr = max_addr;
5067         }
5068         /* Round up size to next multiple of PAGE_SIZE, if it and
5069            the low bits of hpa would take us onto the next page */
5070         size = aligned_nrpages(hpa, size);
5071         return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5072                                 hpa >> VTD_PAGE_SHIFT, size, prot);
5073 }
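
/*
 * Worked example (informational only, assuming aligned_nrpages() rounds the
 * sub-page offset of hpa plus size up to whole 4K pages as the comment above
 * describes): hpa = 0x1800 and size = 0x1000 touch two 4K pages
 * (0x1000-0x1fff and 0x2000-0x2fff), so aligned_nrpages() returns 2 and
 * __domain_mapping() is asked to map two VT-d pages starting at
 * iova >> VTD_PAGE_SHIFT.
 */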
5074
5075 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5076                                 unsigned long iova, size_t size,
5077                                 struct iommu_iotlb_gather *gather)
5078 {
5079         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5080         unsigned long start_pfn, last_pfn;
5081         int level = 0;
5082
5083         /* Cope with horrid API which requires us to unmap more than the
5084            size argument if it happens to be a large-page mapping. */
5085         BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5086
5087         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5088                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5089
5090         start_pfn = iova >> VTD_PAGE_SHIFT;
5091         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5092
5093         gather->freelist = domain_unmap(dmar_domain, start_pfn,
5094                                         last_pfn, gather->freelist);
5095
5096         if (dmar_domain->max_addr == iova + size)
5097                 dmar_domain->max_addr = iova;
5098
5099         iommu_iotlb_gather_add_page(domain, gather, iova, size);
5100
5101         return size;
5102 }
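
/*
 * Worked example (informational only, assuming level_to_offset_bits()
 * contributes 9 bits per level above the base 4K page): an unmap request
 * for 4K at an iova covered by a 2MB superpage finds its PTE at level 2,
 * so the size is widened to VTD_PAGE_SIZE << 9 = 2MB and the whole
 * superpage is unmapped, as the "horrid API" comment above explains.
 */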
5103
5104 static void intel_iommu_tlb_sync(struct iommu_domain *domain,
5105                                  struct iommu_iotlb_gather *gather)
5106 {
5107         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5108         unsigned long iova_pfn = IOVA_PFN(gather->start);
5109         size_t size = gather->end - gather->start;
5110         unsigned long start_pfn;
5111         unsigned long nrpages;
5112         int iommu_id;
5113
5114         nrpages = aligned_nrpages(gather->start, size);
5115         start_pfn = mm_to_dma_pfn(iova_pfn);
5116
5117         for_each_domain_iommu(iommu_id, dmar_domain)
5118                 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5119                                       start_pfn, nrpages, !gather->freelist, 0);
5120
5121         dma_free_pagelist(gather->freelist);
5122 }
5123
5124 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5125                                             dma_addr_t iova)
5126 {
5127         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5128         struct dma_pte *pte;
5129         int level = 0;
5130         u64 phys = 0;
5131
5132         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5133         if (pte && dma_pte_present(pte))
5134                 phys = dma_pte_addr(pte) +
5135                         (iova & (BIT_MASK(level_to_offset_bits(level) +
5136                                                 VTD_PAGE_SHIFT) - 1));
5137
5138         return phys;
5139 }
5140
5141 static bool intel_iommu_capable(enum iommu_cap cap)
5142 {
5143         if (cap == IOMMU_CAP_CACHE_COHERENCY)
5144                 return domain_update_iommu_snooping(NULL) == 1;
5145         if (cap == IOMMU_CAP_INTR_REMAP)
5146                 return irq_remapping_enabled == 1;
5147
5148         return false;
5149 }
5150
5151 static struct iommu_device *intel_iommu_probe_device(struct device *dev)
5152 {
5153         struct intel_iommu *iommu;
5154
5155         iommu = device_to_iommu(dev, NULL, NULL);
5156         if (!iommu)
5157                 return ERR_PTR(-ENODEV);
5158
5159         if (translation_pre_enabled(iommu))
5160                 dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
5161
5162         return &iommu->iommu;
5163 }
5164
5165 static void intel_iommu_release_device(struct device *dev)
5166 {
5167         struct intel_iommu *iommu;
5168
5169         iommu = device_to_iommu(dev, NULL, NULL);
5170         if (!iommu)
5171                 return;
5172
5173         dmar_remove_one_dev_info(dev);
5174
5175         set_dma_ops(dev, NULL);
5176 }
5177
5178 static void intel_iommu_probe_finalize(struct device *dev)
5179 {
5180         dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
5181         struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
5182         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5183
5184         if (domain && domain->type == IOMMU_DOMAIN_DMA)
5185                 iommu_setup_dma_ops(dev, base,
5186                                     __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
5187         else
5188                 set_dma_ops(dev, NULL);
5189 }
5190
5191 static void intel_iommu_get_resv_regions(struct device *device,
5192                                          struct list_head *head)
5193 {
5194         int prot = DMA_PTE_READ | DMA_PTE_WRITE;
5195         struct iommu_resv_region *reg;
5196         struct dmar_rmrr_unit *rmrr;
5197         struct device *i_dev;
5198         int i;
5199
5200         down_read(&dmar_global_lock);
5201         for_each_rmrr_units(rmrr) {
5202                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5203                                           i, i_dev) {
5204                         struct iommu_resv_region *resv;
5205                         enum iommu_resv_type type;
5206                         size_t length;
5207
5208                         if (i_dev != device &&
5209                             !is_downstream_to_pci_bridge(device, i_dev))
5210                                 continue;
5211
5212                         length = rmrr->end_address - rmrr->base_address + 1;
5213
5214                         type = device_rmrr_is_relaxable(device) ?
5215                                 IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
5216
5217                         resv = iommu_alloc_resv_region(rmrr->base_address,
5218                                                        length, prot, type);
5219                         if (!resv)
5220                                 break;
5221
5222                         list_add_tail(&resv->list, head);
5223                 }
5224         }
5225         up_read(&dmar_global_lock);
5226
5227 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
5228         if (dev_is_pci(device)) {
5229                 struct pci_dev *pdev = to_pci_dev(device);
5230
5231                 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
5232                         reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
5233                                                    IOMMU_RESV_DIRECT_RELAXABLE);
5234                         if (reg)
5235                                 list_add_tail(&reg->list, head);
5236                 }
5237         }
5238 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
5239
5240         reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5241                                       IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5242                                       0, IOMMU_RESV_MSI);
5243         if (!reg)
5244                 return;
5245         list_add_tail(&reg->list, head);
5246 }
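
/*
 * Example of the resulting list (assumed platform, informational only): a
 * device covered by a firmware RMRR gets an IOMMU_RESV_DIRECT (or
 * IOMMU_RESV_DIRECT_RELAXABLE) entry for that range; an ISA bridge
 * additionally gets the first 16MB (1UL << 24) as a relaxable direct
 * region when the floppy workaround is built in; and every device gets the
 * IOAPIC_RANGE_START..IOAPIC_RANGE_END window reported as IOMMU_RESV_MSI.
 */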
5247
5248 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
5249 {
5250         struct device_domain_info *info;
5251         struct context_entry *context;
5252         struct dmar_domain *domain;
5253         unsigned long flags;
5254         u64 ctx_lo;
5255         int ret;
5256
5257         domain = find_domain(dev);
5258         if (!domain)
5259                 return -EINVAL;
5260
5261         spin_lock_irqsave(&device_domain_lock, flags);
5262         spin_lock(&iommu->lock);
5263
5264         ret = -EINVAL;
5265         info = get_domain_info(dev);
5266         if (!info || !info->pasid_supported)
5267                 goto out;
5268
5269         context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5270         if (WARN_ON(!context))
5271                 goto out;
5272
5273         ctx_lo = context[0].lo;
5274
5275         if (!(ctx_lo & CONTEXT_PASIDE)) {
5276                 ctx_lo |= CONTEXT_PASIDE;
5277                 context[0].lo = ctx_lo;
5278                 wmb();
5279                 iommu->flush.flush_context(iommu,
5280                                            domain->iommu_did[iommu->seq_id],
5281                                            PCI_DEVID(info->bus, info->devfn),
5282                                            DMA_CCMD_MASK_NOBIT,
5283                                            DMA_CCMD_DEVICE_INVL);
5284         }
5285
5286         /* Enable PASID support in the device, if it wasn't already */
5287         if (!info->pasid_enabled)
5288                 iommu_enable_dev_iotlb(info);
5289
5290         ret = 0;
5291
5292  out:
5293         spin_unlock(&iommu->lock);
5294         spin_unlock_irqrestore(&device_domain_lock, flags);
5295
5296         return ret;
5297 }
5298
5299 static struct iommu_group *intel_iommu_device_group(struct device *dev)
5300 {
5301         if (dev_is_pci(dev))
5302                 return pci_device_group(dev);
5303         return generic_device_group(dev);
5304 }
5305
5306 static int intel_iommu_enable_auxd(struct device *dev)
5307 {
5308         struct device_domain_info *info;
5309         struct intel_iommu *iommu;
5310         unsigned long flags;
5311         int ret;
5312
5313         iommu = device_to_iommu(dev, NULL, NULL);
5314         if (!iommu || dmar_disabled)
5315                 return -EINVAL;
5316
5317         if (!sm_supported(iommu) || !pasid_supported(iommu))
5318                 return -EINVAL;
5319
5320         ret = intel_iommu_enable_pasid(iommu, dev);
5321         if (ret)
5322                 return -ENODEV;
5323
5324         spin_lock_irqsave(&device_domain_lock, flags);
5325         info = get_domain_info(dev);
5326         info->auxd_enabled = 1;
5327         spin_unlock_irqrestore(&device_domain_lock, flags);
5328
5329         return 0;
5330 }
5331
5332 static int intel_iommu_disable_auxd(struct device *dev)
5333 {
5334         struct device_domain_info *info;
5335         unsigned long flags;
5336
5337         spin_lock_irqsave(&device_domain_lock, flags);
5338         info = get_domain_info(dev);
5339         if (!WARN_ON(!info))
5340                 info->auxd_enabled = 0;
5341         spin_unlock_irqrestore(&device_domain_lock, flags);
5342
5343         return 0;
5344 }
5345
5346 /*
5347  * Section 3.7 of the Intel Scalable I/O Virtualization technical specification
5348  * defines a PCI Express Designated Vendor-Specific Extended Capability (DVSEC)
5349  * that lets system software and tools detect endpoint devices supporting
5350  * Intel Scalable IOV without depending on a host driver.
5351  *
5352  * Returns the address of the matching extended capability structure within
5353  * the device's PCI configuration space or 0 if the device does not support
5354  * it.
5355  */
5356 static int siov_find_pci_dvsec(struct pci_dev *pdev)
5357 {
5358         int pos;
5359         u16 vendor, id;
5360
5361         pos = pci_find_next_ext_capability(pdev, 0, 0x23);
5362         while (pos) {
5363                 pci_read_config_word(pdev, pos + 4, &vendor);
5364                 pci_read_config_word(pdev, pos + 8, &id);
5365                 if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
5366                         return pos;
5367
5368                 pos = pci_find_next_ext_capability(pdev, pos, 0x23);
5369         }
5370
5371         return 0;
5372 }
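
/*
 * Layout being probed by the function above: each DVSEC instance is an
 * extended capability with ID 0x23; the DVSEC vendor ID sits at offset +4
 * and the DVSEC ID at offset +8 of the capability, and the Intel SIOV
 * DVSEC is the one with vendor 0x8086 and ID 5.  Since 0 means "not
 * found", a caller can simply test the return value, e.g.
 * "if (siov_find_pci_dvsec(pdev)) ...".
 */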
5373
5374 static bool
5375 intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
5376 {
5377         struct device_domain_info *info = get_domain_info(dev);
5378
5379         if (feat == IOMMU_DEV_FEAT_AUX) {
5380                 int ret;
5381
5382                 if (!dev_is_pci(dev) || dmar_disabled ||
5383                     !scalable_mode_support() || !pasid_mode_support())
5384                         return false;
5385
5386                 ret = pci_pasid_features(to_pci_dev(dev));
5387                 if (ret < 0)
5388                         return false;
5389
5390                 return !!siov_find_pci_dvsec(to_pci_dev(dev));
5391         }
5392
5393         if (feat == IOMMU_DEV_FEAT_IOPF)
5394                 return info && info->pri_supported;
5395
5396         if (feat == IOMMU_DEV_FEAT_SVA)
5397                 return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
5398                         info->pasid_supported && info->pri_supported &&
5399                         info->ats_supported;
5400
5401         return false;
5402 }
5403
5404 static int
5405 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
5406 {
5407         if (feat == IOMMU_DEV_FEAT_AUX)
5408                 return intel_iommu_enable_auxd(dev);
5409
5410         if (feat == IOMMU_DEV_FEAT_IOPF)
5411                 return intel_iommu_dev_has_feat(dev, feat) ? 0 : -ENODEV;
5412
5413         if (feat == IOMMU_DEV_FEAT_SVA) {
5414                 struct device_domain_info *info = get_domain_info(dev);
5415
5416                 if (!info)
5417                         return -EINVAL;
5418
5419                 if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
5420                         return -EINVAL;
5421
5422                 if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
5423                         return 0;
5424         }
5425
5426         return -ENODEV;
5427 }
5428
5429 static int
5430 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
5431 {
5432         if (feat == IOMMU_DEV_FEAT_AUX)
5433                 return intel_iommu_disable_auxd(dev);
5434
5435         return -ENODEV;
5436 }
5437
5438 static bool
5439 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
5440 {
5441         struct device_domain_info *info = get_domain_info(dev);
5442
5443         if (feat == IOMMU_DEV_FEAT_AUX)
5444                 return scalable_mode_support() && info && info->auxd_enabled;
5445
5446         return false;
5447 }
5448
5449 static int
5450 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
5451 {
5452         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5453
5454         return dmar_domain->default_pasid > 0 ?
5455                         dmar_domain->default_pasid : -EINVAL;
5456 }
5457
5458 static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
5459                                            struct device *dev)
5460 {
5461         return attach_deferred(dev);
5462 }
5463
5464 static int
5465 intel_iommu_enable_nesting(struct iommu_domain *domain)
5466 {
5467         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5468         unsigned long flags;
5469         int ret = -ENODEV;
5470
5471         spin_lock_irqsave(&device_domain_lock, flags);
5472         if (nested_mode_support() && list_empty(&dmar_domain->devices)) {
5473                 dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
5474                 dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
5475                 ret = 0;
5476         }
5477         spin_unlock_irqrestore(&device_domain_lock, flags);
5478
5479         return ret;
5480 }
5481
5482 /*
5483  * Check that the device does not live on an external facing PCI port that is
5484  * marked as untrusted. Such devices should not be able to apply quirks and
5485  * thus not be able to bypass the IOMMU restrictions.
5486  */
5487 static bool risky_device(struct pci_dev *pdev)
5488 {
5489         if (pdev->untrusted) {
5490                 pci_info(pdev,
5491                          "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
5492                          pdev->vendor, pdev->device);
5493                 pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
5494                 return true;
5495         }
5496         return false;
5497 }
5498
5499 static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
5500                              unsigned long clf_pages)
5501 {
5502         struct dma_pte *first_pte = NULL, *pte = NULL;
5503         unsigned long lvl_pages = 0;
5504         int level = 0;
5505
5506         while (clf_pages > 0) {
5507                 if (!pte) {
5508                         level = 0;
5509                         pte = pfn_to_dma_pte(domain, clf_pfn, &level);
5510                         if (WARN_ON(!pte))
5511                                 return;
5512                         first_pte = pte;
5513                         lvl_pages = lvl_to_nr_pages(level);
5514                 }
5515
5516                 if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
5517                         return;
5518
5519                 clf_pages -= lvl_pages;
5520                 clf_pfn += lvl_pages;
5521                 pte++;
5522
5523                 if (!clf_pages || first_pte_in_page(pte) ||
5524                     (level > 1 && clf_pages < lvl_pages)) {
5525                         domain_flush_cache(domain, first_pte,
5526                                            (void *)pte - (void *)first_pte);
5527                         pte = NULL;
5528                 }
5529         }
5530 }
5531
5532 static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
5533                                        unsigned long iova, size_t size)
5534 {
5535         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5536         unsigned long pages = aligned_nrpages(iova, size);
5537         unsigned long pfn = iova >> VTD_PAGE_SHIFT;
5538         struct intel_iommu *iommu;
5539         int iommu_id;
5540
5541         if (!dmar_domain->iommu_coherency)
5542                 clflush_sync_map(dmar_domain, pfn, pages);
5543
5544         for_each_domain_iommu(iommu_id, dmar_domain) {
5545                 iommu = g_iommus[iommu_id];
5546                 __mapping_notify_one(iommu, dmar_domain, pfn, pages);
5547         }
5548 }
5549
5550 const struct iommu_ops intel_iommu_ops = {
5551         .capable                = intel_iommu_capable,
5552         .domain_alloc           = intel_iommu_domain_alloc,
5553         .domain_free            = intel_iommu_domain_free,
5554         .enable_nesting         = intel_iommu_enable_nesting,
5555         .attach_dev             = intel_iommu_attach_device,
5556         .detach_dev             = intel_iommu_detach_device,
5557         .aux_attach_dev         = intel_iommu_aux_attach_device,
5558         .aux_detach_dev         = intel_iommu_aux_detach_device,
5559         .aux_get_pasid          = intel_iommu_aux_get_pasid,
5560         .map                    = intel_iommu_map,
5561         .iotlb_sync_map         = intel_iommu_iotlb_sync_map,
5562         .unmap                  = intel_iommu_unmap,
5563         .flush_iotlb_all        = intel_flush_iotlb_all,
5564         .iotlb_sync             = intel_iommu_tlb_sync,
5565         .iova_to_phys           = intel_iommu_iova_to_phys,
5566         .probe_device           = intel_iommu_probe_device,
5567         .probe_finalize         = intel_iommu_probe_finalize,
5568         .release_device         = intel_iommu_release_device,
5569         .get_resv_regions       = intel_iommu_get_resv_regions,
5570         .put_resv_regions       = generic_iommu_put_resv_regions,
5571         .device_group           = intel_iommu_device_group,
5572         .dev_has_feat           = intel_iommu_dev_has_feat,
5573         .dev_feat_enabled       = intel_iommu_dev_feat_enabled,
5574         .dev_enable_feat        = intel_iommu_dev_enable_feat,
5575         .dev_disable_feat       = intel_iommu_dev_disable_feat,
5576         .is_attach_deferred     = intel_iommu_is_attach_deferred,
5577         .def_domain_type        = device_def_domain_type,
5578         .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
5579 #ifdef CONFIG_INTEL_IOMMU_SVM
5580         .cache_invalidate       = intel_iommu_sva_invalidate,
5581         .sva_bind_gpasid        = intel_svm_bind_gpasid,
5582         .sva_unbind_gpasid      = intel_svm_unbind_gpasid,
5583         .sva_bind               = intel_svm_bind,
5584         .sva_unbind             = intel_svm_unbind,
5585         .sva_get_pasid          = intel_svm_get_pasid,
5586         .page_response          = intel_svm_page_response,
5587 #endif
5588 };
5589
5590 static void quirk_iommu_igfx(struct pci_dev *dev)
5591 {
5592         if (risky_device(dev))
5593                 return;
5594
5595         pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
5596         dmar_map_gfx = 0;
5597 }
5598
5599 /* G4x/GM45 integrated gfx dmar support is totally busted. */
5600 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
5601 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
5602 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
5603 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
5604 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
5605 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
5606 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
5607
5608 /* Broadwell igfx malfunctions with dmar */
5609 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
5610 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
5611 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
5612 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
5613 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
5614 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
5615 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
5616 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
5617 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
5618 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
5619 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
5620 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
5621 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
5622 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
5623 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
5624 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
5625 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
5626 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
5627 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
5628 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
5629 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
5630 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
5631 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
5632 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
5633
5634 static void quirk_iommu_rwbf(struct pci_dev *dev)
5635 {
5636         if (risky_device(dev))
5637                 return;
5638
5639         /*
5640          * Mobile 4 Series Chipset neglects to set RWBF capability,
5641          * but needs it. Same seems to hold for the desktop versions.
5642          */
5643         pci_info(dev, "Forcing write-buffer flush capability\n");
5644         rwbf_quirk = 1;
5645 }
5646
5647 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5648 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5649 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5650 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5651 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5652 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5653 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5654
5655 #define GGC 0x52
5656 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
5657 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
5658 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
5659 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
5660 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
5661 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
5662 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
5663 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
5664
5665 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5666 {
5667         unsigned short ggc;
5668
5669         if (risky_device(dev))
5670                 return;
5671
5672         if (pci_read_config_word(dev, GGC, &ggc))
5673                 return;
5674
5675         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5676                 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5677                 dmar_map_gfx = 0;
5678         } else if (dmar_map_gfx) {
5679                 /* we have to ensure the gfx device is idle before we flush */
5680                 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
5681                 intel_iommu_strict = 1;
5682         }
5683 }
5684 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5685 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5686 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5687 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
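
/*
 * Worked example of the GGC decoding above (informational only): GGC is a
 * 16-bit register at config offset 0x52.  A value with
 * GGC_MEMORY_VT_ENABLED clear, e.g. GGC_MEMORY_SIZE_1M (0x1 << 8), means
 * the BIOS allocated no VT-enabled shadow GTT, so graphics translation is
 * turned off; a value such as GGC_MEMORY_SIZE_2M_VT (0x9 << 8) keeps it on
 * but forces strict IOTLB flushing on these Ironlake chipsets.
 */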
5688
5689 static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
5690 {
5691         unsigned short ver;
5692
5693         if (!IS_GFX_DEVICE(dev))
5694                 return;
5695
5696         ver = (dev->device >> 8) & 0xff;
5697         if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
5698             ver != 0x4e && ver != 0x8a && ver != 0x98 &&
5699             ver != 0x9a)
5700                 return;
5701
5702         if (risky_device(dev))
5703                 return;
5704
5705         pci_info(dev, "Skip IOMMU disabling for graphics\n");
5706         iommu_skip_te_disable = 1;
5707 }
5708 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
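
/*
 * Example of the version check above (the device IDs are an assumption
 * for illustration, not a guarantee): a graphics device ID of 0x9a49 gives
 * ver = (0x9a49 >> 8) & 0xff = 0x9a, which is in the list, so
 * iommu_skip_te_disable is set; an ID such as 0x0042 gives ver = 0x00 and
 * the quirk is skipped.
 */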
5709
5710 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5711    ISOCH DMAR unit for the Azalia sound device, but not give it any
5712    TLB entries, which causes it to deadlock. Check for that.  We do
5713    this in a function called from init_dmars(), instead of in a PCI
5714    quirk, because we don't want to print the obnoxious "BIOS broken"
5715    message if VT-d is actually disabled.
5716 */
5717 static void __init check_tylersburg_isoch(void)
5718 {
5719         struct pci_dev *pdev;
5720         uint32_t vtisochctrl;
5721
5722         /* If there's no Azalia in the system anyway, forget it. */
5723         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5724         if (!pdev)
5725                 return;
5726
5727         if (risky_device(pdev)) {
5728                 pci_dev_put(pdev);
5729                 return;
5730         }
5731
5732         pci_dev_put(pdev);
5733
5734         /* System Management Registers. Might be hidden, in which case
5735            we can't do the sanity check. But that's OK, because the
5736            known-broken BIOSes _don't_ actually hide it, so far. */
5737         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5738         if (!pdev)
5739                 return;
5740
5741         if (risky_device(pdev)) {
5742                 pci_dev_put(pdev);
5743                 return;
5744         }
5745
5746         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5747                 pci_dev_put(pdev);
5748                 return;
5749         }
5750
5751         pci_dev_put(pdev);
5752
5753         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5754         if (vtisochctrl & 1)
5755                 return;
5756
5757         /* Drop all bits other than the number of TLB entries */
5758         vtisochctrl &= 0x1c;
5759
5760         /* If we have the recommended number of TLB entries (16), fine. */
5761         if (vtisochctrl == 0x10)
5762                 return;
5763
5764         /* Zero TLB entries? The unit is effectively unusable. */
5765         if (!vtisochctrl) {
5766                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5767                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5768                      dmi_get_system_info(DMI_BIOS_VENDOR),
5769                      dmi_get_system_info(DMI_BIOS_VERSION),
5770                      dmi_get_system_info(DMI_PRODUCT_VERSION));
5771                 iommu_identity_mapping |= IDENTMAP_AZALIA;
5772                 return;
5773         }
5774
5775         pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
5776                vtisochctrl);
5777 }
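
/*
 * Worked example of the register decoding above (informational only):
 * vtisochctrl = 0x11 has bit 0 set, so Azalia DMA goes to the non-isoch
 * DMAR unit and nothing needs to be done; vtisochctrl = 0x10 means 16 TLB
 * entries are allocated, which is the recommended value; vtisochctrl = 0
 * is the broken case that forces identity mapping for the Azalia device.
 */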