// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA Remapping Reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "../irq_remapping.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);

struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
};
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data
 * structures:
 * 1) Use dmar_global_lock in process context.
 * 2) Use RCU in interrupt context.
 */
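/*
 * Illustrative sketch of the locking rules above (not part of the
 * upstream file): a process-context reader takes the rwsem, while an
 * interrupt-context reader relies on RCU only, e.g.:
 *
 *	down_read(&dmar_global_lock);
 *	for_each_drhd_unit(dmaru)
 *		...;
 *	up_read(&dmar_global_lock);
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
 *		...;
 *	rcu_read_unlock();
 */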
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

extern const struct iommu_ops intel_iommu_ops;
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}
void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/*
	 * Ignore devices that have a domain number higher than what can
	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
	 */
	if (pci_domain_nr(dev->bus) > U16_MAX)
		return NULL;

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = struct_size(info, path, level);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}
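/*
 * Worked example (illustrative, not from the upstream file): for a
 * device 0000:05:00.0 behind a PCIe bridge at 0000:00:1c.0, the loop
 * above walks dev->bus->self upwards and records, in root-to-leaf
 * order:
 *
 *	path[0] = { .bus = 0x00, .device = 0x1c, .function = 0 }
 *	path[1] = { .bus = 0x05, .device = 0x00, .function = 0 }
 *
 * with info->bus set to the root bus number (0x00). This is the form
 * that dmar_match_pci_path() compares against ACPI device-scope
 * entries.
 */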
static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}
	return true;

fallback:
	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus == info->path[i].bus &&
	    path[0].device == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}
	return false;
}
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers.  However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header.  NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	if (ret >= 0)
		intel_irq_remap_add_device(info);

	return ret;
}
static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
					  dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}
static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
{
	struct pci_dev *physfn = pci_physfn(pdev);

	dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn) {
		/*
		 * Ensure that the VF device inherits the irq domain of the
		 * PF device. Ideally the device would inherit the domain
		 * from the bus, but DMAR can have multiple units per bus
		 * which makes this impossible. The VF 'bus' could inherit
		 * from the PF device, but that's yet another x86-ism to
		 * inflict on everybody else.
		 */
		if (action == BUS_NOTIFY_ADD_DEVICE)
			vf_inherit_msi_domain(pdev);
		return NOTIFY_DONE;
	}

	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}
static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
};
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
				dmar_rcu_check())
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		pr_warn(FW_BUG
			"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}
#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = pxm_to_node(rhsa->proximity_domain);

			if (node != NUMA_NO_NODE && !node_online(node))
				node = NUMA_NO_NODE;
			drhd->iommu->node = node;
			return 0;
		}
	}
	pr_warn(FW_BUG
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

	return 0;
}
#else
#define dmar_parse_one_rhsa dmar_res_noop
#endif
static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			return -EINVAL;
		}

		dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			int ret;

			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
			if (ret)
				return ret;
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			return -EINVAL;
		}
	}

	return 0;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int drhd_count = 0;
	int ret;
	struct dmar_res_callback cb = {
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;
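	/*
	 * The DMAR width field encodes the host address width (HAW) minus
	 * one; e.g. width == 38 means a 39-bit HAW, as the pr_info below
	 * reflects. Reject a HAW smaller than one page.
	 */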
	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}
static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}
int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}
	}

	return dmar_dev_scope_status;
}
void __init dmar_register_bus_notifier(void)
{
	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	pr_warn_once(FW_BUG
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}
static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (!ret)
		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					   &validate_drhd_cb);
	if (!ret && !no_iommu && !iommu_detected &&
	    (!dmar_disabled || dmar_platform_optin())) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (!ret) {
		x86_init.iommu.iommu_init = intel_iommu_init;
		x86_platform.iommu_shutdown = intel_iommu_shutdown;
	}
#endif

	if (dmar_tbl) {
		acpi_put_table(dmar_tbl);
		dmar_tbl = NULL;
	}
	up_write(&dmar_global_lock);

	return ret ? ret : 1;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}
	if (ecap_vcs(iommu->ecap))
		iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = -1;
	int msagaw = -1;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	if (cap_sagaw(iommu->cap) == 0) {
		pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
			iommu->name);
		drhd->ignored = 1;
	}

	if (!drhd->ignored) {
		agaw = iommu_calculate_agaw(iommu);
		if (agaw < 0) {
			pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
			       iommu->seq_id);
			drhd->ignored = 1;
		}
	}
	if (!drhd->ignored) {
		msagaw = iommu_calculate_max_sagaw(iommu);
		if (msagaw < 0) {
			pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			       iommu->seq_id);
			drhd->ignored = 1;
			agaw = -1;
		}
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;
	iommu->node = NUMA_NO_NODE;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	/*
	 * This is only for hotplug; at boot time intel_iommu_enabled won't
	 * be set yet. When intel_iommu_init() runs, it registers the units
	 * present at boot time, then sets intel_iommu_enabled.
	 */
	if (intel_iommu_enabled && !drhd->ignored) {
		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
					     intel_iommu_groups,
					     "%s", iommu->name);
		if (err)
			goto err_unmap;

		iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);

		err = iommu_device_register(&iommu->iommu);
		if (err)
			goto err_sysfs;
	}

	drhd->iommu = iommu;
	iommu->drhd = drhd;

	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
	if (intel_iommu_enabled && !iommu->drhd->ignored) {
		iommu_device_unregister(&iommu->iommu);
		iommu_device_sysfs_remove(&iommu->iommu);
	}

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
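/*
 * Worked example (illustrative): with QI_LENGTH == 256, free_tail == 254
 * and completed descriptors at slots 254, 255 and 0, the loop above marks
 * each slot QI_FREE in turn and wraps free_tail 254 -> 255 -> 0 -> 1 via
 * the "% QI_LENGTH" arithmetic, returning three slots to free_cnt.
 */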
static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int shift = qi_shift(iommu);

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> shift) == index) {
			struct qi_desc *desc = qi->desc + head;

			/*
			 * desc->qw2 and desc->qw3 are either reserved or
			 * used by software as private data. We won't print
			 * out these two qw's for security consideration.
			 */
			pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
			       (unsigned long long)desc->qw0,
			       (unsigned long long)desc->qw1);
			memcpy(desc, qi->desc + (wait_index << shift),
			       1 << shift);
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Function to submit invalidation descriptors of all types to the queued
 * invalidation interface (QI). Multiple descriptors can be submitted at a
 * time; a wait descriptor will be appended to each submission to ensure
 * hardware has completed the invalidation before return. Wait descriptors
 * can be part of the submission, but they will not be polled for completion.
 */
int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
		   unsigned int count, unsigned long options)
{
	struct q_inval *qi = iommu->qi;
	struct qi_desc wait_desc;
	int wait_index, index;
	unsigned long flags;
	int offset, shift;
	int rc, i;

	if (!qi)
		return 0;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	/*
	 * Check if we have enough empty slots in the queue to submit,
	 * the calculation is based on:
	 * # of desc + 1 wait desc + 1 space between head and tail
	 */
	while (qi->free_cnt < count + 2) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + count) % QI_LENGTH;
	shift = qi_shift(iommu);

	for (i = 0; i < count; i++) {
		offset = ((index + i) % QI_LENGTH) << shift;
		memcpy(qi->desc + offset, &desc[i], 1 << shift);
		qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
	}
	qi->desc_status[wait_index] = QI_IN_USE;

	wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	if (options & QI_OPT_WAIT_DRAIN)
		wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
	wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
	wait_desc.qw2 = 0;
	wait_desc.qw3 = 0;

	offset = wait_index << shift;
	memcpy(qi->desc + offset, &wait_desc, 1 << shift);

	qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
	qi->free_cnt -= count + 1;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index, wait_index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	for (i = 0; i < count; i++)
		qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
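/*
 * Worked example (illustrative): submitting count == 2 descriptors
 * requires free_cnt >= 4 in the capacity check above: two slots for the
 * descriptors, one for the appended wait descriptor, and one slot that
 * must stay empty so the hardware can tell a full ring apart from an
 * empty one (head == tail).
 */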
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.qw0 = QI_IEC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	/* should never fail */
	qi_submit_sync(iommu, &desc, 1, 0);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.qw1 = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}
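/*
 * Worked example (illustrative): with mask == 2 (four pages) and a
 * 16KB-aligned addr of 0x12344000, the OR above yields 0x12345fff.
 * After the page-frame bits are extracted, address bit 12 is 1 and bit
 * 13 is 0, so with the S bit set the least significant zero bit (13)
 * encodes an invalidation range of 2^(13 + 1) = 16KB, i.e. 1 << mask
 * pages.
 */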
/* PASID-based IOTLB invalidation */
void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
		     unsigned long npages, bool ih)
{
	struct qi_desc desc = {.qw2 = 0, .qw3 = 0};

	/*
	 * npages == -1 means a PASID-selective invalidation, otherwise,
	 * a positive value for Page-selective-within-PASID invalidation.
	 * 0 is not a valid input.
	 */
	if (WARN_ON(!npages)) {
		pr_err("Invalid input npages = %ld\n", npages);
		return;
	}

	if (npages == -1) {
		desc.qw0 = QI_EIOTLB_PASID(pasid) |
				QI_EIOTLB_DID(did) |
				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
				QI_EIOTLB_TYPE;
		desc.qw1 = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(npages));
		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));

		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
			addr = ALIGN_DOWN(addr, align);

		desc.qw0 = QI_EIOTLB_PASID(pasid) |
				QI_EIOTLB_DID(did) |
				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
				QI_EIOTLB_TYPE;
		desc.qw1 = QI_EIOTLB_ADDR(addr) |
				QI_EIOTLB_IH(ih) |
				QI_EIOTLB_AM(mask);
	}

	qi_submit_sync(iommu, &desc, 1, 0);
}
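/*
 * Worked example (illustrative): npages == 5 rounds up to 8 pages, so
 * mask = ilog2(8) = 3 and addr must be 1 << (VTD_PAGE_SHIFT + 3) = 32KB
 * aligned; the hardware then invalidates 2^3 = 8 pages starting at addr.
 * A misaligned addr is aligned down, with a one-time warning.
 */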
/* PASID-based device IOTLB Invalidate */
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			      u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
{
	unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
	struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};

	desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
		QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
		QI_DEV_IOTLB_PFSID(pfsid);

	/*
	 * If S bit is 0, we only flush a single page. If S bit is set,
	 * the least significant zero bit indicates the invalidation address
	 * range. VT-d spec 6.5.2.6.
	 * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
	 * size order = 0 is PAGE_SIZE 4KB
	 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
	 * ECAP.
	 */
	if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
		pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
				    addr, size_order);

	/* Take page address */
	desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);

	if (size_order) {
		/*
		 * Existing 0s in address below size_order may be the least
		 * significant bit, we must set them to 1s to avoid having
		 * smaller size than desired.
		 */
		desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
					VTD_PAGE_SHIFT);
		/* Clear size_order bit to indicate size */
		desc.qw1 &= ~mask;
		/* Set the S bit to indicate flushing more than 1 page */
		desc.qw1 |= QI_DEV_EIOTLB_SIZE;
	}

	qi_submit_sync(iommu, &desc, 1, 0);
}
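/*
 * Worked example (illustrative): size_order == 2 (16KB). The
 * GENMASK_ULL() above first sets address bits 13:12, then the
 * size_order bit (mask == 1UL << 13) is cleared again, leaving bit 12
 * set and bit 13 clear; with the S bit set, the least significant zero
 * bit (13) encodes a 2^(13 + 1) = 16KB invalidation range.
 */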
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
			  u64 granu, u32 pasid)
{
	struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};

	desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
			QI_PC_GRAN(granu) | QI_PC_TYPE;
	qi_submit_sync(iommu, &desc, 1, 0);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;
	u64 val = virt_to_phys(qi->desc);

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	/*
	 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
	 * is present.
	 */
	if (ecap_smts(iommu->ecap))
		val |= (1 << 11) | 1;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, val);

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	/*
	 * Need two pages to accommodate 256 descriptors of 256 bits each
	 * if the remapping hardware supports scalable mode translation.
	 */
	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				     !!ecap_smts(iommu->ecap));
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}
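	/*
	 * Arithmetic behind the allocation order above (illustrative):
	 * scalable mode uses 256-bit (32-byte) descriptors, so QI_LENGTH
	 * (256) descriptors need 256 * 32 = 8KB, i.e. an order-1 (two
	 * page) allocation; legacy 128-bit descriptors fit in a single
	 * order-0 page. !!ecap_smts() yields exactly that 1 or 0.
	 */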
	qi->desc = page_address(desc_page);

	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};
static const char * const dma_remap_sm_fault_reasons[] = {
	"SM: Invalid Root Table Address",
	"SM: TTM 0 for request with PASID",
	"SM: TTM 0 for page group request",
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
	"SM: Error attempting to access Root Entry",
	"SM: Present bit in Root Entry is clear",
	"SM: Non-zero reserved field set in Root Entry",
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
	"SM: Error attempting to access Context Entry",
	"SM: Present bit in Context Entry is clear",
	"SM: Non-zero reserved field set in the Context Entry",
	"SM: Invalid Context Entry",
	"SM: DTE field in Context Entry is clear",
	"SM: PASID Enable field in Context Entry is clear",
	"SM: PASID is larger than the max in Context Entry",
	"SM: PRE field in Context-Entry is clear",
	"SM: RID_PASID field error in Context-Entry",
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
	"SM: Error attempting to access the PASID Directory Entry",
	"SM: Present bit in Directory Entry is clear",
	"SM: Non-zero reserved field set in PASID Directory Entry",
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
	"SM: Error attempting to access PASID Table Entry",
	"SM: Present bit in PASID Table Entry is clear",
	"SM: Non-zero reserved field set in PASID Table Entry",
	"SM: Invalid Scalable-Mode PASID Table Entry",
	"SM: ERE field is clear in PASID Table Entry",
	"SM: SRE field is clear in PASID Table Entry",
	"Unknown", "Unknown", /* 0x5E-0x5F */
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
	"SM: Error attempting to access first-level paging entry",
	"SM: Present bit in first-level paging entry is clear",
	"SM: Non-zero reserved field set in first-level paging entry",
	"SM: Error attempting to access FL-PML4 entry",
	"SM: First-level entry address beyond MGAW in Nested translation",
	"SM: Read permission error in FL-PML4 entry in Nested translation",
	"SM: Read permission error in first-level paging entry in Nested translation",
	"SM: Write permission error in first-level paging entry in Nested translation",
	"SM: Error attempting to access second-level paging entry",
	"SM: Read/Write permission error in second-level paging entry",
	"SM: Non-zero reserved field set in second-level paging entry",
	"SM: Invalid second-level page table pointer",
	"SM: A/D bit update needed in second-level entry when set up in no snoop",
	"Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
	"SM: Address in first-level translation is not canonical",
	"SM: U/S set 0 for first-level translation with user privilege",
	"SM: No execute permission for request with PASID and ER=1",
	"SM: Address beyond the DMA hardware max",
	"SM: Second-level entry address beyond the max",
	"SM: No write permission for Write/AtomicOp request",
	"SM: No read permission for Read/AtomicOp request",
	"SM: Invalid address-interrupt address",
	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
	"SM: A/D bit update needed in first-level entry when set up in no snoop",
};
static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
			ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
		*fault_type = DMA_REMAP;
		return dma_remap_sm_fault_reasons[fault_reason - 0x30];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
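/*
 * Layout note (illustrative, per the VT-d spec): each event interrupt
 * is programmed through a block of four 32-bit registers: control at
 * the base (FECTL/PECTL), then data (+4), address (+8) and upper
 * address (+12). That is why dmar_msi_read() and dmar_msi_write()
 * address them as reg + 4/8/12 relative to dmar_msi_reg().
 */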
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u32 pasid, u16 source_id,
		unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), pasid, addr,
		       fault_reason, reason);

	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
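/*
 * Illustrative note: each primary fault record is 16 bytes (two 64-bit
 * registers). The first qword carries the faulting page address; the
 * second packs the fault reason, type, PASID and source-id fields plus
 * the F bit that dmar_fault() below tests and clears, which is why the
 * code reads dword offsets +8 and +12 within each record.
 */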
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && __ratelimit(&rs))
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		/* Disable printing, simply clear the fault when ratelimited */
		bool ratelimited = !__ratelimit(&rs);
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		u32 pasid;
		int type;
		u32 data;
		bool pasid_present;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		if (!ratelimited) {
			fault_reason = dma_frcd_fault_reason(data);
			type = dma_frcd_type(data);

			pasid = dma_frcd_pasid_value(data);
			data = readl(iommu->reg + reg +
				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
			source_id = dma_frcd_source_id(data);

			pasid_present = dma_frcd_pasid_present(data);
			guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
			guest_addr = dma_frcd_page_addr(guest_addr);
		}

		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		if (!ratelimited)
			/* Using pasid -1 if pasid is not present */
			dmar_fault_do_one(iommu, type, fault_reason,
					  pasid_present ? pasid : -1,
					  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
	       iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/* Check if the fault interrupt is already initialized. */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap) || !iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static guid_t dmar_hp_guid =
	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID		0
#define	DMAR_DSM_FUNC_DRHD	1
#define	DMAR_DSM_FUNC_ATSR	2
#define	DMAR_DSM_FUNC_RHSA	3

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
}
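/*
 * Illustrative note: acpi_check_dsm() takes a bitmask of _DSM function
 * indices, so probing for DMAR_DSM_FUNC_DRHD (1) passes 1 << 1 == 0x2,
 * asking whether the firmware object implements that one function.
 */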
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}
static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}
static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}
static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}
static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}
static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}
static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}
static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}
int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}
/*
 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
 *
 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
 * the ACPI DMAR table. This means that the platform boot firmware has made
 * sure no device can issue DMA outside of RMRR regions.
 */
bool dmar_platform_optin(void)
{
	struct acpi_table_dmar *dmar;
	acpi_status status;
	bool ret;

	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar);
	if (ACPI_FAILURE(status))
		return false;

	ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
	acpi_put_table((struct acpi_table_header *)dmar);

	return ret;
}
EXPORT_SYMBOL_GPL(dmar_platform_optin);