1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2006, Intel Corporation.
5 * Copyright (C) 2006-2008 Intel Corporation
6 * Author: Ashok Raj <ashok.raj@intel.com>
7 * Author: Shaohua Li <shaohua.li@intel.com>
8 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
10 * This file implements early detection/parsing of Remapping Devices
11 * reported to the OS through BIOS via the DMA remapping reporting (DMAR) ACPI
14 * These routines are used by both DMA-remapping and Interrupt-remapping
17 #define pr_fmt(fmt) "DMAR: " fmt
19 #include <linux/pci.h>
20 #include <linux/dmar.h>
21 #include <linux/iova.h>
22 #include <linux/timer.h>
23 #include <linux/irq.h>
24 #include <linux/interrupt.h>
25 #include <linux/tboot.h>
26 #include <linux/dmi.h>
27 #include <linux/slab.h>
28 #include <linux/iommu.h>
29 #include <linux/numa.h>
30 #include <linux/limits.h>
31 #include <asm/irq_remapping.h>
34 #include "../irq_remapping.h"
39 typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
40 struct dmar_res_callback {
41 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
42 void *arg[ACPI_DMAR_TYPE_RESERVED];
43 bool ignore_unhandled;
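/* See parse_dmar_table() below for how the cb[] and arg[] slots are populated. */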
49 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
50 * before IO devices managed by that unit.
51 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
52 * after IO devices managed by that unit.
53 * 3) Hotplug events are rare.
55 * Locking rules for DMA and interrupt remapping related global data structures:
56 * 1) Use dmar_global_lock in process context
57 * 2) Use RCU in interrupt context
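/*
 * A minimal sketch of these rules (illustrative only): process context
 * takes dmar_global_lock, while interrupt context walks the list under RCU:
 *
 *	down_read(&dmar_global_lock);
 *	for_each_drhd_unit(dmaru)
 *		...;
 *	up_read(&dmar_global_lock);
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
 *		...;
 *	rcu_read_unlock();
 */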
59 DECLARE_RWSEM(dmar_global_lock);
60 LIST_HEAD(dmar_drhd_units);
62 struct acpi_table_header * __initdata dmar_tbl;
63 static int dmar_dev_scope_status = 1;
64 static DEFINE_IDA(dmar_seq_ids);
66 static int alloc_iommu(struct dmar_drhd_unit *drhd);
67 static void free_iommu(struct intel_iommu *iommu);
69 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
72 * add INCLUDE_ALL at the tail, so a scan of the list finds it last
75 if (drhd->include_all)
76 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
78 list_add_rcu(&drhd->list, &dmar_drhd_units);
81 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
83 struct acpi_dmar_device_scope *scope;
88 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
89 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
90 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
92 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
93 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
94 pr_warn("Unsupported device scope\n");
96 start += scope->length;
101 return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
104 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
107 struct device *tmp_dev;
109 if (*devices && *cnt) {
110 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
119 /* Optimize out kzalloc()/kfree() for normal cases */
120 static char dmar_pci_notify_info_buf[64];
122 static struct dmar_pci_notify_info *
123 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
128 struct dmar_pci_notify_info *info;
131 * Ignore devices that have a domain number higher than what can
132 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
134 if (pci_domain_nr(dev->bus) > U16_MAX)
137 /* Only generate path[] for device addition event */
138 if (event == BUS_NOTIFY_ADD_DEVICE)
139 for (tmp = dev; tmp; tmp = tmp->bus->self)
142 size = struct_size(info, path, level);
143 if (size <= sizeof(dmar_pci_notify_info_buf)) {
144 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
146 info = kzalloc(size, GFP_KERNEL);
148 if (dmar_dev_scope_status == 0)
149 dmar_dev_scope_status = -ENOMEM;
156 info->seg = pci_domain_nr(dev->bus);
158 if (event == BUS_NOTIFY_ADD_DEVICE) {
159 for (tmp = dev; tmp; tmp = tmp->bus->self) {
161 info->path[level].bus = tmp->bus->number;
162 info->path[level].device = PCI_SLOT(tmp->devfn);
163 info->path[level].function = PCI_FUNC(tmp->devfn);
164 if (pci_is_root_bus(tmp->bus))
165 info->bus = tmp->bus->number;
172 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
174 if ((void *)info != dmar_pci_notify_info_buf)
178 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
179 struct acpi_dmar_pci_path *path, int count)
183 if (info->bus != bus)
185 if (info->level != count)
188 for (i = 0; i < count; i++) {
189 if (path[i].device != info->path[i].device ||
190 path[i].function != info->path[i].function)
202 if (bus == info->path[i].bus &&
203 path[0].device == info->path[i].device &&
204 path[0].function == info->path[i].function) {
205 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
206 bus, path[0].device, path[0].function);
213 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
214 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
215 void *start, void *end, u16 segment,
216 struct dmar_dev_scope *devices,
220 struct device *tmp, *dev = &info->dev->dev;
221 struct acpi_dmar_device_scope *scope;
222 struct acpi_dmar_pci_path *path;
224 if (segment != info->seg)
227 for (; start < end; start += scope->length) {
229 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
230 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
233 path = (struct acpi_dmar_pci_path *)(scope + 1);
234 level = (scope->length - sizeof(*scope)) / sizeof(*path);
235 if (!dmar_match_pci_path(info, scope->bus, path, level))
239 * We expect devices with endpoint scope to have normal PCI
240 * headers, and devices with bridge scope to have bridge PCI
241 * headers. However PCI NTB devices may be listed in the
242 * DMAR table with bridge scope, even though they have a
243 * normal PCI header. NTB devices are identified by class
244 * "BRIDGE_OTHER" (0680h) - we don't declare a socpe mismatch
245 * for this special case.
247 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
248 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
249 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
250 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
251 info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
252 pr_warn("Device scope type does not match for %s\n",
253 pci_name(info->dev));
257 for_each_dev_scope(devices, devices_cnt, i, tmp)
259 devices[i].bus = info->dev->bus->number;
260 devices[i].devfn = info->dev->devfn;
261 rcu_assign_pointer(devices[i].dev,
265 if (WARN_ON(i >= devices_cnt))
272 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
273 struct dmar_dev_scope *devices, int count)
278 if (info->seg != segment)
281 for_each_active_dev_scope(devices, count, index, tmp)
282 if (tmp == &info->dev->dev) {
283 RCU_INIT_POINTER(devices[index].dev, NULL);
292 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
295 struct dmar_drhd_unit *dmaru;
296 struct acpi_dmar_hardware_unit *drhd;
298 for_each_drhd_unit(dmaru) {
299 if (dmaru->include_all)
302 drhd = container_of(dmaru->hdr,
303 struct acpi_dmar_hardware_unit, header);
304 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
305 ((void *)drhd) + drhd->header.length,
307 dmaru->devices, dmaru->devices_cnt);
312 ret = dmar_iommu_notify_scope_dev(info);
313 if (ret < 0 && dmar_dev_scope_status == 0)
314 dmar_dev_scope_status = ret;
317 intel_irq_remap_add_device(info);
322 static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
324 struct dmar_drhd_unit *dmaru;
326 for_each_drhd_unit(dmaru)
327 if (dmar_remove_dev_scope(info, dmaru->segment,
328 dmaru->devices, dmaru->devices_cnt))
330 dmar_iommu_notify_scope_dev(info);
333 static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
335 struct pci_dev *physfn = pci_physfn(pdev);
337 dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
340 static int dmar_pci_bus_notifier(struct notifier_block *nb,
341 unsigned long action, void *data)
343 struct pci_dev *pdev = to_pci_dev(data);
344 struct dmar_pci_notify_info *info;
346 /* Only care about add/remove events for physical functions.
347 * For VFs we actually do the lookup based on the corresponding
348 * PF in device_to_iommu() anyway. */
349 if (pdev->is_virtfn) {
351 * Ensure that the VF device inherits the irq domain of the
352 * PF device. Ideally the device would inherit the domain
353 * from the bus, but DMAR can have multiple units per bus
354 * which makes this impossible. The VF 'bus' could inherit
355 * from the PF device, but that's yet another x86'sism to
356 * inflict on everybody else.
358 if (action == BUS_NOTIFY_ADD_DEVICE)
359 vf_inherit_msi_domain(pdev);
363 if (action != BUS_NOTIFY_ADD_DEVICE &&
364 action != BUS_NOTIFY_REMOVED_DEVICE)
367 info = dmar_alloc_pci_notify_info(pdev, action);
371 down_write(&dmar_global_lock);
372 if (action == BUS_NOTIFY_ADD_DEVICE)
373 dmar_pci_bus_add_dev(info);
374 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
375 dmar_pci_bus_del_dev(info);
376 up_write(&dmar_global_lock);
378 dmar_free_pci_notify_info(info);
383 static struct notifier_block dmar_pci_bus_nb = {
384 .notifier_call = dmar_pci_bus_notifier,
388 static struct dmar_drhd_unit *
389 dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
391 struct dmar_drhd_unit *dmaru;
393 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
395 if (dmaru->segment == drhd->segment &&
396 dmaru->reg_base_addr == drhd->address)
403 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
404 * structure which uniquely represents one DMA remapping hardware unit
405 * present in the platform
407 static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
409 struct acpi_dmar_hardware_unit *drhd;
410 struct dmar_drhd_unit *dmaru;
413 drhd = (struct acpi_dmar_hardware_unit *)header;
414 dmaru = dmar_find_dmaru(drhd);
418 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
423 * If the header was allocated from the slab by an ACPI _DSM method, we
424 * need to copy the content because the memory buffer will be freed on return.
426 dmaru->hdr = (void *)(dmaru + 1);
427 memcpy(dmaru->hdr, header, header->length);
428 dmaru->reg_base_addr = drhd->address;
429 dmaru->segment = drhd->segment;
430 /* The size of the register set is 2^N 4 KB pages. */
431 dmaru->reg_size = 1UL << (drhd->size + 12);
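/* e.g. drhd->size == 1 gives 1UL << 13, i.e. two 4 KB pages of registers */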
432 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
433 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
434 ((void *)drhd) + drhd->header.length,
435 &dmaru->devices_cnt);
436 if (dmaru->devices_cnt && dmaru->devices == NULL) {
441 ret = alloc_iommu(dmaru);
443 dmar_free_dev_scope(&dmaru->devices,
444 &dmaru->devices_cnt);
448 dmar_register_drhd_unit(dmaru);
457 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
459 if (dmaru->devices && dmaru->devices_cnt)
460 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
462 free_iommu(dmaru->iommu);
466 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
469 struct acpi_dmar_andd *andd = (void *)header;
471 /* Check for NUL termination within the designated length */
472 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
474 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
475 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
476 dmi_get_system_info(DMI_BIOS_VENDOR),
477 dmi_get_system_info(DMI_BIOS_VERSION),
478 dmi_get_system_info(DMI_PRODUCT_VERSION));
479 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
482 pr_info("ANDD device: %x name: %s\n", andd->device_number,
488 #ifdef CONFIG_ACPI_NUMA
489 static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
491 struct acpi_dmar_rhsa *rhsa;
492 struct dmar_drhd_unit *drhd;
494 rhsa = (struct acpi_dmar_rhsa *)header;
495 for_each_drhd_unit(drhd) {
496 if (drhd->reg_base_addr == rhsa->base_address) {
497 int node = pxm_to_node(rhsa->proximity_domain);
499 if (node != NUMA_NO_NODE && !node_online(node))
501 drhd->iommu->node = node;
506 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
507 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
509 dmi_get_system_info(DMI_BIOS_VENDOR),
510 dmi_get_system_info(DMI_BIOS_VERSION),
511 dmi_get_system_info(DMI_PRODUCT_VERSION));
512 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
517 #define dmar_parse_one_rhsa dmar_res_noop
521 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
523 struct acpi_dmar_hardware_unit *drhd;
524 struct acpi_dmar_reserved_memory *rmrr;
525 struct acpi_dmar_atsr *atsr;
526 struct acpi_dmar_rhsa *rhsa;
527 struct acpi_dmar_satc *satc;
529 switch (header->type) {
530 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
531 drhd = container_of(header, struct acpi_dmar_hardware_unit,
533 pr_info("DRHD base: %#016Lx flags: %#x\n",
534 (unsigned long long)drhd->address, drhd->flags);
536 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
537 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
539 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
540 (unsigned long long)rmrr->base_address,
541 (unsigned long long)rmrr->end_address);
543 case ACPI_DMAR_TYPE_ROOT_ATS:
544 atsr = container_of(header, struct acpi_dmar_atsr, header);
545 pr_info("ATSR flags: %#x\n", atsr->flags);
547 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
548 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
549 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
550 (unsigned long long)rhsa->base_address,
551 rhsa->proximity_domain);
553 case ACPI_DMAR_TYPE_NAMESPACE:
554 /* We don't print this here because we need to sanity-check
555 it first. So print it in dmar_parse_one_andd() instead. */
557 case ACPI_DMAR_TYPE_SATC:
558 satc = container_of(header, struct acpi_dmar_satc, header);
559 pr_info("SATC flags: 0x%x\n", satc->flags);
565 * dmar_table_detect - checks to see if the platform supports DMAR devices
567 static int __init dmar_table_detect(void)
569 acpi_status status = AE_OK;
571 /* if we can find the DMAR table, then the platform has DMAR devices */
572 status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
574 if (ACPI_SUCCESS(status) && !dmar_tbl) {
575 pr_warn("Unable to map DMAR\n");
576 status = AE_NOT_FOUND;
579 return ACPI_SUCCESS(status) ? 0 : -ENOENT;
582 static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
583 size_t len, struct dmar_res_callback *cb)
585 struct acpi_dmar_header *iter, *next;
586 struct acpi_dmar_header *end = ((void *)start) + len;
588 for (iter = start; iter < end; iter = next) {
589 next = (void *)iter + iter->length;
590 if (iter->length == 0) {
591 /* Avoid looping forever on bad ACPI tables */
592 pr_debug(FW_BUG "Invalid 0-length structure\n");
594 } else if (next > end) {
596 /* Avoid running past the table end */
596 pr_warn(FW_BUG "Record passes table end\n");
601 dmar_table_print_dmar_entry(iter);
603 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
604 /* continue for forward compatibility */
605 pr_debug("Unknown DMAR structure type %d\n",
607 } else if (cb->cb[iter->type]) {
610 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
613 } else if (!cb->ignore_unhandled) {
614 pr_warn("No handler for DMAR structure type %d\n",
623 static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
624 struct dmar_res_callback *cb)
626 return dmar_walk_remapping_entries((void *)(dmar + 1),
627 dmar->header.length - sizeof(*dmar), cb);
631 * parse_dmar_table - parses the DMA reporting table
634 parse_dmar_table(void)
636 struct acpi_table_dmar *dmar;
639 struct dmar_res_callback cb = {
641 .ignore_unhandled = true,
642 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
643 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
644 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
645 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
646 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
647 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
648 .cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
652 * Do it again; the earlier dmar_tbl could have been mapped via a fixed map.
658 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
659 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
661 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
663 dmar = (struct acpi_table_dmar *)dmar_tbl;
667 if (dmar->width < PAGE_SHIFT - 1) {
668 pr_warn("Invalid DMAR haw\n");
672 pr_info("Host address width %d\n", dmar->width + 1);
673 ret = dmar_walk_dmar_table(dmar, &cb);
674 if (ret == 0 && drhd_count == 0)
675 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
680 static int dmar_pci_device_match(struct dmar_dev_scope devices[],
681 int cnt, struct pci_dev *dev)
687 for_each_active_dev_scope(devices, cnt, index, tmp)
688 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
691 /* Check our parent */
692 dev = dev->bus->self;
698 struct dmar_drhd_unit *
699 dmar_find_matched_drhd_unit(struct pci_dev *dev)
701 struct dmar_drhd_unit *dmaru;
702 struct acpi_dmar_hardware_unit *drhd;
704 dev = pci_physfn(dev);
707 for_each_drhd_unit(dmaru) {
708 drhd = container_of(dmaru->hdr,
709 struct acpi_dmar_hardware_unit,
712 if (dmaru->include_all &&
713 drhd->segment == pci_domain_nr(dev->bus))
716 if (dmar_pci_device_match(dmaru->devices,
717 dmaru->devices_cnt, dev))
727 static void __init dmar_acpi_insert_dev_scope(u8 device_number,
728 struct acpi_device *adev)
730 struct dmar_drhd_unit *dmaru;
731 struct acpi_dmar_hardware_unit *drhd;
732 struct acpi_dmar_device_scope *scope;
735 struct acpi_dmar_pci_path *path;
737 for_each_drhd_unit(dmaru) {
738 drhd = container_of(dmaru->hdr,
739 struct acpi_dmar_hardware_unit,
742 for (scope = (void *)(drhd + 1);
743 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
744 scope = ((void *)scope) + scope->length) {
745 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
747 if (scope->enumeration_id != device_number)
750 path = (void *)(scope + 1);
751 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
752 dev_name(&adev->dev), dmaru->reg_base_addr,
753 scope->bus, path->device, path->function);
754 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
756 dmaru->devices[i].bus = scope->bus;
757 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
759 rcu_assign_pointer(dmaru->devices[i].dev,
760 get_device(&adev->dev));
763 BUG_ON(i >= dmaru->devices_cnt);
766 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
767 device_number, dev_name(&adev->dev));
770 static int __init dmar_acpi_dev_scope_init(void)
772 struct acpi_dmar_andd *andd;
774 if (dmar_tbl == NULL)
777 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
778 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
779 andd = ((void *)andd) + andd->header.length) {
780 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
782 struct acpi_device *adev;
784 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
787 pr_err("Failed to find handle for ACPI object %s\n",
791 adev = acpi_fetch_acpi_dev(h);
793 pr_err("Failed to get device for ACPI object %s\n",
797 dmar_acpi_insert_dev_scope(andd->device_number, adev);
803 int __init dmar_dev_scope_init(void)
805 struct pci_dev *dev = NULL;
806 struct dmar_pci_notify_info *info;
808 if (dmar_dev_scope_status != 1)
809 return dmar_dev_scope_status;
811 if (list_empty(&dmar_drhd_units)) {
812 dmar_dev_scope_status = -ENODEV;
814 dmar_dev_scope_status = 0;
816 dmar_acpi_dev_scope_init();
818 for_each_pci_dev(dev) {
822 info = dmar_alloc_pci_notify_info(dev,
823 BUS_NOTIFY_ADD_DEVICE);
826 return dmar_dev_scope_status;
828 dmar_pci_bus_add_dev(info);
829 dmar_free_pci_notify_info(info);
834 return dmar_dev_scope_status;
837 void __init dmar_register_bus_notifier(void)
839 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
843 int __init dmar_table_init(void)
845 static int dmar_table_initialized;
848 if (dmar_table_initialized == 0) {
849 ret = parse_dmar_table();
852 pr_info("Parse DMAR table failure.\n");
853 } else if (list_empty(&dmar_drhd_units)) {
854 pr_info("No DMAR devices found\n");
859 dmar_table_initialized = ret;
861 dmar_table_initialized = 1;
864 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
867 static void warn_invalid_dmar(u64 addr, const char *message)
870 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
871 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
873 dmi_get_system_info(DMI_BIOS_VENDOR),
874 dmi_get_system_info(DMI_BIOS_VERSION),
875 dmi_get_system_info(DMI_PRODUCT_VERSION));
876 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
880 dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
882 struct acpi_dmar_hardware_unit *drhd;
886 drhd = (void *)entry;
887 if (!drhd->address) {
888 warn_invalid_dmar(0, "");
893 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
895 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
897 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
901 cap = dmar_readq(addr + DMAR_CAP_REG);
902 ecap = dmar_readq(addr + DMAR_ECAP_REG);
907 early_iounmap(addr, VTD_PAGE_SIZE);
909 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
910 warn_invalid_dmar(drhd->address, " returns all ones");
917 void __init detect_intel_iommu(void)
920 struct dmar_res_callback validate_drhd_cb = {
921 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
922 .ignore_unhandled = true,
925 down_write(&dmar_global_lock);
926 ret = dmar_table_detect();
928 ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
930 if (!ret && !no_iommu && !iommu_detected &&
931 (!dmar_disabled || dmar_platform_optin())) {
933 /* Make sure ACS will be enabled */
939 x86_init.iommu.iommu_init = intel_iommu_init;
940 x86_platform.iommu_shutdown = intel_iommu_shutdown;
946 acpi_put_table(dmar_tbl);
949 up_write(&dmar_global_lock);
952 static void unmap_iommu(struct intel_iommu *iommu)
955 release_mem_region(iommu->reg_phys, iommu->reg_size);
959 * map_iommu: map the iommu's registers
960 * @iommu: the iommu to map
961 * @drhd: DMA remapping hardware definition structure
963 * Memory-map the iommu's registers. Start with a single page, and
964 * possibly expand if that turns out to be insufficient.
966 static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
968 u64 phys_addr = drhd->reg_base_addr;
971 iommu->reg_phys = phys_addr;
972 iommu->reg_size = drhd->reg_size;
974 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
975 pr_err("Can't reserve memory\n");
980 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
982 pr_err("Can't map the region\n");
987 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
988 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
990 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
992 warn_invalid_dmar(phys_addr, " returns all ones");
996 /* the registers might be more than one page */
997 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
998 cap_max_fault_reg_offset(iommu->cap));
999 map_size = VTD_PAGE_ALIGN(map_size);
1000 if (map_size > iommu->reg_size) {
1001 iounmap(iommu->reg);
1002 release_mem_region(iommu->reg_phys, iommu->reg_size);
1003 iommu->reg_size = map_size;
1004 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
1006 pr_err("Can't reserve memory\n");
1010 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
1012 pr_err("Can't map the region\n");
1018 if (cap_ecmds(iommu->cap)) {
1021 for (i = 0; i < DMA_MAX_NUM_ECMDCAP; i++) {
1022 iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG +
1023 i * DMA_ECMD_REG_STEP);
1031 iounmap(iommu->reg);
1033 release_mem_region(iommu->reg_phys, iommu->reg_size);
1038 static int alloc_iommu(struct dmar_drhd_unit *drhd)
1040 struct intel_iommu *iommu;
1046 if (!drhd->reg_base_addr) {
1047 warn_invalid_dmar(0, "");
1051 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1055 iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
1056 DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
1057 if (iommu->seq_id < 0) {
1058 pr_err("Failed to allocate seq_id\n");
1059 err = iommu->seq_id;
1062 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1064 err = map_iommu(iommu, drhd);
1066 pr_err("Failed to map %s\n", iommu->name);
1067 goto error_free_seq_id;
1071 if (!cap_sagaw(iommu->cap) &&
1072 (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
1073 pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
1078 if (!drhd->ignored) {
1079 agaw = iommu_calculate_agaw(iommu);
1081 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1086 if (!drhd->ignored) {
1087 msagaw = iommu_calculate_max_sagaw(iommu);
1089 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1096 iommu->msagaw = msagaw;
1097 iommu->segment = drhd->segment;
1098 iommu->device_rbtree = RB_ROOT;
1099 spin_lock_init(&iommu->device_rbtree_lock);
1100 mutex_init(&iommu->iopf_lock);
1101 iommu->node = NUMA_NO_NODE;
1103 ver = readl(iommu->reg + DMAR_VER_REG);
1104 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1106 (unsigned long long)drhd->reg_base_addr,
1107 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1108 (unsigned long long)iommu->cap,
1109 (unsigned long long)iommu->ecap);
1111 /* Reflect status in gcmd */
1112 sts = readl(iommu->reg + DMAR_GSTS_REG);
1113 if (sts & DMA_GSTS_IRES)
1114 iommu->gcmd |= DMA_GCMD_IRE;
1115 if (sts & DMA_GSTS_TES)
1116 iommu->gcmd |= DMA_GCMD_TE;
1117 if (sts & DMA_GSTS_QIES)
1118 iommu->gcmd |= DMA_GCMD_QIE;
1120 if (alloc_iommu_pmu(iommu))
1121 pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);
1123 raw_spin_lock_init(&iommu->register_lock);
1126 * A value of N in the PSS field of the ECAP register indicates that the
1127 * hardware supports a PASID field of N+1 bits.
1129 if (pasid_supported(iommu))
1130 iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);
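/* e.g. ecap_pss() == 19 gives max_pasids = 2 << 19 = 1 << 20, i.e. 20-bit PASIDs */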
1133 * This is only for hotplug; at boot time intel_iommu_enabled won't
1134 * be set yet. When intel_iommu_init() runs, it registers the units
1135 * present at boot time, then sets intel_iommu_enabled.
1137 if (intel_iommu_enabled && !drhd->ignored) {
1138 err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1144 err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
1148 iommu_pmu_register(iommu);
1151 drhd->iommu = iommu;
1157 iommu_device_sysfs_remove(&iommu->iommu);
1159 free_iommu_pmu(iommu);
1162 ida_free(&dmar_seq_ids, iommu->seq_id);
1168 static void free_iommu(struct intel_iommu *iommu)
1170 if (intel_iommu_enabled && !iommu->drhd->ignored) {
1171 iommu_pmu_unregister(iommu);
1172 iommu_device_unregister(&iommu->iommu);
1173 iommu_device_sysfs_remove(&iommu->iommu);
1176 free_iommu_pmu(iommu);
1179 if (iommu->pr_irq) {
1180 free_irq(iommu->pr_irq, iommu);
1181 dmar_free_hwirq(iommu->pr_irq);
1184 free_irq(iommu->irq, iommu);
1185 dmar_free_hwirq(iommu->irq);
1190 free_page((unsigned long)iommu->qi->desc);
1191 kfree(iommu->qi->desc_status);
1198 ida_free(&dmar_seq_ids, iommu->seq_id);
1203 * Reclaim all the submitted descriptors which have completed their work.
1205 static inline void reclaim_free_desc(struct q_inval *qi)
1207 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1208 qi->desc_status[qi->free_tail] == QI_ABORT) {
1209 qi->desc_status[qi->free_tail] = QI_FREE;
1210 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1215 static const char *qi_type_string(u8 type)
1219 return "Context-cache Invalidation";
1221 return "IOTLB Invalidation";
1222 case QI_DIOTLB_TYPE:
1223 return "Device-TLB Invalidation";
1225 return "Interrupt Entry Cache Invalidation";
1227 return "Invalidation Wait";
1228 case QI_EIOTLB_TYPE:
1229 return "PASID-based IOTLB Invalidation";
1231 return "PASID-cache Invalidation";
1232 case QI_DEIOTLB_TYPE:
1233 return "PASID-based Device-TLB Invalidation";
1234 case QI_PGRP_RESP_TYPE:
1235 return "Page Group Response";
1241 static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
1243 unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
1244 u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
1245 struct qi_desc *desc = iommu->qi->desc + head;
1247 if (fault & DMA_FSTS_IQE)
1248 pr_err("VT-d detected Invalidation Queue Error: Reason %llx",
1249 DMAR_IQER_REG_IQEI(iqe_err));
1250 if (fault & DMA_FSTS_ITE)
1251 pr_err("VT-d detected Invalidation Time-out Error: SID %llx",
1252 DMAR_IQER_REG_ITESID(iqe_err));
1253 if (fault & DMA_FSTS_ICE)
1254 pr_err("VT-d detected Invalidation Completion Error: SID %llx",
1255 DMAR_IQER_REG_ICESID(iqe_err));
1257 pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
1258 qi_type_string(desc->qw0 & 0xf),
1259 (unsigned long long)desc->qw0,
1260 (unsigned long long)desc->qw1);
1262 head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
1263 head <<= qi_shift(iommu);
1264 desc = iommu->qi->desc + head;
1266 pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
1267 qi_type_string(desc->qw0 & 0xf),
1268 (unsigned long long)desc->qw0,
1269 (unsigned long long)desc->qw1);
1272 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
1277 u64 iqe_err, ite_sid;
1278 struct q_inval *qi = iommu->qi;
1279 int shift = qi_shift(iommu);
1281 if (qi->desc_status[wait_index] == QI_ABORT)
1284 fault = readl(iommu->reg + DMAR_FSTS_REG);
1285 if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
1286 qi_dump_fault(iommu, fault);
1289 * If IQE happens, the head points to the descriptor associated
1290 * with the error. No new descriptors are fetched until the IQE
1293 if (fault & DMA_FSTS_IQE) {
1294 head = readl(iommu->reg + DMAR_IQH_REG);
1295 if ((head >> shift) == index) {
1296 struct qi_desc *desc = qi->desc + head;
1299 * desc->qw2 and desc->qw3 are either reserved or
1300 * used by software as private data. We won't print
1301 * out these two qw's for security considerations.
1303 memcpy(desc, qi->desc + (wait_index << shift),
1305 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1306 pr_info("Invalidation Queue Error (IQE) cleared\n");
1312 * If ITE happens, all pending wait_desc commands are aborted.
1313 * No new descriptors are fetched until the ITE is cleared.
1315 if (fault & DMA_FSTS_ITE) {
1316 head = readl(iommu->reg + DMAR_IQH_REG);
1317 head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1319 tail = readl(iommu->reg + DMAR_IQT_REG);
1320 tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1323 * The SID field is valid only when the ITE field is set in FSTS_REG;
1324 * see Intel VT-d spec r4.1, section 11.4.9.9
1326 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
1327 ite_sid = DMAR_IQER_REG_ITESID(iqe_err);
1329 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1330 pr_info("Invalidation Time-out Error (ITE) cleared\n");
1333 if (qi->desc_status[head] == QI_IN_USE)
1334 qi->desc_status[head] = QI_ABORT;
1335 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1336 } while (head != tail);
1339 * If the device was released or isn't present, there is no need to
1340 * retry the ATS invalidate request anymore.
1342 * An ite_sid value of 0 means an old VT-d device without ite_sid support;
1343 * see Intel VT-d spec r4.1, section 11.4.9.9
1346 dev = device_rbtree_find(iommu, ite_sid);
1347 if (!dev || !dev_is_pci(dev) ||
1348 !pci_device_is_present(to_pci_dev(dev)))
1351 if (qi->desc_status[wait_index] == QI_ABORT)
1355 if (fault & DMA_FSTS_ICE) {
1356 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1357 pr_info("Invalidation Completion Error (ICE) cleared\n");
1364 * Function to submit invalidation descriptors of all types to the queued
1365 * invalidation interface (QI). Multiple descriptors can be submitted at a
1366 * time; a wait descriptor is appended to each submission to ensure that the
1367 * hardware has completed the invalidation before returning. Wait descriptors
1368 * can be part of the submission, but they will not be polled for completion.
1370 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1371 unsigned int count, unsigned long options)
1373 struct q_inval *qi = iommu->qi;
1374 s64 devtlb_start_ktime = 0;
1375 s64 iotlb_start_ktime = 0;
1376 s64 iec_start_ktime = 0;
1377 struct qi_desc wait_desc;
1378 int wait_index, index;
1379 unsigned long flags;
1387 type = desc->qw0 & GENMASK_ULL(3, 0);
1389 if ((type == QI_IOTLB_TYPE || type == QI_EIOTLB_TYPE) &&
1390 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB))
1391 iotlb_start_ktime = ktime_to_ns(ktime_get());
1393 if ((type == QI_DIOTLB_TYPE || type == QI_DEIOTLB_TYPE) &&
1394 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB))
1395 devtlb_start_ktime = ktime_to_ns(ktime_get());
1397 if (type == QI_IEC_TYPE &&
1398 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC))
1399 iec_start_ktime = ktime_to_ns(ktime_get());
1404 raw_spin_lock_irqsave(&qi->q_lock, flags);
1406 * Check if we have enough empty slots in the queue to submit;
1407 * the calculation is based on:
1408 * # of desc + 1 wait desc + 1 space between head and tail
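 * e.g. submitting a single descriptor (count == 1) needs free_cnt >= 3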
1410 while (qi->free_cnt < count + 2) {
1411 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1413 raw_spin_lock_irqsave(&qi->q_lock, flags);
1416 index = qi->free_head;
1417 wait_index = (index + count) % QI_LENGTH;
1418 shift = qi_shift(iommu);
1420 for (i = 0; i < count; i++) {
1421 offset = ((index + i) % QI_LENGTH) << shift;
1422 memcpy(qi->desc + offset, &desc[i], 1 << shift);
1423 qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
1424 trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
1425 desc[i].qw2, desc[i].qw3);
1427 qi->desc_status[wait_index] = QI_IN_USE;
1429 wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
1430 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1431 if (options & QI_OPT_WAIT_DRAIN)
1432 wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
1433 wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
1437 offset = wait_index << shift;
1438 memcpy(qi->desc + offset, &wait_desc, 1 << shift);
1440 qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
1441 qi->free_cnt -= count + 1;
1444 * update the HW tail register indicating the presence of
1447 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1449 while (qi->desc_status[wait_index] != QI_DONE) {
1451 * We leave the interrupts disabled to prevent the interrupt
1452 * context from queueing another cmd while a cmd is already submitted
1453 * and waiting for completion on this CPU. This avoids
1454 * a deadlock where the interrupt context could wait indefinitely
1455 * for free slots in the queue.
1457 rc = qi_check_fault(iommu, index, wait_index);
1461 raw_spin_unlock(&qi->q_lock);
1463 raw_spin_lock(&qi->q_lock);
1466 for (i = 0; i < count; i++)
1467 qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
1469 reclaim_free_desc(qi);
1470 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1475 if (iotlb_start_ktime)
1476 dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB,
1477 ktime_to_ns(ktime_get()) - iotlb_start_ktime);
1479 if (devtlb_start_ktime)
1480 dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB,
1481 ktime_to_ns(ktime_get()) - devtlb_start_ktime);
1483 if (iec_start_ktime)
1484 dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC,
1485 ktime_to_ns(ktime_get()) - iec_start_ktime);
1491 * Flush the global interrupt entry cache.
1493 void qi_global_iec(struct intel_iommu *iommu)
1495 struct qi_desc desc;
1497 desc.qw0 = QI_IEC_TYPE;
1502 /* should never fail */
1503 qi_submit_sync(iommu, &desc, 1, 0);
1506 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1509 struct qi_desc desc;
1511 desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1512 | QI_CC_GRAN(type) | QI_CC_TYPE;
1517 qi_submit_sync(iommu, &desc, 1, 0);
1520 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1521 unsigned int size_order, u64 type)
1525 struct qi_desc desc;
1528 if (cap_write_drain(iommu->cap))
1531 if (cap_read_drain(iommu->cap))
1534 desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1535 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1536 desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1537 | QI_IOTLB_AM(size_order);
1541 qi_submit_sync(iommu, &desc, 1, 0);
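/*
 * A hypothetical call (illustrative values only), flushing 16 KB
 * (size_order = 2) at IOVA 0x10000 for domain-id 1 with page-selective
 * granularity:
 *
 *	qi_flush_iotlb(iommu, 1, 0x10000, 2, DMA_TLB_PSI_FLUSH);
 */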
1544 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1545 u16 qdep, u64 addr, unsigned mask)
1547 struct qi_desc desc;
1550 * VT-d spec, section 4.3:
1552 * Software is recommended to not submit any Device-TLB invalidation
1553 * requests while address remapping hardware is disabled.
1555 if (!(iommu->gcmd & DMA_GCMD_TE))
1559 addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1560 desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1562 desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
1564 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1567 desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1568 QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1572 qi_submit_sync(iommu, &desc, 1, 0);
1575 /* PASID-based IOTLB invalidation */
1576 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1577 unsigned long npages, bool ih)
1579 struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
1582 * npages == -1 means a PASID-selective invalidation; otherwise, a
1583 * positive value means a Page-selective-within-PASID invalidation.
1584 * 0 is not a valid input.
1586 if (WARN_ON(!npages)) {
1587 pr_err("Invalid input npages = %ld\n", npages);
1592 desc.qw0 = QI_EIOTLB_PASID(pasid) |
1593 QI_EIOTLB_DID(did) |
1594 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
1598 int mask = ilog2(__roundup_pow_of_two(npages));
1599 unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
1601 if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
1602 addr = ALIGN_DOWN(addr, align);
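/*
 * Worked example: npages == 9 rounds up to 16, so mask == 4 and
 * align == 1 << 16; addr is then aligned down to a 64 KB boundary.
 */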
1604 desc.qw0 = QI_EIOTLB_PASID(pasid) |
1605 QI_EIOTLB_DID(did) |
1606 QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
1608 desc.qw1 = QI_EIOTLB_ADDR(addr) |
1613 qi_submit_sync(iommu, &desc, 1, 0);
1616 /* PASID-based device IOTLB Invalidate */
1617 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1618 u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
1620 unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
1621 struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1624 * VT-d spec, section 4.3:
1626 * Software is recommended to not submit any Device-TLB invalidation
1627 * requests while address remapping hardware is disabled.
1629 if (!(iommu->gcmd & DMA_GCMD_TE))
1632 desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
1633 QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
1634 QI_DEV_IOTLB_PFSID(pfsid);
1637 * If the S bit is 0, we only flush a single page. If the S bit is set,
1638 * the least significant zero bit indicates the invalidation address
1639 * range; see VT-d spec 6.5.2.6.
1640 * e.g. a zero at address bit 12 indicates 8KB, at bit 13 indicates 16KB.
1641 * size_order = 0 means PAGE_SIZE (4KB).
1642 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in ECAP.
1645 if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
1646 pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
1649 /* Take page address */
1650 desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
1654 * Existing 0s in the address below size_order may include the least
1655 * significant zero bit; we must set them to 1s to avoid indicating a
1656 * smaller size than desired.
1658 desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
1660 /* Clear size_order bit to indicate size */
1662 /* Set the S bit to indicate flushing more than 1 page */
1663 desc.qw1 |= QI_DEV_EIOTLB_SIZE;
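/*
 * Worked example: size_order == 2 (16 KB) first sets address bits 13:12,
 * then clears bit 13 (the mask bit); bit 13 is now the least significant
 * zero bit and thus encodes the 16 KB invalidation range.
 */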
1666 qi_submit_sync(iommu, &desc, 1, 0);
1669 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
1670 u64 granu, u32 pasid)
1672 struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1674 desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
1675 QI_PC_GRAN(granu) | QI_PC_TYPE;
1676 qi_submit_sync(iommu, &desc, 1, 0);
1680 * Disable Queued Invalidation interface.
1682 void dmar_disable_qi(struct intel_iommu *iommu)
1684 unsigned long flags;
1686 cycles_t start_time = get_cycles();
1688 if (!ecap_qis(iommu->ecap))
1691 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1693 sts = readl(iommu->reg + DMAR_GSTS_REG);
1694 if (!(sts & DMA_GSTS_QIES))
1698 * Give the HW a chance to complete the pending invalidation requests.
1700 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1701 readl(iommu->reg + DMAR_IQH_REG)) &&
1702 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1705 iommu->gcmd &= ~DMA_GCMD_QIE;
1706 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1708 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1709 !(sts & DMA_GSTS_QIES), sts);
1711 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1715 * Enable queued invalidation.
1717 static void __dmar_enable_qi(struct intel_iommu *iommu)
1720 unsigned long flags;
1721 struct q_inval *qi = iommu->qi;
1722 u64 val = virt_to_phys(qi->desc);
1724 qi->free_head = qi->free_tail = 0;
1725 qi->free_cnt = QI_LENGTH;
1728 * Set DW=1 and QS=1 in IQA_REG when the Scalable Mode capability is present.
1731 if (ecap_smts(iommu->ecap))
1732 val |= BIT_ULL(11) | BIT_ULL(0);
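/*
 * BIT_ULL(11) is the DW field (256-bit descriptors) and BIT_ULL(0) makes
 * QS = 1, i.e. a queue of 2^1 4 KB pages - matching the two pages
 * allocated in dmar_enable_qi() when scalable mode is supported.
 */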
1734 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1736 /* write zero to the tail reg */
1737 writel(0, iommu->reg + DMAR_IQT_REG);
1739 dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1741 iommu->gcmd |= DMA_GCMD_QIE;
1742 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1744 /* Make sure hardware complete it */
1745 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1747 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1751 * Enable Queued Invalidation interface. This is a must to support
1752 * interrupt-remapping. Also used by DMA-remapping, which replaces
1753 * register based IOTLB invalidation.
1755 int dmar_enable_qi(struct intel_iommu *iommu)
1758 struct page *desc_page;
1760 if (!ecap_qis(iommu->ecap))
1764 * queued invalidation is already set up and enabled.
1769 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1776 * Need two pages to accommodate 256 descriptors of 256 bits each
1777 * if the remapping hardware supports scalable mode translation.
1779 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
1780 !!ecap_smts(iommu->ecap));
1787 qi->desc = page_address(desc_page);
1789 qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
1790 if (!qi->desc_status) {
1791 free_page((unsigned long) qi->desc);
1797 raw_spin_lock_init(&qi->q_lock);
1799 __dmar_enable_qi(iommu);
1804 /* iommu interrupt handling. Most stuff is MSI-like. */
1812 static const char *dma_remap_fault_reasons[] =
1815 "Present bit in root entry is clear",
1816 "Present bit in context entry is clear",
1817 "Invalid context entry",
1818 "Access beyond MGAW",
1819 "PTE Write access is not set",
1820 "PTE Read access is not set",
1821 "Next page table ptr is invalid",
1822 "Root table address invalid",
1823 "Context table ptr is invalid",
1824 "non-zero reserved fields in RTP",
1825 "non-zero reserved fields in CTP",
1826 "non-zero reserved fields in PTE",
1827 "PCE for translation request specifies blocking",
1830 static const char * const dma_remap_sm_fault_reasons[] = {
1831 "SM: Invalid Root Table Address",
1832 "SM: TTM 0 for request with PASID",
1833 "SM: TTM 0 for page group request",
1834 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
1835 "SM: Error attempting to access Root Entry",
1836 "SM: Present bit in Root Entry is clear",
1837 "SM: Non-zero reserved field set in Root Entry",
1838 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
1839 "SM: Error attempting to access Context Entry",
1840 "SM: Present bit in Context Entry is clear",
1841 "SM: Non-zero reserved field set in the Context Entry",
1842 "SM: Invalid Context Entry",
1843 "SM: DTE field in Context Entry is clear",
1844 "SM: PASID Enable field in Context Entry is clear",
1845 "SM: PASID is larger than the max in Context Entry",
1846 "SM: PRE field in Context-Entry is clear",
1847 "SM: RID_PASID field error in Context-Entry",
1848 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
1849 "SM: Error attempting to access the PASID Directory Entry",
1850 "SM: Present bit in Directory Entry is clear",
1851 "SM: Non-zero reserved field set in PASID Directory Entry",
1852 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
1853 "SM: Error attempting to access PASID Table Entry",
1854 "SM: Present bit in PASID Table Entry is clear",
1855 "SM: Non-zero reserved field set in PASID Table Entry",
1856 "SM: Invalid Scalable-Mode PASID Table Entry",
1857 "SM: ERE field is clear in PASID Table Entry",
1858 "SM: SRE field is clear in PASID Table Entry",
1859 "Unknown", "Unknown",/* 0x5E-0x5F */
1860 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
1861 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
1862 "SM: Error attempting to access first-level paging entry",
1863 "SM: Present bit in first-level paging entry is clear",
1864 "SM: Non-zero reserved field set in first-level paging entry",
1865 "SM: Error attempting to access FL-PML4 entry",
1866 "SM: First-level entry address beyond MGAW in Nested translation",
1867 "SM: Read permission error in FL-PML4 entry in Nested translation",
1868 "SM: Read permission error in first-level paging entry in Nested translation",
1869 "SM: Write permission error in first-level paging entry in Nested translation",
1870 "SM: Error attempting to access second-level paging entry",
1871 "SM: Read/Write permission error in second-level paging entry",
1872 "SM: Non-zero reserved field set in second-level paging entry",
1873 "SM: Invalid second-level page table pointer",
1874 "SM: A/D bit update needed in second-level entry when set up in no snoop",
1875 "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
1876 "SM: Address in first-level translation is not canonical",
1877 "SM: U/S set 0 for first-level translation with user privilege",
1878 "SM: No execute permission for request with PASID and ER=1",
1879 "SM: Address beyond the DMA hardware max",
1880 "SM: Second-level entry address beyond the max",
1881 "SM: No write permission for Write/AtomicOp request",
1882 "SM: No read permission for Read/AtomicOp request",
1883 "SM: Invalid address-interrupt address",
1884 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
1885 "SM: A/D bit update needed in first-level entry when set up in no snoop",
1888 static const char *irq_remap_fault_reasons[] =
1890 "Detected reserved fields in the decoded interrupt-remapped request",
1891 "Interrupt index exceeded the interrupt-remapping table size",
1892 "Present field in the IRTE entry is clear",
1893 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1894 "Detected reserved fields in the IRTE entry",
1895 "Blocked a compatibility format interrupt request",
1896 "Blocked an interrupt request due to source-id verification failure",
1899 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1901 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1902 ARRAY_SIZE(irq_remap_fault_reasons))) {
1903 *fault_type = INTR_REMAP;
1904 return irq_remap_fault_reasons[fault_reason - 0x20];
1905 } else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
1906 ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
1907 *fault_type = DMA_REMAP;
1908 return dma_remap_sm_fault_reasons[fault_reason - 0x30];
1909 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1910 *fault_type = DMA_REMAP;
1911 return dma_remap_fault_reasons[fault_reason];
1913 *fault_type = UNKNOWN;
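/* e.g. reason 0x25 selects irq_remap_fault_reasons[5], reason 0x38 selects dma_remap_sm_fault_reasons[8] */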
1919 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1921 if (iommu->irq == irq)
1922 return DMAR_FECTL_REG;
1923 else if (iommu->pr_irq == irq)
1924 return DMAR_PECTL_REG;
1925 else if (iommu->perf_irq == irq)
1926 return DMAR_PERFINTRCTL_REG;
1931 void dmar_msi_unmask(struct irq_data *data)
1933 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1934 int reg = dmar_msi_reg(iommu, data->irq);
1938 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1939 writel(0, iommu->reg + reg);
1940 /* Read a reg to force flush the post write */
1941 readl(iommu->reg + reg);
1942 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1945 void dmar_msi_mask(struct irq_data *data)
1947 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1948 int reg = dmar_msi_reg(iommu, data->irq);
1952 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1953 writel(DMA_FECTL_IM, iommu->reg + reg);
1954 /* Read a reg to force flush the post write */
1955 readl(iommu->reg + reg);
1956 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1959 void dmar_msi_write(int irq, struct msi_msg *msg)
1961 struct intel_iommu *iommu = irq_get_handler_data(irq);
1962 int reg = dmar_msi_reg(iommu, irq);
1965 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1966 writel(msg->data, iommu->reg + reg + 4);
1967 writel(msg->address_lo, iommu->reg + reg + 8);
1968 writel(msg->address_hi, iommu->reg + reg + 12);
1969 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1972 void dmar_msi_read(int irq, struct msi_msg *msg)
1974 struct intel_iommu *iommu = irq_get_handler_data(irq);
1975 int reg = dmar_msi_reg(iommu, irq);
1978 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1979 msg->data = readl(iommu->reg + reg + 4);
1980 msg->address_lo = readl(iommu->reg + reg + 8);
1981 msg->address_hi = readl(iommu->reg + reg + 12);
1982 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1985 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1986 u8 fault_reason, u32 pasid, u16 source_id,
1987 unsigned long long addr)
1992 reason = dmar_get_fault_reason(fault_reason, &fault_type);
1994 if (fault_type == INTR_REMAP) {
1995 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
1996 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1997 PCI_FUNC(source_id & 0xFF), addr >> 48,
1998 fault_reason, reason);
2003 if (pasid == IOMMU_PASID_INVALID)
2004 pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
2005 type ? "DMA Read" : "DMA Write",
2006 source_id >> 8, PCI_SLOT(source_id & 0xFF),
2007 PCI_FUNC(source_id & 0xFF), addr,
2008 fault_reason, reason);
2010 pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
2011 type ? "DMA Read" : "DMA Write", pasid,
2012 source_id >> 8, PCI_SLOT(source_id & 0xFF),
2013 PCI_FUNC(source_id & 0xFF), addr,
2014 fault_reason, reason);
2016 dmar_fault_dump_ptes(iommu, source_id, addr, pasid);
2021 #define PRIMARY_FAULT_REG_LEN (16)
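/*
 * Each primary fault record below is 128 bits wide: the qword at offset +0
 * holds the faulting page address, the dword at +8 holds the source-id and
 * the PASID-present bit, and the dword at +12 holds the PASID value, fault
 * reason, type and the F (fault) bit.
 */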
2022 irqreturn_t dmar_fault(int irq, void *dev_id)
2024 struct intel_iommu *iommu = dev_id;
2025 int reg, fault_index;
2028 static DEFINE_RATELIMIT_STATE(rs,
2029 DEFAULT_RATELIMIT_INTERVAL,
2030 DEFAULT_RATELIMIT_BURST);
2032 raw_spin_lock_irqsave(&iommu->register_lock, flag);
2033 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2034 if (fault_status && __ratelimit(&rs))
2035 pr_err("DRHD: handling fault status reg %x\n", fault_status);
2037 /* TBD: ignore advanced fault log currently */
2038 if (!(fault_status & DMA_FSTS_PPF))
2041 fault_index = dma_fsts_fault_record_index(fault_status);
2042 reg = cap_fault_reg_offset(iommu->cap);
2044 /* Disable printing, simply clear the fault when ratelimited */
2045 bool ratelimited = !__ratelimit(&rs);
2054 /* highest 32 bits */
2055 data = readl(iommu->reg + reg +
2056 fault_index * PRIMARY_FAULT_REG_LEN + 12);
2057 if (!(data & DMA_FRCD_F))
2061 fault_reason = dma_frcd_fault_reason(data);
2062 type = dma_frcd_type(data);
2064 pasid = dma_frcd_pasid_value(data);
2065 data = readl(iommu->reg + reg +
2066 fault_index * PRIMARY_FAULT_REG_LEN + 8);
2067 source_id = dma_frcd_source_id(data);
2069 pasid_present = dma_frcd_pasid_present(data);
2070 guest_addr = dmar_readq(iommu->reg + reg +
2071 fault_index * PRIMARY_FAULT_REG_LEN);
2072 guest_addr = dma_frcd_page_addr(guest_addr);
2075 /* clear the fault */
2076 writel(DMA_FRCD_F, iommu->reg + reg +
2077 fault_index * PRIMARY_FAULT_REG_LEN + 12);
2079 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2082 /* Use IOMMU_PASID_INVALID (-1) if no PASID is present */
2083 dmar_fault_do_one(iommu, type, fault_reason,
2084 pasid_present ? pasid : IOMMU_PASID_INVALID,
2085 source_id, guest_addr);
2088 if (fault_index >= cap_num_fault_regs(iommu->cap))
2090 raw_spin_lock_irqsave(&iommu->register_lock, flag);
2093 writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
2094 iommu->reg + DMAR_FSTS_REG);
2097 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2101 int dmar_set_interrupt(struct intel_iommu *iommu)
2106 * Check if the fault interrupt is already initialized.
2111 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
2115 pr_err("No free IRQ vectors\n");
2119 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
2121 pr_err("Can't request irq\n");
2125 int __init enable_drhd_fault_handling(void)
2127 struct dmar_drhd_unit *drhd;
2128 struct intel_iommu *iommu;
2131 * Enable fault control interrupt.
2133 for_each_iommu(iommu, drhd) {
2135 int ret = dmar_set_interrupt(iommu);
2138 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
2139 (unsigned long long)drhd->reg_base_addr, ret);
2144 * Clear any previous faults.
2146 dmar_fault(iommu->irq, iommu);
2147 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2148 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
2155 * Re-enable Queued Invalidation interface.
2157 int dmar_reenable_qi(struct intel_iommu *iommu)
2159 if (!ecap_qis(iommu->ecap))
2166 * First disable queued invalidation.
2168 dmar_disable_qi(iommu);
2170 * Then enable queued invalidation again. Since there are no pending
2171 * invalidation requests now, it's safe to re-enable queued
2174 __dmar_enable_qi(iommu);
2180 * Check interrupt remapping support in DMAR table description.
2182 int __init dmar_ir_support(void)
2184 struct acpi_table_dmar *dmar;
2185 dmar = (struct acpi_table_dmar *)dmar_tbl;
2188 return dmar->flags & 0x1;
2191 /* Check whether DMAR units are in use */
2192 static inline bool dmar_in_use(void)
2194 return irq_remapping_enabled || intel_iommu_enabled;
2197 static int __init dmar_free_unused_resources(void)
2199 struct dmar_drhd_unit *dmaru, *dmaru_n;
2204 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
2205 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
2207 down_write(&dmar_global_lock);
2208 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
2209 list_del(&dmaru->list);
2210 dmar_free_drhd(dmaru);
2212 up_write(&dmar_global_lock);
2217 late_initcall(dmar_free_unused_resources);
2220 * DMAR Hotplug Support
2221 * For more details, please refer to Intel(R) Virtualization Technology
2222 * for Directed I/O Architecture Specification, Rev 2.2, Section 8.8
2223 * "Remapping Hardware Unit Hot Plug".
2225 static guid_t dmar_hp_guid =
2226 GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
2227 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
2230 * Currently there's only one revision and BIOS will not check the revision id,
2231 * so use 0 for safety.
2233 #define DMAR_DSM_REV_ID 0
2234 #define DMAR_DSM_FUNC_DRHD 1
2235 #define DMAR_DSM_FUNC_ATSR 2
2236 #define DMAR_DSM_FUNC_RHSA 3
2237 #define DMAR_DSM_FUNC_SATC 4
2239 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
2241 return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
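/* e.g. func == DMAR_DSM_FUNC_DRHD tests bit 1 (1 << 1) of the supported-function mask */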
2244 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
2245 dmar_res_handler_t handler, void *arg)
2248 union acpi_object *obj;
2249 struct acpi_dmar_header *start;
2250 struct dmar_res_callback callback;
2251 static int res_type[] = {
2252 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
2253 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
2254 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
2255 [DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC,
2258 if (!dmar_detect_dsm(handle, func))
2261 obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
2262 func, NULL, ACPI_TYPE_BUFFER);
2266 memset(&callback, 0, sizeof(callback));
2267 callback.cb[res_type[func]] = handler;
2268 callback.arg[res_type[func]] = arg;
2269 start = (struct acpi_dmar_header *)obj->buffer.pointer;
2270 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
2277 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
2280 struct dmar_drhd_unit *dmaru;
2282 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2286 ret = dmar_ir_hotplug(dmaru, true);
2288 ret = dmar_iommu_hotplug(dmaru, true);
2293 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
2297 struct dmar_drhd_unit *dmaru;
2299 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2304 * All PCI devices managed by this unit should have been destroyed.
2306 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
2307 for_each_active_dev_scope(dmaru->devices,
2308 dmaru->devices_cnt, i, dev)
2312 ret = dmar_ir_hotplug(dmaru, false);
2314 ret = dmar_iommu_hotplug(dmaru, false);
2319 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
2321 struct dmar_drhd_unit *dmaru;
2323 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2325 list_del_rcu(&dmaru->list);
2327 dmar_free_drhd(dmaru);
2333 static int dmar_hotplug_insert(acpi_handle handle)
2338 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2339 &dmar_validate_one_drhd, (void *)1);
2343 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2344 &dmar_parse_one_drhd, (void *)&drhd_count);
2345 if (ret == 0 && drhd_count == 0) {
2346 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
2352 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
2353 &dmar_parse_one_rhsa, NULL);
2357 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2358 &dmar_parse_one_atsr, NULL);
2362 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2363 &dmar_hp_add_drhd, NULL);
2367 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2368 &dmar_hp_remove_drhd, NULL);
2370 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2371 &dmar_release_one_atsr, NULL);
2373 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2374 &dmar_hp_release_drhd, NULL);
2379 static int dmar_hotplug_remove(acpi_handle handle)
2383 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2384 &dmar_check_one_atsr, NULL);
2388 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2389 &dmar_hp_remove_drhd, NULL);
2391 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2392 &dmar_release_one_atsr, NULL));
2393 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2394 &dmar_hp_release_drhd, NULL));
2396 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2397 &dmar_hp_add_drhd, NULL);
2403 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
2404 void *context, void **retval)
2406 acpi_handle *phdl = retval;
2408 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2410 return AE_CTRL_TERMINATE;
2416 static int dmar_device_hotplug(acpi_handle handle, bool insert)
2419 acpi_handle tmp = NULL;
2425 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2428 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2430 dmar_get_dsm_handle,
2432 if (ACPI_FAILURE(status)) {
2433 pr_warn("Failed to locate _DSM method.\n");
2440 down_write(&dmar_global_lock);
2442 ret = dmar_hotplug_insert(tmp);
2444 ret = dmar_hotplug_remove(tmp);
2445 up_write(&dmar_global_lock);
2450 int dmar_device_add(acpi_handle handle)
2452 return dmar_device_hotplug(handle, true);
2455 int dmar_device_remove(acpi_handle handle)
2457 return dmar_device_hotplug(handle, false);
2461 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
2463 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
2464 * the ACPI DMAR table. This means that the platform boot firmware has made
2465 * sure no device can issue DMA outside of RMRR regions.
2467 bool dmar_platform_optin(void)
2469 struct acpi_table_dmar *dmar;
2473 status = acpi_get_table(ACPI_SIG_DMAR, 0,
2474 (struct acpi_table_header **)&dmar);
2475 if (ACPI_FAILURE(status))
2478 ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
2479 acpi_put_table((struct acpi_table_header *)dmar);
2483 EXPORT_SYMBOL_GPL(dmar_platform_optin);