2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
22 * This file implements early detection/parsing of Remapping Devices
23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * table.
26 * These routines are used by both DMA-remapping and Interrupt-remapping
27 * drivers.
29 #define pr_fmt(fmt) "DMAR: " fmt
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/iova.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/timer.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/slab.h>
41 #include <linux/iommu.h>
42 #include <linux/limits.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/iommu_table.h>
46 #include "irq_remapping.h"
48 typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
49 struct dmar_res_callback {
50 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
51 void *arg[ACPI_DMAR_TYPE_RESERVED];
52 bool ignore_unhandled;
58 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
59 * before IO devices managed by that unit.
60 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
61 * after IO devices managed by that unit.
62 * 3) Hotplug events are rare.
64 * Locking rules for DMA and interrupt remapping related global data structures:
65 * 1) Use dmar_global_lock in process context
66 * 2) Use RCU in interrupt context
68 DECLARE_RWSEM(dmar_global_lock);
69 LIST_HEAD(dmar_drhd_units);
71 struct acpi_table_header * __initdata dmar_tbl;
72 static acpi_size dmar_tbl_size;
73 static int dmar_dev_scope_status = 1;
74 static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
76 static int alloc_iommu(struct dmar_drhd_unit *drhd);
77 static void free_iommu(struct intel_iommu *iommu);
79 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
82 * add INCLUDE_ALL at the tail, so a scan of the list will find it at
83 * the very end.
85 if (drhd->include_all)
86 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
87 else
88 list_add_rcu(&drhd->list, &dmar_drhd_units);
91 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
93 struct acpi_dmar_device_scope *scope;
98 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
99 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
100 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
102 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
103 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
104 pr_warn("Unsupported device scope\n");
106 start += scope->length;
111 return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
114 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
117 struct device *tmp_dev;
119 if (*devices && *cnt) {
120 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
129 /* Optimize out kzalloc()/kfree() for normal cases */
130 static char dmar_pci_notify_info_buf[64];
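/*
 * Build a dmar_pci_notify_info for a PCI bus notification. For device
 * addition events the full path from the root bus is recorded so it can
 * later be matched against DMAR device scopes. The static buffer above
 * is used when it is large enough; otherwise the info is kzalloc()ed.
 */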
132 static struct dmar_pci_notify_info *
133 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
138 struct dmar_pci_notify_info *info;
140 BUG_ON(dev->is_virtfn);
143 * Ignore devices that have a domain number higher than what can
144 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
146 if (pci_domain_nr(dev->bus) > U16_MAX)
149 /* Only generate path[] for device addition event */
150 if (event == BUS_NOTIFY_ADD_DEVICE)
151 for (tmp = dev; tmp; tmp = tmp->bus->self)
154 size = sizeof(*info) + level * sizeof(info->path[0]);
155 if (size <= sizeof(dmar_pci_notify_info_buf)) {
156 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
158 info = kzalloc(size, GFP_KERNEL);
160 pr_warn("Out of memory when allocating notify_info "
161 "for %s.\n", pci_name(dev));
162 if (dmar_dev_scope_status == 0)
163 dmar_dev_scope_status = -ENOMEM;
170 info->seg = pci_domain_nr(dev->bus);
172 if (event == BUS_NOTIFY_ADD_DEVICE) {
173 for (tmp = dev; tmp; tmp = tmp->bus->self) {
175 info->path[level].bus = tmp->bus->number;
176 info->path[level].device = PCI_SLOT(tmp->devfn);
177 info->path[level].function = PCI_FUNC(tmp->devfn);
178 if (pci_is_root_bus(tmp->bus))
179 info->bus = tmp->bus->number;
186 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
188 if ((void *)info != dmar_pci_notify_info_buf)
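/*
 * Return true if the PCI path recorded in @info matches the device
 * scope path @path of length @count starting at @bus, including the
 * FW_BUG workaround below for broken RMRR entries.
 */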
192 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
193 struct acpi_dmar_pci_path *path, int count)
197 if (info->bus != bus)
199 if (info->level != count)
202 for (i = 0; i < count; i++) {
203 if (path[i].device != info->path[i].device ||
204 path[i].function != info->path[i].function)
216 if (bus == info->path[i].bus &&
217 path[0].device == info->path[i].device &&
218 path[0].function == info->path[i].function) {
219 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
220 bus, path[0].device, path[0].function);
227 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
228 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
229 void *start, void *end, u16 segment,
230 struct dmar_dev_scope *devices,
234 struct device *tmp, *dev = &info->dev->dev;
235 struct acpi_dmar_device_scope *scope;
236 struct acpi_dmar_pci_path *path;
238 if (segment != info->seg)
241 for (; start < end; start += scope->length) {
243 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
244 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
247 path = (struct acpi_dmar_pci_path *)(scope + 1);
248 level = (scope->length - sizeof(*scope)) / sizeof(*path);
249 if (!dmar_match_pci_path(info, scope->bus, path, level))
253 * We expect devices with endpoint scope to have normal PCI
254 * headers, and devices with bridge scope to have bridge PCI
255 * headers. However PCI NTB devices may be listed in the
256 * DMAR table with bridge scope, even though they have a
257 * normal PCI header. NTB devices are identified by class
258 * "BRIDGE_OTHER" (0680h) - we don't declare a socpe mismatch
259 * for this special case.
261 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
262 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
263 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
264 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
265 info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
266 pr_warn("Device scope type does not match for %s\n",
267 pci_name(info->dev));
271 for_each_dev_scope(devices, devices_cnt, i, tmp)
273 devices[i].bus = info->dev->bus->number;
274 devices[i].devfn = info->dev->devfn;
275 rcu_assign_pointer(devices[i].dev,
279 BUG_ON(i >= devices_cnt);
285 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
286 struct dmar_dev_scope *devices, int count)
291 if (info->seg != segment)
294 for_each_active_dev_scope(devices, count, index, tmp)
295 if (tmp == &info->dev->dev) {
296 RCU_INIT_POINTER(devices[index].dev, NULL);
305 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
308 struct dmar_drhd_unit *dmaru;
309 struct acpi_dmar_hardware_unit *drhd;
311 for_each_drhd_unit(dmaru) {
312 if (dmaru->include_all)
315 drhd = container_of(dmaru->hdr,
316 struct acpi_dmar_hardware_unit, header);
317 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
318 ((void *)drhd) + drhd->header.length,
320 dmaru->devices, dmaru->devices_cnt);
325 ret = dmar_iommu_notify_scope_dev(info);
326 if (ret < 0 && dmar_dev_scope_status == 0)
327 dmar_dev_scope_status = ret;
332 static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
334 struct dmar_drhd_unit *dmaru;
336 for_each_drhd_unit(dmaru)
337 if (dmar_remove_dev_scope(info, dmaru->segment,
338 dmaru->devices, dmaru->devices_cnt))
340 dmar_iommu_notify_scope_dev(info);
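/*
 * PCI bus notifier: keeps the DMAR device scope lists in sync as
 * physical PCI devices are added to or removed from the system.
 */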
343 static int dmar_pci_bus_notifier(struct notifier_block *nb,
344 unsigned long action, void *data)
346 struct pci_dev *pdev = to_pci_dev(data);
347 struct dmar_pci_notify_info *info;
349 /* Only care about add/remove events for physical functions.
350 * For VFs we actually do the lookup based on the corresponding
351 * PF in device_to_iommu() anyway. */
354 if (action != BUS_NOTIFY_ADD_DEVICE &&
355 action != BUS_NOTIFY_REMOVED_DEVICE)
358 info = dmar_alloc_pci_notify_info(pdev, action);
362 down_write(&dmar_global_lock);
363 if (action == BUS_NOTIFY_ADD_DEVICE)
364 dmar_pci_bus_add_dev(info);
365 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
366 dmar_pci_bus_del_dev(info);
367 up_write(&dmar_global_lock);
369 dmar_free_pci_notify_info(info);
374 static struct notifier_block dmar_pci_bus_nb = {
375 .notifier_call = dmar_pci_bus_notifier,
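/* Find an already-parsed DRHD unit matching @drhd's segment and register base */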
379 static struct dmar_drhd_unit *
380 dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
382 struct dmar_drhd_unit *dmaru;
384 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
385 if (dmaru->segment == drhd->segment &&
386 dmaru->reg_base_addr == drhd->address)
393 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
394 * structure which uniquely represents one DMA remapping hardware unit
395 * present in the platform
397 static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
399 struct acpi_dmar_hardware_unit *drhd;
400 struct dmar_drhd_unit *dmaru;
403 drhd = (struct acpi_dmar_hardware_unit *)header;
404 dmaru = dmar_find_dmaru(drhd);
408 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
413 * If the header was allocated from slab by the ACPI _DSM method, we need to
414 * copy the content because the memory buffer will be freed on return.
416 dmaru->hdr = (void *)(dmaru + 1);
417 memcpy(dmaru->hdr, header, header->length);
418 dmaru->reg_base_addr = drhd->address;
419 dmaru->segment = drhd->segment;
420 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
421 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
422 ((void *)drhd) + drhd->header.length,
423 &dmaru->devices_cnt);
424 if (dmaru->devices_cnt && dmaru->devices == NULL) {
429 ret = alloc_iommu(dmaru);
431 dmar_free_dev_scope(&dmaru->devices,
432 &dmaru->devices_cnt);
436 dmar_register_drhd_unit(dmaru);
445 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
447 if (dmaru->devices && dmaru->devices_cnt)
448 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
450 free_iommu(dmaru->iommu);
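/* Parse one ACPI name-space device declaration (ANDD) structure */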
454 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
457 struct acpi_dmar_andd *andd = (void *)header;
459 /* Check for NUL termination within the designated length */
460 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
462 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
463 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
464 dmi_get_system_info(DMI_BIOS_VENDOR),
465 dmi_get_system_info(DMI_BIOS_VERSION),
466 dmi_get_system_info(DMI_PRODUCT_VERSION));
467 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
470 pr_info("ANDD device: %x name: %s\n", andd->device_number,
476 #ifdef CONFIG_ACPI_NUMA
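/* Bind each DRHD to the NUMA node named by its RHSA proximity domain */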
477 static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
479 struct acpi_dmar_rhsa *rhsa;
480 struct dmar_drhd_unit *drhd;
482 rhsa = (struct acpi_dmar_rhsa *)header;
483 for_each_drhd_unit(drhd) {
484 if (drhd->reg_base_addr == rhsa->base_address) {
485 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
487 if (!node_online(node))
489 drhd->iommu->node = node;
494 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
495 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
497 dmi_get_system_info(DMI_BIOS_VENDOR),
498 dmi_get_system_info(DMI_BIOS_VERSION),
499 dmi_get_system_info(DMI_PRODUCT_VERSION));
500 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
505 #define dmar_parse_one_rhsa dmar_res_noop
509 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
511 struct acpi_dmar_hardware_unit *drhd;
512 struct acpi_dmar_reserved_memory *rmrr;
513 struct acpi_dmar_atsr *atsr;
514 struct acpi_dmar_rhsa *rhsa;
516 switch (header->type) {
517 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
518 drhd = container_of(header, struct acpi_dmar_hardware_unit,
520 pr_info("DRHD base: %#016Lx flags: %#x\n",
521 (unsigned long long)drhd->address, drhd->flags);
523 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
524 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
526 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
527 (unsigned long long)rmrr->base_address,
528 (unsigned long long)rmrr->end_address);
530 case ACPI_DMAR_TYPE_ROOT_ATS:
531 atsr = container_of(header, struct acpi_dmar_atsr, header);
532 pr_info("ATSR flags: %#x\n", atsr->flags);
534 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
535 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
536 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
537 (unsigned long long)rhsa->base_address,
538 rhsa->proximity_domain);
540 case ACPI_DMAR_TYPE_NAMESPACE:
541 /* We don't print this here because we need to sanity-check
542 it first. So print it in dmar_parse_one_andd() instead. */
548 * dmar_table_detect - checks to see if the platform supports DMAR devices
550 static int __init dmar_table_detect(void)
552 acpi_status status = AE_OK;
554 /* if we can find the DMAR table, then there are DMAR devices */
555 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
556 (struct acpi_table_header **)&dmar_tbl,
559 if (ACPI_SUCCESS(status) && !dmar_tbl) {
560 pr_warn("Unable to map DMAR\n");
561 status = AE_NOT_FOUND;
564 return (ACPI_SUCCESS(status) ? 1 : 0);
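/*
 * Walk the remapping structures in [start, start + len), dispatching
 * each entry to the matching callback in @cb. Bails out on zero-length
 * or truncated entries so a malformed ACPI table cannot cause an
 * endless or out-of-bounds walk.
 */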
567 static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
568 size_t len, struct dmar_res_callback *cb)
571 struct acpi_dmar_header *iter, *next;
572 struct acpi_dmar_header *end = ((void *)start) + len;
574 for (iter = start; iter < end && ret == 0; iter = next) {
575 next = (void *)iter + iter->length;
576 if (iter->length == 0) {
577 /* Avoid looping forever on bad ACPI tables */
578 pr_debug(FW_BUG "Invalid 0-length structure\n");
580 } else if (next > end) {
581 /* Avoid running past the table end */
582 pr_warn(FW_BUG "Record passes table end\n");
588 dmar_table_print_dmar_entry(iter);
590 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
591 /* continue for forward compatibility */
592 pr_debug("Unknown DMAR structure type %d\n",
594 } else if (cb->cb[iter->type]) {
595 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
596 } else if (!cb->ignore_unhandled) {
597 pr_warn("No handler for DMAR structure type %d\n",
606 static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
607 struct dmar_res_callback *cb)
609 return dmar_walk_remapping_entries((void *)(dmar + 1),
610 dmar->header.length - sizeof(*dmar), cb);
614 * parse_dmar_table - parses the DMA Remapping Reporting (DMAR) table
617 parse_dmar_table(void)
619 struct acpi_table_dmar *dmar;
622 struct dmar_res_callback cb = {
624 .ignore_unhandled = true,
625 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
626 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
627 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
628 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
629 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
630 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
634 * Do it again, the earlier dmar_tbl mapping could have been made with a
635 * fixed map.
640 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
641 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
643 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
645 dmar = (struct acpi_table_dmar *)dmar_tbl;
649 if (dmar->width < PAGE_SHIFT - 1) {
650 pr_warn("Invalid DMAR haw\n");
654 pr_info("Host address width %d\n", dmar->width + 1);
655 ret = dmar_walk_dmar_table(dmar, &cb);
656 if (ret == 0 && drhd_count == 0)
657 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
662 static int dmar_pci_device_match(struct dmar_dev_scope devices[],
663 int cnt, struct pci_dev *dev)
669 for_each_active_dev_scope(devices, cnt, index, tmp)
670 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
673 /* Check our parent */
674 dev = dev->bus->self;
680 struct dmar_drhd_unit *
681 dmar_find_matched_drhd_unit(struct pci_dev *dev)
683 struct dmar_drhd_unit *dmaru;
684 struct acpi_dmar_hardware_unit *drhd;
686 dev = pci_physfn(dev);
689 for_each_drhd_unit(dmaru) {
690 drhd = container_of(dmaru->hdr,
691 struct acpi_dmar_hardware_unit,
694 if (dmaru->include_all &&
695 drhd->segment == pci_domain_nr(dev->bus))
698 if (dmar_pci_device_match(dmaru->devices,
699 dmaru->devices_cnt, dev))
709 static void __init dmar_acpi_insert_dev_scope(u8 device_number,
710 struct acpi_device *adev)
712 struct dmar_drhd_unit *dmaru;
713 struct acpi_dmar_hardware_unit *drhd;
714 struct acpi_dmar_device_scope *scope;
717 struct acpi_dmar_pci_path *path;
719 for_each_drhd_unit(dmaru) {
720 drhd = container_of(dmaru->hdr,
721 struct acpi_dmar_hardware_unit,
724 for (scope = (void *)(drhd + 1);
725 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
726 scope = ((void *)scope) + scope->length) {
727 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
729 if (scope->enumeration_id != device_number)
732 path = (void *)(scope + 1);
733 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
734 dev_name(&adev->dev), dmaru->reg_base_addr,
735 scope->bus, path->device, path->function);
736 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
738 dmaru->devices[i].bus = scope->bus;
739 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
741 rcu_assign_pointer(dmaru->devices[i].dev,
742 get_device(&adev->dev));
745 BUG_ON(i >= dmaru->devices_cnt);
748 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
749 device_number, dev_name(&adev->dev));
752 static int __init dmar_acpi_dev_scope_init(void)
754 struct acpi_dmar_andd *andd;
756 if (dmar_tbl == NULL)
759 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
760 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
761 andd = ((void *)andd) + andd->header.length) {
762 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
764 struct acpi_device *adev;
766 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
769 pr_err("Failed to find handle for ACPI object %s\n",
773 if (acpi_bus_get_device(h, &adev)) {
774 pr_err("Failed to get device for ACPI object %s\n",
778 dmar_acpi_insert_dev_scope(andd->device_number, adev);
784 int __init dmar_dev_scope_init(void)
786 struct pci_dev *dev = NULL;
787 struct dmar_pci_notify_info *info;
789 if (dmar_dev_scope_status != 1)
790 return dmar_dev_scope_status;
792 if (list_empty(&dmar_drhd_units)) {
793 dmar_dev_scope_status = -ENODEV;
795 dmar_dev_scope_status = 0;
797 dmar_acpi_dev_scope_init();
799 for_each_pci_dev(dev) {
803 info = dmar_alloc_pci_notify_info(dev,
804 BUS_NOTIFY_ADD_DEVICE);
806 return dmar_dev_scope_status;
808 dmar_pci_bus_add_dev(info);
809 dmar_free_pci_notify_info(info);
813 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
816 return dmar_dev_scope_status;
820 int __init dmar_table_init(void)
822 static int dmar_table_initialized;
825 if (dmar_table_initialized == 0) {
826 ret = parse_dmar_table();
829 pr_info("Parse DMAR table failure.\n");
830 } else if (list_empty(&dmar_drhd_units)) {
831 pr_info("No DMAR devices found\n");
836 dmar_table_initialized = ret;
838 dmar_table_initialized = 1;
841 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
844 static void warn_invalid_dmar(u64 addr, const char *message)
847 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
848 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
850 dmi_get_system_info(DMI_BIOS_VENDOR),
851 dmi_get_system_info(DMI_BIOS_VERSION),
852 dmi_get_system_info(DMI_PRODUCT_VERSION));
853 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
857 dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
859 struct acpi_dmar_hardware_unit *drhd;
863 drhd = (void *)entry;
864 if (!drhd->address) {
865 warn_invalid_dmar(0, "");
870 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
872 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
874 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
878 cap = dmar_readq(addr + DMAR_CAP_REG);
879 ecap = dmar_readq(addr + DMAR_ECAP_REG);
884 early_iounmap(addr, VTD_PAGE_SIZE);
886 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
887 warn_invalid_dmar(drhd->address, " returns all ones");
894 int __init detect_intel_iommu(void)
897 struct dmar_res_callback validate_drhd_cb = {
898 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
899 .ignore_unhandled = true,
902 down_write(&dmar_global_lock);
903 ret = dmar_table_detect();
905 ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
907 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
909 /* Make sure ACS will be enabled */
915 x86_init.iommu.iommu_init = intel_iommu_init;
918 early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
920 up_write(&dmar_global_lock);
922 return ret ? 1 : -ENODEV;
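/* unmap_iommu: undo map_iommu(); unmap the registers and release the region */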
926 static void unmap_iommu(struct intel_iommu *iommu)
929 release_mem_region(iommu->reg_phys, iommu->reg_size);
933 * map_iommu: map the iommu's registers
934 * @iommu: the iommu to map
935 * @phys_addr: the physical address of the base register
937 * Memory map the iommu's registers. Start with a single page, and
938 * possibly expand if that turns out to be insufficient.
940 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
944 iommu->reg_phys = phys_addr;
945 iommu->reg_size = VTD_PAGE_SIZE;
947 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
948 pr_err("Can't reserve memory\n");
953 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
955 pr_err("Can't map the region\n");
960 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
961 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
963 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
965 warn_invalid_dmar(phys_addr, " returns all ones");
969 /* the register set might span more than one page */
970 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
971 cap_max_fault_reg_offset(iommu->cap));
972 map_size = VTD_PAGE_ALIGN(map_size);
973 if (map_size > iommu->reg_size) {
975 release_mem_region(iommu->reg_phys, iommu->reg_size);
976 iommu->reg_size = map_size;
977 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
979 pr_err("Can't reserve memory\n");
983 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
985 pr_err("Can't map the region\n");
996 release_mem_region(iommu->reg_phys, iommu->reg_size);
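/* Allocate a unique sequence id for this IOMMU and derive its "dmarN" name */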
1001 static int dmar_alloc_seq_id(struct intel_iommu *iommu)
1003 iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
1004 DMAR_UNITS_SUPPORTED);
1005 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
1008 set_bit(iommu->seq_id, dmar_seq_ids);
1009 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1012 return iommu->seq_id;
1015 static void dmar_free_seq_id(struct intel_iommu *iommu)
1017 if (iommu->seq_id >= 0) {
1018 clear_bit(iommu->seq_id, dmar_seq_ids);
1023 static int alloc_iommu(struct dmar_drhd_unit *drhd)
1025 struct intel_iommu *iommu;
1031 if (!drhd->reg_base_addr) {
1032 warn_invalid_dmar(0, "");
1036 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1040 if (dmar_alloc_seq_id(iommu) < 0) {
1041 pr_err("Failed to allocate seq_id\n");
1046 err = map_iommu(iommu, drhd->reg_base_addr);
1048 pr_err("Failed to map %s\n", iommu->name);
1049 goto error_free_seq_id;
1053 if (cap_sagaw(iommu->cap) == 0) {
1054 pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
1059 if (!drhd->ignored) {
1060 agaw = iommu_calculate_agaw(iommu);
1062 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1067 if (!drhd->ignored) {
1068 msagaw = iommu_calculate_max_sagaw(iommu);
1070 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1077 iommu->msagaw = msagaw;
1078 iommu->segment = drhd->segment;
1082 ver = readl(iommu->reg + DMAR_VER_REG);
1083 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1085 (unsigned long long)drhd->reg_base_addr,
1086 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1087 (unsigned long long)iommu->cap,
1088 (unsigned long long)iommu->ecap);
1090 /* Reflect status in gcmd */
1091 sts = readl(iommu->reg + DMAR_GSTS_REG);
1092 if (sts & DMA_GSTS_IRES)
1093 iommu->gcmd |= DMA_GCMD_IRE;
1094 if (sts & DMA_GSTS_TES)
1095 iommu->gcmd |= DMA_GCMD_TE;
1096 if (sts & DMA_GSTS_QIES)
1097 iommu->gcmd |= DMA_GCMD_QIE;
1099 raw_spin_lock_init(&iommu->register_lock);
1101 if (intel_iommu_enabled && !drhd->ignored) {
1102 iommu->iommu_dev = iommu_device_create(NULL, iommu,
1106 if (IS_ERR(iommu->iommu_dev)) {
1107 err = PTR_ERR(iommu->iommu_dev);
1112 drhd->iommu = iommu;
1120 dmar_free_seq_id(iommu);
1126 static void free_iommu(struct intel_iommu *iommu)
1128 if (intel_iommu_enabled && !iommu->drhd->ignored)
1129 iommu_device_destroy(iommu->iommu_dev);
1132 if (iommu->pr_irq) {
1133 free_irq(iommu->pr_irq, iommu);
1134 dmar_free_hwirq(iommu->pr_irq);
1137 free_irq(iommu->irq, iommu);
1138 dmar_free_hwirq(iommu->irq);
1143 free_page((unsigned long)iommu->qi->desc);
1144 kfree(iommu->qi->desc_status);
1151 dmar_free_seq_id(iommu);
1156 * Reclaim all the submitted descriptors which have completed their work.
1158 static inline void reclaim_free_desc(struct q_inval *qi)
1160 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1161 qi->desc_status[qi->free_tail] == QI_ABORT) {
1162 qi->desc_status[qi->free_tail] = QI_FREE;
1163 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
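/*
 * Check the fault status register for queued invalidation errors (IQE,
 * ITE, ICE) that affect the descriptor at @index, performing the
 * recovery actions described inline below.
 */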
1168 static int qi_check_fault(struct intel_iommu *iommu, int index)
1172 struct q_inval *qi = iommu->qi;
1173 int wait_index = (index + 1) % QI_LENGTH;
1175 if (qi->desc_status[wait_index] == QI_ABORT)
1178 fault = readl(iommu->reg + DMAR_FSTS_REG);
1181 * If IQE happens, the head points to the descriptor associated
1182 * with the error. No new descriptors are fetched until the IQE
1183 * is cleared.
1185 if (fault & DMA_FSTS_IQE) {
1186 head = readl(iommu->reg + DMAR_IQH_REG);
1187 if ((head >> DMAR_IQ_SHIFT) == index) {
1188 pr_err("VT-d detected invalid descriptor: "
1189 "low=%llx, high=%llx\n",
1190 (unsigned long long)qi->desc[index].low,
1191 (unsigned long long)qi->desc[index].high);
1192 memcpy(&qi->desc[index], &qi->desc[wait_index],
1193 sizeof(struct qi_desc));
1194 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1200 * If ITE happens, all pending wait_desc commands are aborted.
1201 * No new descriptors are fetched until the ITE is cleared.
1203 if (fault & DMA_FSTS_ITE) {
1204 head = readl(iommu->reg + DMAR_IQH_REG);
1205 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1207 tail = readl(iommu->reg + DMAR_IQT_REG);
1208 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1210 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1213 if (qi->desc_status[head] == QI_IN_USE)
1214 qi->desc_status[head] = QI_ABORT;
1215 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1216 } while (head != tail);
1218 if (qi->desc_status[wait_index] == QI_ABORT)
1222 if (fault & DMA_FSTS_ICE)
1223 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1229 * Submit the queued invalidation descriptor to the remapping
1230 * hardware unit and wait for its completion.
1232 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
1235 struct q_inval *qi = iommu->qi;
1236 struct qi_desc *hw, wait_desc;
1237 int wait_index, index;
1238 unsigned long flags;
1248 raw_spin_lock_irqsave(&qi->q_lock, flags);
1249 while (qi->free_cnt < 3) {
1250 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1252 raw_spin_lock_irqsave(&qi->q_lock, flags);
1255 index = qi->free_head;
1256 wait_index = (index + 1) % QI_LENGTH;
1258 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1262 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
1263 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1264 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
1266 hw[wait_index] = wait_desc;
1268 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1272 * update the HW tail register indicating the presence of
1275 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
1277 while (qi->desc_status[wait_index] != QI_DONE) {
1279 * We will leave the interrupts disabled, to prevent interrupt
1280 * context from queueing another cmd while a cmd is already submitted
1281 * and waiting for completion on this cpu. This is to avoid
1282 * a deadlock where the interrupt context can wait indefinitely
1283 * for free slots in the queue.
1285 rc = qi_check_fault(iommu, index);
1289 raw_spin_unlock(&qi->q_lock);
1291 raw_spin_lock(&qi->q_lock);
1294 qi->desc_status[index] = QI_DONE;
1296 reclaim_free_desc(qi);
1297 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1306 * Flush the global interrupt entry cache.
1308 void qi_global_iec(struct intel_iommu *iommu)
1310 struct qi_desc desc;
1312 desc.low = QI_IEC_TYPE;
1315 /* should never fail */
1316 qi_submit_sync(&desc, iommu);
1319 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1322 struct qi_desc desc;
1324 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1325 | QI_CC_GRAN(type) | QI_CC_TYPE;
1328 qi_submit_sync(&desc, iommu);
1331 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1332 unsigned int size_order, u64 type)
1336 struct qi_desc desc;
1339 if (cap_write_drain(iommu->cap))
1342 if (cap_read_drain(iommu->cap))
1345 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1346 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1347 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1348 | QI_IOTLB_AM(size_order);
1350 qi_submit_sync(&desc, iommu);
1353 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1354 u16 qdep, u64 addr, unsigned mask)
1356 struct qi_desc desc;
1359 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
1360 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1361 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1363 desc.high = QI_DEV_IOTLB_ADDR(addr);
1365 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1368 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1369 QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1371 qi_submit_sync(&desc, iommu);
1375 * Disable the Queued Invalidation interface.
1377 void dmar_disable_qi(struct intel_iommu *iommu)
1379 unsigned long flags;
1381 cycles_t start_time = get_cycles();
1383 if (!ecap_qis(iommu->ecap))
1386 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1388 sts = readl(iommu->reg + DMAR_GSTS_REG);
1389 if (!(sts & DMA_GSTS_QIES))
1393 * Give the HW a chance to complete the pending invalidation requests.
1395 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1396 readl(iommu->reg + DMAR_IQH_REG)) &&
1397 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1400 iommu->gcmd &= ~DMA_GCMD_QIE;
1401 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1403 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1404 !(sts & DMA_GSTS_QIES), sts);
1406 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1410 * Enable queued invalidation.
1412 static void __dmar_enable_qi(struct intel_iommu *iommu)
1415 unsigned long flags;
1416 struct q_inval *qi = iommu->qi;
1418 qi->free_head = qi->free_tail = 0;
1419 qi->free_cnt = QI_LENGTH;
1421 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1423 /* write zero to the tail reg */
1424 writel(0, iommu->reg + DMAR_IQT_REG);
1426 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1428 iommu->gcmd |= DMA_GCMD_QIE;
1429 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1431 /* Make sure hardware completes it */
1432 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1434 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1438 * Enable the Queued Invalidation interface. This is a must to support
1439 * interrupt remapping. It is also used by DMA remapping, where it
1440 * replaces register-based IOTLB invalidation.
1442 int dmar_enable_qi(struct intel_iommu *iommu)
1445 struct page *desc_page;
1447 if (!ecap_qis(iommu->ecap))
1451 * queued invalidation is already set up and enabled.
1456 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1463 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1470 qi->desc = page_address(desc_page);
1472 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1473 if (!qi->desc_status) {
1474 free_page((unsigned long) qi->desc);
1480 raw_spin_lock_init(&qi->q_lock);
1482 __dmar_enable_qi(iommu);
1487 /* iommu interrupt handling. Most of it is MSI-like. */
1495 static const char *dma_remap_fault_reasons[] =
1498 "Present bit in root entry is clear",
1499 "Present bit in context entry is clear",
1500 "Invalid context entry",
1501 "Access beyond MGAW",
1502 "PTE Write access is not set",
1503 "PTE Read access is not set",
1504 "Next page table ptr is invalid",
1505 "Root table address invalid",
1506 "Context table ptr is invalid",
1507 "non-zero reserved fields in RTP",
1508 "non-zero reserved fields in CTP",
1509 "non-zero reserved fields in PTE",
1510 "PCE for translation request specifies blocking",
1513 static const char *irq_remap_fault_reasons[] =
1515 "Detected reserved fields in the decoded interrupt-remapped request",
1516 "Interrupt index exceeded the interrupt-remapping table size",
1517 "Present field in the IRTE entry is clear",
1518 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1519 "Detected reserved fields in the IRTE entry",
1520 "Blocked a compatibility format interrupt request",
1521 "Blocked an interrupt request due to source-id verification failure",
1524 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1526 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1527 ARRAY_SIZE(irq_remap_fault_reasons))) {
1528 *fault_type = INTR_REMAP;
1529 return irq_remap_fault_reasons[fault_reason - 0x20];
1530 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1531 *fault_type = DMA_REMAP;
1532 return dma_remap_fault_reasons[fault_reason];
1534 *fault_type = UNKNOWN;
1540 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1542 if (iommu->irq == irq)
1543 return DMAR_FECTL_REG;
1544 else if (iommu->pr_irq == irq)
1545 return DMAR_PECTL_REG;
1550 void dmar_msi_unmask(struct irq_data *data)
1552 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1553 int reg = dmar_msi_reg(iommu, data->irq);
1557 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1558 writel(0, iommu->reg + reg);
1559 /* Read a reg to force-flush the posted write */
1560 readl(iommu->reg + reg);
1561 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1564 void dmar_msi_mask(struct irq_data *data)
1566 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1567 int reg = dmar_msi_reg(iommu, data->irq);
1571 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1572 writel(DMA_FECTL_IM, iommu->reg + reg);
1573 /* Read a reg to force-flush the posted write */
1574 readl(iommu->reg + reg);
1575 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1578 void dmar_msi_write(int irq, struct msi_msg *msg)
1580 struct intel_iommu *iommu = irq_get_handler_data(irq);
1581 int reg = dmar_msi_reg(iommu, irq);
1584 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1585 writel(msg->data, iommu->reg + reg + 4);
1586 writel(msg->address_lo, iommu->reg + reg + 8);
1587 writel(msg->address_hi, iommu->reg + reg + 12);
1588 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1591 void dmar_msi_read(int irq, struct msi_msg *msg)
1593 struct intel_iommu *iommu = irq_get_handler_data(irq);
1594 int reg = dmar_msi_reg(iommu, irq);
1597 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1598 msg->data = readl(iommu->reg + reg + 4);
1599 msg->address_lo = readl(iommu->reg + reg + 8);
1600 msg->address_hi = readl(iommu->reg + reg + 12);
1601 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1604 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1605 u8 fault_reason, u16 source_id, unsigned long long addr)
1610 reason = dmar_get_fault_reason(fault_reason, &fault_type);
1612 if (fault_type == INTR_REMAP)
1613 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
1614 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1615 PCI_FUNC(source_id & 0xFF), addr >> 48,
1616 fault_reason, reason);
1618 pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
1619 type ? "DMA Read" : "DMA Write",
1620 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1621 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1625 #define PRIMARY_FAULT_REG_LEN (16)
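/*
 * Primary fault interrupt handler: walks the fault recording registers,
 * logging each pending fault (rate-limited) and clearing it.
 */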
1626 irqreturn_t dmar_fault(int irq, void *dev_id)
1628 struct intel_iommu *iommu = dev_id;
1629 int reg, fault_index;
1633 static DEFINE_RATELIMIT_STATE(rs,
1634 DEFAULT_RATELIMIT_INTERVAL,
1635 DEFAULT_RATELIMIT_BURST);
1637 /* Disable printing, simply clear the fault when ratelimited */
1638 ratelimited = !__ratelimit(&rs);
1640 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1641 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1642 if (fault_status && !ratelimited)
1643 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1645 /* TBD: ignore advanced fault log currently */
1646 if (!(fault_status & DMA_FSTS_PPF))
1649 fault_index = dma_fsts_fault_record_index(fault_status);
1650 reg = cap_fault_reg_offset(iommu->cap);
1658 /* highest 32 bits */
1659 data = readl(iommu->reg + reg +
1660 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1661 if (!(data & DMA_FRCD_F))
1665 fault_reason = dma_frcd_fault_reason(data);
1666 type = dma_frcd_type(data);
1668 data = readl(iommu->reg + reg +
1669 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1670 source_id = dma_frcd_source_id(data);
1672 guest_addr = dmar_readq(iommu->reg + reg +
1673 fault_index * PRIMARY_FAULT_REG_LEN);
1674 guest_addr = dma_frcd_page_addr(guest_addr);
1677 /* clear the fault */
1678 writel(DMA_FRCD_F, iommu->reg + reg +
1679 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1681 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1684 dmar_fault_do_one(iommu, type, fault_reason,
1685 source_id, guest_addr);
1688 if (fault_index >= cap_num_fault_regs(iommu->cap))
1690 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1693 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1696 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1700 int dmar_set_interrupt(struct intel_iommu *iommu)
1705 * Check if the fault interrupt is already initialized.
1710 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1714 pr_err("No free IRQ vectors\n");
1718 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1720 pr_err("Can't request irq\n");
1724 int __init enable_drhd_fault_handling(void)
1726 struct dmar_drhd_unit *drhd;
1727 struct intel_iommu *iommu;
1730 * Enable fault control interrupt.
1732 for_each_iommu(iommu, drhd) {
1734 int ret = dmar_set_interrupt(iommu);
1737 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
1738 (unsigned long long)drhd->reg_base_addr, ret);
1743 * Clear any previous faults.
1745 dmar_fault(iommu->irq, iommu);
1746 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1747 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1754 * Re-enable the Queued Invalidation interface.
1756 int dmar_reenable_qi(struct intel_iommu *iommu)
1758 if (!ecap_qis(iommu->ecap))
1765 * First disable queued invalidation.
1767 dmar_disable_qi(iommu);
1769 * Then enable queued invalidation again. Since there are no pending
1770 * invalidation requests now, it's safe to re-enable queued
1771 * invalidation.
1773 __dmar_enable_qi(iommu);
1779 * Check interrupt remapping support in DMAR table description.
1781 int __init dmar_ir_support(void)
1783 struct acpi_table_dmar *dmar;
1784 dmar = (struct acpi_table_dmar *)dmar_tbl;
1787 return dmar->flags & 0x1;
1790 /* Check whether DMAR units are in use */
1791 static inline bool dmar_in_use(void)
1793 return irq_remapping_enabled || intel_iommu_enabled;
1796 static int __init dmar_free_unused_resources(void)
1798 struct dmar_drhd_unit *dmaru, *dmaru_n;
1803 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1804 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
1806 down_write(&dmar_global_lock);
1807 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1808 list_del(&dmaru->list);
1809 dmar_free_drhd(dmaru);
1811 up_write(&dmar_global_lock);
1816 late_initcall(dmar_free_unused_resources);
1817 IOMMU_INIT_POST(detect_intel_iommu);
1820 * DMAR Hotplug Support
1821 * For more details, please refer to Intel(R) Virtualization Technology
1822 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
1823 * "Remapping Hardware Unit Hot Plug".
1825 static u8 dmar_hp_uuid[] = {
1826 /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
1827 /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
1831 * Currently there's only one revision and BIOS will not check the revision id,
1832 * so use 0 for safety.
1834 #define DMAR_DSM_REV_ID 0
1835 #define DMAR_DSM_FUNC_DRHD 1
1836 #define DMAR_DSM_FUNC_ATSR 2
1837 #define DMAR_DSM_FUNC_RHSA 3
1839 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1841 return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
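/*
 * Evaluate DMAR _DSM function @func on @handle and walk the remapping
 * structures in the returned buffer, dispatching entries of the
 * corresponding type to @handler.
 */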
1844 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1845 dmar_res_handler_t handler, void *arg)
1848 union acpi_object *obj;
1849 struct acpi_dmar_header *start;
1850 struct dmar_res_callback callback;
1851 static int res_type[] = {
1852 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1853 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1854 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1857 if (!dmar_detect_dsm(handle, func))
1860 obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
1861 func, NULL, ACPI_TYPE_BUFFER);
1865 memset(&callback, 0, sizeof(callback));
1866 callback.cb[res_type[func]] = handler;
1867 callback.arg[res_type[func]] = arg;
1868 start = (struct acpi_dmar_header *)obj->buffer.pointer;
1869 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1876 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1879 struct dmar_drhd_unit *dmaru;
1881 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1885 ret = dmar_ir_hotplug(dmaru, true);
1887 ret = dmar_iommu_hotplug(dmaru, true);
1892 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
1896 struct dmar_drhd_unit *dmaru;
1898 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1903 * All PCI devices managed by this unit should have been destroyed.
1905 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
1906 for_each_active_dev_scope(dmaru->devices,
1907 dmaru->devices_cnt, i, dev)
1911 ret = dmar_ir_hotplug(dmaru, false);
1913 ret = dmar_iommu_hotplug(dmaru, false);
1918 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
1920 struct dmar_drhd_unit *dmaru;
1922 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1924 list_del_rcu(&dmaru->list);
1926 dmar_free_drhd(dmaru);
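/*
 * Hot-add path: validate and parse the DRHD structures reported by
 * _DSM, parse the associated RHSA and ATSR structures, then bring the
 * new units online; on failure, unwind in reverse order.
 */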
1932 static int dmar_hotplug_insert(acpi_handle handle)
1937 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1938 &dmar_validate_one_drhd, (void *)1);
1942 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1943 &dmar_parse_one_drhd, (void *)&drhd_count);
1944 if (ret == 0 && drhd_count == 0) {
1945 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
1951 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
1952 &dmar_parse_one_rhsa, NULL);
1956 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1957 &dmar_parse_one_atsr, NULL);
1961 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1962 &dmar_hp_add_drhd, NULL);
1966 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1967 &dmar_hp_remove_drhd, NULL);
1969 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1970 &dmar_release_one_atsr, NULL);
1972 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1973 &dmar_hp_release_drhd, NULL);
1978 static int dmar_hotplug_remove(acpi_handle handle)
1982 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1983 &dmar_check_one_atsr, NULL);
1987 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1988 &dmar_hp_remove_drhd, NULL);
1990 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1991 &dmar_release_one_atsr, NULL));
1992 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1993 &dmar_hp_release_drhd, NULL));
1995 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1996 &dmar_hp_add_drhd, NULL);
2002 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
2003 void *context, void **retval)
2005 acpi_handle *phdl = retval;
2007 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2009 return AE_CTRL_TERMINATE;
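/*
 * Perform a DMAR unit hot-add or hot-remove for the ACPI object at
 * @handle, first locating the object (possibly a descendant) that
 * implements the DMAR _DSM.
 */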
2015 static int dmar_device_hotplug(acpi_handle handle, bool insert)
2018 acpi_handle tmp = NULL;
2024 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2027 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2029 dmar_get_dsm_handle,
2031 if (ACPI_FAILURE(status)) {
2032 pr_warn("Failed to locate _DSM method.\n");
2039 down_write(&dmar_global_lock);
2041 ret = dmar_hotplug_insert(tmp);
2043 ret = dmar_hotplug_remove(tmp);
2044 up_write(&dmar_global_lock);
2049 int dmar_device_add(acpi_handle handle)
2051 return dmar_device_hotplug(handle, true);
2054 int dmar_device_remove(acpi_handle handle)
2056 return dmar_device_hotplug(handle, false);