/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */
#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
};
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
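/*
 * Illustrative sketch (editorial, not part of the driver): under these
 * rules a writer updates the DRHD list while holding dmar_global_lock,
 * and an interrupt-context reader walks it under RCU, e.g.:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
 *		... use dmaru ...
 *	rcu_read_unlock();
 */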
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/*
	 * Ignore devices that have a domain number higher than what can
	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
	 */
	if (pci_domain_nr(dev->bus) > U16_MAX)
		return NULL;

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(info->path[0]);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info "
				"for %s.\n", pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus              == info->path[i].bus &&
	    path[0].device   == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
};
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		goto out;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return ret;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		pr_warn(FW_BUG
			"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}
#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	pr_warn(FW_BUG
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif
static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	int ret = 0;
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end && ret == 0; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			ret = -EINVAL;
		}
	}

	return ret;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int ret = 0;
	int drhd_count = 0;
	struct dmar_res_callback cb = {
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}
static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}
int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	pr_warn_once(FW_BUG
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}

static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					    &validate_drhd_cb);
	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	if (cap_sagaw(iommu->cap) == 0) {
		pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
			iommu->name);
		drhd->ignored = 1;
	}

	if (!drhd->ignored) {
		agaw = iommu_calculate_agaw(iommu);
		if (agaw < 0) {
			pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
			       iommu->seq_id);
			drhd->ignored = 1;
		}
	}
	if (!drhd->ignored) {
		msagaw = iommu_calculate_max_sagaw(iommu);
		if (msagaw < 0) {
			pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			       iommu->seq_id);
			drhd->ignored = 1;
			agaw = -1;
		}
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	iommu->drhd = drhd;

	if (intel_iommu_enabled && !drhd->ignored)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

	return 0;

error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
	if (intel_iommu_enabled && !iommu->drhd->ignored)
		iommu_device_destroy(iommu->iommu_dev);

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;
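	/*
	 * The slot after the real descriptor holds an Invalidation Wait
	 * Descriptor: when hardware retires it, it writes QI_DONE into
	 * the status slot whose physical address is programmed below,
	 * which is what the wait loop further down polls for.
	 */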
	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);

	qi_submit_sync(&desc, iommu);
}
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of it is MSI-like. */
enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
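/*
 * Editorial note: judging from dmar_msi_read()/dmar_msi_write() below,
 * each MSI-like register block is laid out as control at +0, data at
 * +4, address low at +8 and address high at +12 from the base register
 * returned by dmar_msi_reg().
 */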
static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);

	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
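/*
 * Editorial note, inferred from the reads in dmar_fault() below: each
 * 16-byte primary fault record holds the faulting page address in
 * bytes 0-7, the source-id in bytes 8-11, and the fault info (F bit,
 * reason, type) in bytes 12-15.
 */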
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static u8 dmar_hp_uuid[] = {
	/* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
	/* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};
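/*
 * Editorial note: decoded from the byte array above (little-endian
 * leading fields), this is the UUID D8C1A3A6-BE9B-4C9B-91BF-C3CB81FC5DAF.
 */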
/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
}

static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}
static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}
static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}

static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}
static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}

int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}