/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
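/*
 * Callback dispatch table for walking DMAR remapping structures: one
 * handler and argument slot per ACPI DMAR structure type, consumed by
 * dmar_walk_remapping_entries() below.
 */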
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
};

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
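/*
 * Note: dmar_dev_scope_status is tri-state: 1 means the device scopes have
 * not been initialized yet, 0 means initialization succeeded, and a negative
 * errno records the first failure (see dmar_dev_scope_init() below).
 * dmar_seq_ids is the bitmap backing dmar_alloc_seq_id()/dmar_free_seq_id().
 */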
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

extern const struct iommu_ops intel_iommu_ops;

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
	/*
	 * add INCLUDE_ALL at the tail, so scanning the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
	struct acpi_dmar_device_scope *scope;

		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		start += scope->length;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/*
	 * Ignore devices that have a domain number higher than what can
	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
	 */
	if (pci_domain_nr(dev->bus) > U16_MAX)

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)

	size = sizeof(*info) + level * sizeof(info->path[0]);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;

	info->seg = pci_domain_nr(dev->bus);

	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
	if ((void *)info != dmar_pci_notify_info_buf)
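/*
 * Compare the ACPI-provided scope path (bus plus dev/fn hops) against the
 * PCI path cached in @info; as a workaround for broken RMRR entries, a
 * single-hop path matching any hop of the device's path is also accepted
 * (see the FW_BUG message below).
 */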
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
	if (info->bus != bus)
	if (info->level != count)

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)

		if (bus == info->path[i].bus &&
		    path[0].device == info->path[i].device &&
		    path[0].function == info->path[i].function) {
			pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
				bus, path[0].device, path[0].function);

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)

	for (; start < end; start += scope->length) {
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers.  However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header.  NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			devices[i].bus = info->dev->bus->number;
			devices[i].devfn = info->dev->devfn;
			rcu_assign_pointer(devices[i].dev,
		BUG_ON(i >= devices_cnt);

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
	if (info->seg != segment)

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
					    ((void *)drhd) + drhd->header.length,
					    dmaru->devices, dmaru->devices_cnt);

	ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
					  dmaru->devices, dmaru->devices_cnt))
	dmar_iommu_notify_scope_dev(info);
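/*
 * PCI bus notifier: on physical-function add/remove events, update the
 * device scopes of all DRHD units under dmar_global_lock.
 */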
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)

	info = dmar_alloc_pci_notify_info(pdev, action);

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
};

static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {

	ret = alloc_iommu(dmaru);
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);

	dmar_register_drhd_unit(dmaru);

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	free_iommu(dmaru->iommu);

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		       "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
		       "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		       dmi_get_system_info(DMI_BIOS_VENDOR),
		       dmi_get_system_info(DMI_BIOS_VERSION),
		       dmi_get_system_info(DMI_PRODUCT_VERSION));
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

	pr_info("ANDD device: %x name: %s\n", andd->device_number,

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
			drhd->iommu->node = node;

	       "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
	       "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
	       dmi_get_system_info(DMI_BIOS_VENDOR),
	       dmi_get_system_info(DMI_BIOS_VERSION),
	       dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

#define	dmar_parse_one_rhsa		dmar_res_noop
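/* Log a one-line summary of each remapping structure as the table is parsed. */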
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;

	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
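/*
 * Walk a buffer of variable-length DMAR remapping structures, guarding
 * against zero-length and truncated records, and dispatch each entry to
 * the handler registered for its type in @cb.
 */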
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");

			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);

/**
 * parse_dmar_table - parses the DMA reporting table
 */
parse_dmar_table(void)
	struct acpi_table_dmar *dmar;
	struct dmar_res_callback cb = {
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))

		/* Check our parent */
		dev = dev->bus->self;

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))

static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
			if (scope->enumeration_id != device_number)

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				dmaru->devices[i].bus = scope->bus;
				dmaru->devices[i].devfn = PCI_DEVFN(path->device,
				rcu_assign_pointer(dmaru->devices[i].dev,
						   get_device(&adev->dev));
			BUG_ON(i >= dmaru->devices_cnt);

	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));

static int __init dmar_acpi_dev_scope_init(void)
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
				pr_err("Failed to find handle for ACPI object %s\n",
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
			dmar_acpi_insert_dev_scope(andd->device_number, adev);

int __init dmar_dev_scope_init(void)
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
				return dmar_dev_scope_status;
			dmar_pci_bus_add_dev(info);
			dmar_free_pci_notify_info(info);

	return dmar_dev_scope_status;

void __init dmar_register_bus_notifier(void)
	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);

int __init dmar_table_init(void)
	static int dmar_table_initialized;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
			pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");

			dmar_table_initialized = ret;
			dmar_table_initialized = 1;

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;

static void warn_invalid_dmar(u64 addr, const char *message)
	       "Your BIOS is broken; DMAR reported at address %llx%s!\n"
	       "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
	       dmi_get_system_info(DMI_BIOS_VENDOR),
	       dmi_get_system_info(DMI_BIOS_VERSION),
	       dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
	struct acpi_dmar_hardware_unit *drhd;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");

		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
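/*
 * Early detection entry point: map and sanity-check every DRHD's register
 * page; on success (on x86) wire intel_iommu_init() into
 * x86_init.iommu.iommu_init.
 */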
int __init detect_intel_iommu(void)
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
	if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		/* Make sure ACS will be enabled */
		x86_init.iommu.iommu_init = intel_iommu_init;

		acpi_put_table(dmar_tbl);

	up_write(&dmar_global_lock);

	return ret ? ret : 1;

static void unmap_iommu(struct intel_iommu *iommu)
	release_mem_region(iommu->reg_phys, iommu->reg_size);

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		pr_err("Can't map the region\n");

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(phys_addr, " returns all ones");

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
			pr_err("Can't reserve memory\n");
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
			pr_err("Can't map the region\n");

	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);

static int dmar_alloc_seq_id(struct intel_iommu *iommu)
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);

	return iommu->seq_id;

static void dmar_free_seq_id(struct intel_iommu *iommu)
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
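/*
 * Allocate and initialize the intel_iommu for a DRHD: assign a sequence
 * id, map the register set, read cap/ecap, compute agaw/msagaw, and
 * register with the IOMMU core when the unit is not ignored.
 */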
static int alloc_iommu(struct dmar_drhd_unit *drhd)
	struct intel_iommu *iommu;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");

	err = map_iommu(iommu, drhd->reg_base_addr);
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;

	if (cap_sagaw(iommu->cap) == 0) {
		pr_info("%s: No supported address widths. Not attempting DMA translation.\n",

	if (!drhd->ignored) {
		agaw = iommu_calculate_agaw(iommu);
			pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
	if (!drhd->ignored) {
		msagaw = iommu_calculate_max_sagaw(iommu);
			pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	/*
	 * This is only for hotplug; at boot time intel_iommu_enabled won't
	 * be set yet. When intel_iommu_init() runs, it registers the units
	 * present at boot time, then sets intel_iommu_enabled.
	 */
	if (intel_iommu_enabled && !drhd->ignored) {
		err = iommu_device_sysfs_add(&iommu->iommu, NULL,

		iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);

		err = iommu_device_register(&iommu->iommu);

	drhd->iommu = iommu;

	iommu_device_sysfs_remove(&iommu->iommu);
	dmar_free_seq_id(iommu);

static void free_iommu(struct intel_iommu *iommu)
	if (intel_iommu_enabled && !iommu->drhd->ignored) {
		iommu_device_unregister(&iommu->iommu);
		iommu_device_sysfs_remove(&iommu->iommu);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);

		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);

	dmar_free_seq_id(iommu);

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
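/*
 * Inspect the fault status register for queue errors affecting the
 * descriptor at @index: on IQE the offending descriptor is overwritten
 * with the wait descriptor so the queue can make progress, on ITE the
 * pending waits between head and tail are marked QI_ABORT, and ICE is
 * simply cleared.
 */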
static int qi_check_fault(struct intel_iommu *iommu, int index)
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
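/*
 * Note: each submission consumes two queue slots, the request itself plus
 * a trailing wait descriptor whose status write-back (QI_DONE) is what
 * the polling loop below waits on.
 */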
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		raw_spin_lock_irqsave(&qi->q_lock, flags);

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);

		raw_spin_unlock(&qi->q_lock);
		raw_spin_lock(&qi->q_lock);

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;

	/* should never fail */
	qi_submit_sync(&desc, iommu);

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;

	qi_submit_sync(&desc, iommu);

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
	struct qi_desc desc;

	if (cap_write_drain(iommu->cap))
	if (cap_read_drain(iommu->cap))

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask)
	struct qi_desc desc;

		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);

	qi_submit_sync(&desc, iommu);

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
	unsigned long flags;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))

	/*
	 * queued invalidation is already set up and enabled.
	 */

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);

	qi->desc = page_address(desc_page);

	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

/* iommu interrupt handling. Most of it is MSI-like. */

static const char *dma_remap_fault_reasons[] =
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",

static const char *irq_remap_fault_reasons[] =
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
		*fault_type = UNKNOWN;

static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;

void dmar_msi_unmask(struct irq_data *data)
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

void dmar_msi_mask(struct irq_data *data)
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

void dmar_msi_write(int irq, struct msi_msg *msg)
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

void dmar_msi_read(int irq, struct msi_msg *msg)
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);

#define PRIMARY_FAULT_REG_LEN (16)
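/*
 * Fault IRQ handler: walk the primary fault records (16 bytes each,
 * PRIMARY_FAULT_REG_LEN) starting at the index from the fault status
 * register, log and clear each record with DMA_FRCD_F set, rate-limiting
 * the console output.
 */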
irqreturn_t dmar_fault(int irq, void *dev_id)
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;

	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && __ratelimit(&rs))
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);

		/* Disable printing, simply clear the fault when ratelimited */
		bool ratelimited = !__ratelimit(&rs);

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);

		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

			dmar_fault_do_one(iommu, type, fault_reason,
					  source_id, guest_addr);

		if (fault_index >= cap_num_fault_regs(iommu->cap))
		raw_spin_lock_irqsave(&iommu->register_lock, flag);

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
	       iommu->reg + DMAR_FSTS_REG);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
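/*
 * Allocate a hardware IRQ for the unit's fault events and install
 * dmar_fault() as its handler.
 */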
int dmar_set_interrupt(struct intel_iommu *iommu)
	/*
	 * Check if the fault interrupt is already initialized.
	 */
	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);

		pr_err("No free IRQ vectors\n");

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
		pr_err("Can't request irq\n");

int __init enable_drhd_fault_handling(void)
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		int ret = dmar_set_interrupt(iommu);

			pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
	if (!ecap_qis(iommu->ecap))

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;

	return dmar->flags & 0x1;

/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
	return irq_remapping_enabled || intel_iommu_enabled;

static int __init dmar_free_unused_resources(void)
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	up_write(&dmar_global_lock);

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);

/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static guid_t dmar_hp_guid =
	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
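/*
 * Evaluate the DMAR _DSM function @func and walk the returned buffer of
 * remapping structures with @handler bound to the matching resource type.
 */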
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))

	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);

	ret = dmar_ir_hotplug(dmaru, true);
		ret = dmar_iommu_hotplug(dmaru, true);

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)

	ret = dmar_ir_hotplug(dmaru, false);
		ret = dmar_iommu_hotplug(dmaru, false);

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
		list_del_rcu(&dmaru->list);
		dmar_free_drhd(dmaru);
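/*
 * Hot-add sequence: validate the DRHDs advertised by _DSM, parse
 * DRHD/RHSA/ATSR resources, then bring the new units online; on failure,
 * roll back in reverse order (remove DRHDs, release ATSRs, release DRHDs).
 */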
static int dmar_hotplug_insert(acpi_handle handle)
	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);

static int dmar_hotplug_remove(acpi_handle handle)
	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);

	WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				       &dmar_release_one_atsr, NULL));
	WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_release_drhd, NULL));

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_add_drhd, NULL);

static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
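/*
 * Find a handle implementing the DMAR _DSM (the given handle or a
 * descendant) and perform the insert/remove under dmar_global_lock.
 */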
static int dmar_device_hotplug(acpi_handle handle, bool insert)
	acpi_handle tmp = NULL;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     dmar_get_dsm_handle,
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

int dmar_device_add(acpi_handle handle)
	return dmar_device_hotplug(handle, true);

int dmar_device_remove(acpi_handle handle)
	return dmar_device_hotplug(handle, false);