// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */
#define pr_fmt(fmt)    "AMD-Vi: " fmt

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512
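/*
 * PRI_QUEUE_SIZE matches the 9-bit PRI tag field: ppr_notifier() below
 * extracts the tag with "iommu_fault->tag & 0x1ff", so tags range from
 * 0 to 511 and index directly into pasid_state->pri[].
 */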
struct pri_queue {
	atomic_t inflight;
	bool finish;
	int status;
};

struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};
struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};

struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	struct mm_struct *mm;
	u64 address;
	u16 devid;
	u32 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};
static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}
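/*
 * device_id() packs the bus number and devfn into the 16-bit requester
 * ID the IOMMU uses to identify a device, e.g. bus 0x3a, devfn 0x42
 * yields devid 0x3a42.
 */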
static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}
static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}
static void free_device_state(struct device_state *dev_state)
{
	struct iommu_group *group;

	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	group = iommu_group_get(&dev_state->pdev->dev);
	if (WARN_ON(!group))
		return;

	iommu_detach_group(dev_state->domain, group);

	iommu_group_put(group);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}
static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}
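/*
 * Worked example of the radix walk above, with pasid_levels == 1 and
 * pasid 0x12345: level 1 uses index (0x12345 >> 9) & 0x1ff == 0x91 to
 * find the intermediate page, then level 0 uses 0x12345 & 0x1ff ==
 * 0x145 to find the slot. Each level resolves 9 bits, matching the
 * 512 pointers that fit in one zeroed page.
 */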
static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -ENOMEM;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}
static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}
static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}
static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	atomic_dec(&pasid_state->count);
	wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
	free_pasid_state(pasid_state);
}
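/*
 * Note the two teardown flavours: put_pasid_state() merely drops a
 * reference and wakes any waiter, while put_pasid_state_wait() drops
 * the caller's reference, sleeps until every remaining reference is
 * gone and then frees the state. Only the owner of the initial
 * reference taken in amd_iommu_bind_pasid() may use the latter.
 */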
static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid, no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}
static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}
static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}
static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the pasid
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else
		BUG_ON(dev_state->pasid_levels != 0);

	free_page((unsigned long)dev_state->states);
}
static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}
static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}
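/*
 * The single-page test above works because start and (end - 1) lie in
 * the same page exactly when all bits above the page offset agree, so
 * their XOR is below PAGE_SIZE. E.g. start 0x1000, end 0x2000 gives
 * 0x1000 ^ 0x1fff == 0xfff (single page flush), while start 0x1000,
 * end 0x3000 gives 0x1000 ^ 0x2fff == 0x3fff (full TLB flush).
 */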
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}
static const struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.invalidate_range	= mn_invalidate_range,
};
static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}
static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
	unsigned long requested = 0;

	if (fault->flags & PPR_FAULT_EXEC)
		requested |= VM_EXEC;

	if (fault->flags & PPR_FAULT_READ)
		requested |= VM_READ;

	if (fault->flags & PPR_FAULT_WRITE)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}
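/*
 * Example: a PPR write fault (PPR_FAULT_WRITE) against a read-only VMA
 * leaves VM_WRITE set in (requested & ~vma->vm_flags), so access_error()
 * returns true and the fault is rejected instead of being handled.
 */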
static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct vm_area_struct *vma;
	vm_fault_t ret = VM_FAULT_ERROR;
	unsigned int flags = 0;
	struct mm_struct *mm;
	u64 address;

	mm = fault->state->mm;
	address = fault->address;

	if (fault->flags & PPR_FAULT_USER)
		flags |= FAULT_FLAG_USER;
	if (fault->flags & PPR_FAULT_WRITE)
		flags |= FAULT_FLAG_WRITE;
	flags |= FAULT_FLAG_REMOTE;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		/* failed to get a vma in the right range */
		goto out;

	/* Check if we have the right permissions on the vma */
	if (access_error(vma, fault))
		goto out;

	ret = handle_mm_fault(vma, address, flags);
out:
	up_read(&mm->mmap_sem);

	if (ret & VM_FAULT_ERROR)
		/* failed to service fault */
		handle_fault_error(fault);

	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag, devid;
	int ret;
	struct iommu_dev_data *dev_data;
	struct pci_dev *pdev = NULL;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	devid = iommu_fault->device_id;
	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (!pdev)
		return -ENODEV;
	dev_data = get_dev_data(&pdev->dev);

	/* In kdump kernel pci dev is not initialized yet -> send INVALID */
	ret = NOTIFY_DONE;
	if (translation_pre_enabled(amd_iommu_rlookup_table[devid])
		&& dev_data->defer_attach) {
		amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out;
	}

	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:

	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}
static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};
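/*
 * Fault path summary: ppr_notifier() runs from the IOMMU driver's
 * notifier chain, validates the device and PASID, and queues a struct
 * fault on iommu_wq. do_fault() then resolves it in process context
 * via handle_mm_fault() on the bound mm, and finish_pri_tag() sends
 * the PRI response back to the device with amd_iommu_complete_ppr().
 */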
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);
	mmput(mm);

out_free:
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);
void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;
	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	struct iommu_group *group;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	/*
	 * When memory encryption is active the device is likely not in a
	 * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
	 */
	if (mem_encrypt_active())
		return -ENODEV;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

	/*
	 * Each pasid table level resolves 9 bits, e.g. pasids == 0x10000
	 * needs one intermediate level (pasid_levels == 1).
	 */
	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	group = iommu_group_get(&pdev->dev);
	if (!group) {
		ret = -EINVAL;
		goto out_free_domain;
	}

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)
		goto out_drop_group;

	iommu_group_put(group);

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_drop_group:
	iommu_group_put(group);

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);
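/*
 * Illustrative usage sketch only (not compiled): how a device driver
 * would typically combine the exported entry points. The function name
 * and the chosen PASID/limit are assumptions made for the example.
 */
#if 0
static int example_enable_svm(struct pci_dev *pdev)
{
	int ret;

	/* Reserve room for up to 16 PASIDs on this device */
	ret = amd_iommu_init_device(pdev, 16);
	if (ret)
		return ret;

	/* Attach the current task's address space to PASID 1 */
	ret = amd_iommu_bind_pasid(pdev, 1, current);
	if (ret)
		amd_iommu_free_device(pdev);

	return ret;
}
#endif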
void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state(dev_state);
	/*
	 * Wait until the last reference is dropped before freeing
	 * the device state.
	 */
	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
	free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
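/*
 * Sketch of a driver-supplied PPR callback (not compiled). The
 * parameter list mirrors the inv_ppr_cb invocation in
 * handle_fault_error() above; see the amd_iommu_invalid_ppr_cb typedef
 * in include/linux/amd-iommu.h for the authoritative signature. The
 * callback name and policy here are made up for the example.
 */
#if 0
static int example_inv_ppr_cb(struct pci_dev *pdev, int pasid,
			      unsigned long address, u16 flags)
{
	/* Report the unresolved fault back to the device as a failure */
	return AMD_IOMMU_INV_PRI_RSP_FAIL;
}
#endif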
int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
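/*
 * Sketch of a context-invalidation callback (not compiled), matching
 * the inv_ctx_cb invocation in mn_release() above (pdev and pasid).
 * The name and body are assumptions made for the example.
 */
#if 0
static void example_inv_ctx_cb(struct pci_dev *pdev, int pasid)
{
	/* The mm behind this PASID is going away - stop using it */
}
#endif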
static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");

		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
	if (iommu_wq == NULL)
		goto out;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out:
	return ret;
}
static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);
}
module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);