// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#include <linux/refcount.h>
#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/cc_platform.h>

#include "amd_iommu.h"
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define PRI_QUEUE_SIZE          512
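/*
 * Per-tag PRI bookkeeping referenced by struct pasid_state below. The field
 * types are inferred from how they are used in this file (atomic_dec_and_test()
 * on inflight, the finish flag, and a PPR status code).
 */
struct pri_queue {
        atomic_t inflight;
        bool finish;
        int status;
};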
struct pasid_state {
        struct list_head list;                  /* For global state-list */
        refcount_t count;                       /* Reference count */
        unsigned mmu_notifier_count;            /* Counting nested mmu_notifier
                                                   calls */
        struct mm_struct *mm;                   /* mm_struct for the faults */
        struct mmu_notifier mn;                 /* mmu_notifier handle */
        struct pri_queue pri[PRI_QUEUE_SIZE];   /* PRI tag states */
        struct device_state *device_state;      /* Link to our device_state */
        u32 pasid;                              /* PASID index */
        bool invalid;                           /* Used during setup and
                                                   teardown of the pasid */
        spinlock_t lock;                        /* Protect pri_queues and
                                                   mmu_notifier_count */
        wait_queue_head_t wq;                   /* To wait for count == 0 */
};
struct device_state {
        struct list_head list;
        u16 devid;
        atomic_t count;
        struct pci_dev *pdev;
        struct pasid_state **states;
        struct iommu_domain *domain;
        int pasid_levels;
        int max_pasids;
        amd_iommu_invalid_ppr_cb inv_ppr_cb;
        amd_iommu_invalidate_ctx inv_ctx_cb;
        spinlock_t lock;
        wait_queue_head_t wq;
};
struct fault {
        struct work_struct work;
        struct device_state *dev_state;
        struct pasid_state *state;
        u64 address;
        u32 pasid;
        u16 tag;
        u16 finish;
        u16 flags;
};
static LIST_HEAD(state_list);
static DEFINE_SPINLOCK(state_lock);

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);
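/* Build the PCI requester ID: bus number in the upper byte, devfn below. */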
static u16 device_id(struct pci_dev *pdev)
{
        u16 devid;

        devid = pdev->bus->number;
        devid = (devid << 8) | pdev->devfn;

        return devid;
}
static struct device_state *__get_device_state(u16 devid)
{
        struct device_state *dev_state;

        list_for_each_entry(dev_state, &state_list, list) {
                if (dev_state->devid == devid)
                        return dev_state;
        }

        return NULL;
}
static struct device_state *get_device_state(u16 devid)
{
        struct device_state *dev_state;
        unsigned long flags;

        spin_lock_irqsave(&state_lock, flags);
        dev_state = __get_device_state(devid);
        if (dev_state != NULL)
                atomic_inc(&dev_state->count);
        spin_unlock_irqrestore(&state_lock, flags);

        return dev_state;
}
static void free_device_state(struct device_state *dev_state)
{
        struct iommu_group *group;

        /* Get rid of any remaining pasid states */
        free_pasid_states(dev_state);

        /*
         * Wait until the last reference is dropped before freeing
         * the device state.
         */
        wait_event(dev_state->wq, !atomic_read(&dev_state->count));

        /*
         * First detach device from domain - No more PRI requests will arrive
         * from that device after it is unbound from the IOMMUv2 domain.
         */
        group = iommu_group_get(&dev_state->pdev->dev);
        if (WARN_ON(!group))
                return;

        iommu_detach_group(dev_state->domain, group);

        iommu_group_put(group);

        /* Everything is down now, free the IOMMUv2 domain */
        iommu_domain_free(dev_state->domain);

        /* Finally get rid of the device-state */
        kfree(dev_state);
}
static void put_device_state(struct device_state *dev_state)
{
        if (atomic_dec_and_test(&dev_state->count))
                wake_up(&dev_state->wq);
}
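/*
 * The per-device PASID states are kept in a radix-tree-like table with 512
 * (9 bits worth of) entries per level; dev_state->pasid_levels gives the
 * depth needed to cover dev_state->max_pasids.
 */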
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
                                                  u32 pasid, bool alloc)
{
        struct pasid_state **root, **ptr;
        int level, index;

        level = dev_state->pasid_levels;
        root  = dev_state->states;

        while (true) {
                index = (pasid >> (9 * level)) & 0x1ff;
                ptr   = &root[index];

                if (level == 0)
                        break;

                if (*ptr == NULL) {
                        if (!alloc)
                                return NULL;

                        *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
                        if (*ptr == NULL)
                                return NULL;
                }

                root   = (struct pasid_state **)*ptr;
                level -= 1;
        }

        return ptr;
}
static int set_pasid_state(struct device_state *dev_state,
                           struct pasid_state *pasid_state,
                           u32 pasid)
{
        struct pasid_state **ptr;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, true);

        ret = -ENOMEM;
        if (ptr == NULL)
                goto out_unlock;

        ret = -ENOMEM;
        if (*ptr != NULL)
                goto out_unlock;

        *ptr = pasid_state;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);

        return ret;
}
static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
{
        struct pasid_state **ptr;
        unsigned long flags;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, true);

        if (ptr == NULL)
                goto out_unlock;

        *ptr = NULL;

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);
}
static struct pasid_state *get_pasid_state(struct device_state *dev_state,
                                           u32 pasid)
{
        struct pasid_state **ptr, *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, false);

        if (ptr == NULL)
                goto out_unlock;

        ret = *ptr;
        if (ret)
                refcount_inc(&ret->count);

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);

        return ret;
}
static void free_pasid_state(struct pasid_state *pasid_state)
{
        kfree(pasid_state);
}
static void put_pasid_state(struct pasid_state *pasid_state)
{
        if (refcount_dec_and_test(&pasid_state->count))
                wake_up(&pasid_state->wq);
}
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
        refcount_dec(&pasid_state->count);
        wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
        free_pasid_state(pasid_state);
}
static void unbind_pasid(struct pasid_state *pasid_state)
{
        struct iommu_domain *domain;

        domain = pasid_state->device_state->domain;

        /*
         * Mark pasid_state as invalid; no more faults will be added to the
         * work queue after this is visible everywhere.
         */
        pasid_state->invalid = true;

        /* Make sure this is visible */
        smp_wmb();

        /* After this the device/pasid can't access the mm anymore */
        amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

        /* Make sure no more pending faults are in the queue */
        flush_workqueue(iommu_wq);
}
static void free_pasid_states_level1(struct pasid_state **tbl)
{
        int i;

        for (i = 0; i < 512; ++i) {
                if (tbl[i] == NULL)
                        continue;

                free_page((unsigned long)tbl[i]);
        }
}
static void free_pasid_states_level2(struct pasid_state **tbl)
{
        struct pasid_state **ptr;
        int i;

        for (i = 0; i < 512; ++i) {
                if (tbl[i] == NULL)
                        continue;

                ptr = (struct pasid_state **)tbl[i];
                free_pasid_states_level1(ptr);
        }
}
static void free_pasid_states(struct device_state *dev_state)
{
        struct pasid_state *pasid_state;
        int i;

        for (i = 0; i < dev_state->max_pasids; ++i) {
                pasid_state = get_pasid_state(dev_state, i);
                if (pasid_state == NULL)
                        continue;

                put_pasid_state(pasid_state);

                /*
                 * This will call the mn_release function and
                 * unbind the PASID.
                 */
                mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

                put_pasid_state_wait(pasid_state); /* Reference taken in
                                                      amd_iommu_bind_pasid */

                /* Drop reference taken in amd_iommu_bind_pasid */
                put_device_state(dev_state);
        }

        if (dev_state->pasid_levels == 2)
                free_pasid_states_level2(dev_state->states);
        else if (dev_state->pasid_levels == 1)
                free_pasid_states_level1(dev_state->states);
        else
                BUG_ON(dev_state->pasid_levels != 0);

        free_page((unsigned long)dev_state->states);
}
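/*
 * MMU notifier callbacks: these keep the IOMMU's view of the address space in
 * sync with the CPU page tables and tear down the binding when the mm exits.
 */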
static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
        return container_of(mn, struct pasid_state, mn);
}
static void mn_invalidate_range(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;

        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;

        if ((start ^ (end - 1)) < PAGE_SIZE)
                amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
                                     start);
        else
                amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        bool run_inv_ctx_cb;

        might_sleep();

        pasid_state    = mn_to_state(mn);
        dev_state      = pasid_state->device_state;
        run_inv_ctx_cb = !pasid_state->invalid;

        if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
                dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

        unbind_pasid(pasid_state);
}
static const struct mmu_notifier_ops iommu_mn = {
        .release                = mn_release,
        .invalidate_range       = mn_invalidate_range,
};
static void set_pri_tag_status(struct pasid_state *pasid_state,
                               u16 tag, int status)
{
        unsigned long flags;

        spin_lock_irqsave(&pasid_state->lock, flags);
        pasid_state->pri[tag].status = status;
        spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void finish_pri_tag(struct device_state *dev_state,
                           struct pasid_state *pasid_state,
                           u16 tag)
{
        unsigned long flags;

        spin_lock_irqsave(&pasid_state->lock, flags);
        if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
            pasid_state->pri[tag].finish) {
                amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
                                       pasid_state->pri[tag].status, tag);
                pasid_state->pri[tag].finish = false;
                pasid_state->pri[tag].status = PPR_SUCCESS;
        }
        spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void handle_fault_error(struct fault *fault)
{
        int status;

        if (!fault->dev_state->inv_ppr_cb) {
                set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
                return;
        }

        status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
                                              fault->pasid,
                                              fault->address,
                                              fault->flags);
        switch (status) {
        case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
                set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
                break;
        case AMD_IOMMU_INV_PRI_RSP_INVALID:
                set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
                break;
        case AMD_IOMMU_INV_PRI_RSP_FAIL:
                set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
                break;
        default:
                BUG();
        }
}
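/*
 * Translate the PPR fault flags into VM_* access bits and check whether the
 * requested access is permitted by the VMA.
 */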
static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
        unsigned long requested = 0;

        if (fault->flags & PPR_FAULT_EXEC)
                requested |= VM_EXEC;

        if (fault->flags & PPR_FAULT_READ)
                requested |= VM_READ;

        if (fault->flags & PPR_FAULT_WRITE)
                requested |= VM_WRITE;

        return (requested & ~vma->vm_flags) != 0;
}
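/*
 * Workqueue handler: resolve one queued PPR fault by faulting the page in
 * through handle_mm_fault() on behalf of the device, then complete the PRI
 * tag so the device can retry the access.
 */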
static void do_fault(struct work_struct *work)
{
        struct fault *fault = container_of(work, struct fault, work);
        struct vm_area_struct *vma;
        vm_fault_t ret = VM_FAULT_ERROR;
        unsigned int flags = 0;
        struct mm_struct *mm;
        u64 address;

        mm = fault->state->mm;
        address = fault->address;

        if (fault->flags & PPR_FAULT_USER)
                flags |= FAULT_FLAG_USER;
        if (fault->flags & PPR_FAULT_WRITE)
                flags |= FAULT_FLAG_WRITE;
        flags |= FAULT_FLAG_REMOTE;

        mmap_read_lock(mm);
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                /* failed to get a vma in the right range */
                goto out;

        /* Check if we have the right permissions on the vma */
        if (access_error(vma, fault))
                goto out;

        ret = handle_mm_fault(vma, address, flags, NULL);
out:
        mmap_read_unlock(mm);

        if (ret & VM_FAULT_ERROR)
                /* failed to service fault */
                handle_fault_error(fault);

        finish_pri_tag(fault->dev_state, fault->state, fault->tag);

        put_pasid_state(fault->state);

        kfree(fault);
}
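/*
 * PPR notifier: called by the IOMMU driver for each Peripheral Page Request.
 * It runs in atomic context, so it only looks up the device and PASID state,
 * allocates a struct fault and defers the actual page-fault handling to the
 * iommu_wq workqueue (do_fault() above).
 */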
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
        struct amd_iommu_fault *iommu_fault;
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        struct pci_dev *pdev = NULL;
        unsigned long flags;
        struct fault *fault;
        bool finish;
        u16 tag, devid;
        int ret;

        iommu_fault = data;
        tag         = iommu_fault->tag & 0x1ff;
        finish      = (iommu_fault->tag >> 9) & 1;

        devid = iommu_fault->device_id;
        pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
                                           devid & 0xff);
        if (!pdev)
                return -ENODEV;

        ret = NOTIFY_DONE;

        /* In kdump kernel pci dev is not initialized yet -> send INVALID */
        if (amd_iommu_is_attach_deferred(&pdev->dev)) {
                amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
                                       PPR_INVALID, tag);
                goto out;
        }

        dev_state = get_device_state(iommu_fault->device_id);
        if (dev_state == NULL)
                goto out;

        pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
        if (pasid_state == NULL || pasid_state->invalid) {
                /* We know the device but not the PASID -> send INVALID */
                amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
                                       PPR_INVALID, tag);
                goto out_drop_state;
        }

        spin_lock_irqsave(&pasid_state->lock, flags);
        atomic_inc(&pasid_state->pri[tag].inflight);
        if (finish)
                pasid_state->pri[tag].finish = true;
        spin_unlock_irqrestore(&pasid_state->lock, flags);

        fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
        if (fault == NULL) {
                /* We are OOM - send success and let the device re-fault */
                finish_pri_tag(dev_state, pasid_state, tag);
                goto out_drop_state;
        }

        fault->dev_state = dev_state;
        fault->address   = iommu_fault->address;
        fault->state     = pasid_state;
        fault->tag       = tag;
        fault->finish    = finish;
        fault->pasid     = iommu_fault->pasid;
        fault->flags     = iommu_fault->flags;
        INIT_WORK(&fault->work, do_fault);

        queue_work(iommu_wq, &fault->work);

        ret = NOTIFY_OK;

out_drop_state:
        if (ret != NOTIFY_OK && pasid_state)
                put_pasid_state(pasid_state);

        put_device_state(dev_state);

out:
        return ret;
}
static struct notifier_block ppr_nb = {
        .notifier_call = ppr_notifier,
};
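/*
 * Bind a PASID of the given PCI device to the address space of @task, so the
 * device can issue PRI/ATS requests against that task's memory. The caller
 * must have set the device up with amd_iommu_init_device() first.
 */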
int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
                         struct task_struct *task)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        struct mm_struct *mm;
        u16 devid;
        int ret;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid     = device_id(pdev);
        dev_state = get_device_state(devid);

        if (dev_state == NULL)
                return -EINVAL;

        ret = -EINVAL;
        if (pasid >= dev_state->max_pasids)
                goto out;

        ret = -ENOMEM;
        pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
        if (pasid_state == NULL)
                goto out;

        refcount_set(&pasid_state->count, 1);
        init_waitqueue_head(&pasid_state->wq);
        spin_lock_init(&pasid_state->lock);

        mm                        = get_task_mm(task);
        pasid_state->mm           = mm;
        pasid_state->device_state = dev_state;
        pasid_state->pasid        = pasid;
        pasid_state->invalid      = true; /* Mark as valid only if we are
                                             done with setting up the pasid */
        pasid_state->mn.ops       = &iommu_mn;

        if (pasid_state->mm == NULL)
                goto out_free;

        mmu_notifier_register(&pasid_state->mn, mm);

        ret = set_pasid_state(dev_state, pasid_state, pasid);
        if (ret)
                goto out_unregister;

        ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
                                        __pa(pasid_state->mm->pgd));
        if (ret)
                goto out_clear_state;

        /* Now we are ready to handle faults */
        pasid_state->invalid = false;

        /*
         * Drop the reference to the mm_struct here. We rely on the
         * mmu_notifier release call-back to inform us when the mm
         * is going away.
         */
        mmput(mm);

        return 0;

out_clear_state:
        clear_pasid_state(dev_state, pasid);

out_unregister:
        mmu_notifier_unregister(&pasid_state->mn, mm);
        mmput(mm);

out_free:
        free_pasid_state(pasid_state);

out:
        put_device_state(dev_state);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);
void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        u16 devid;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return;

        devid = device_id(pdev);
        dev_state = get_device_state(devid);
        if (dev_state == NULL)
                return;

        if (pasid >= dev_state->max_pasids)
                goto out;

        pasid_state = get_pasid_state(dev_state, pasid);
        if (pasid_state == NULL)
                goto out;
        /*
         * Drop reference taken here. We are safe because we still hold
         * the reference taken in the amd_iommu_bind_pasid function.
         */
        put_pasid_state(pasid_state);

        /* Clear the pasid state so that the pasid can be re-used */
        clear_pasid_state(dev_state, pasid_state->pasid);

        /*
         * Call mmu_notifier_unregister to drop our reference
         * to pasid_state->mm
         */
        mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

        put_pasid_state_wait(pasid_state); /* Reference taken in
                                              amd_iommu_bind_pasid */
out:
        /* Drop reference taken in this function */
        put_device_state(dev_state);

        /* Drop reference taken in amd_iommu_bind_pasid */
        put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);
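/*
 * Set up a device for PASID use: allocate the per-device state and PASID
 * table, put the device into a direct-mapped IOMMUv2 domain and enable
 * PASID support for up to @pasids address spaces.
 */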
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
        struct device_state *dev_state;
        struct iommu_group *group;
        unsigned long flags;
        int ret, tmp;
        u16 devid;

        might_sleep();

        /*
         * When memory encryption is active the device is likely not in a
         * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
         */
        if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                return -ENODEV;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        if (pasids <= 0 || pasids > (PASID_MASK + 1))
                return -EINVAL;

        devid = device_id(pdev);

        dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
        if (dev_state == NULL)
                return -ENOMEM;

        spin_lock_init(&dev_state->lock);
        init_waitqueue_head(&dev_state->wq);
        dev_state->pdev  = pdev;
        dev_state->devid = devid;

        tmp = pasids;
        for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
                dev_state->pasid_levels += 1;

        atomic_set(&dev_state->count, 1);
        dev_state->max_pasids = pasids;

        ret = -ENOMEM;
        dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
        if (dev_state->states == NULL)
                goto out_free_dev_state;

        dev_state->domain = iommu_domain_alloc(&pci_bus_type);
        if (dev_state->domain == NULL)
                goto out_free_states;

        amd_iommu_domain_direct_map(dev_state->domain);

        ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
        if (ret)
                goto out_free_domain;

        group = iommu_group_get(&pdev->dev);
        if (!group) {
                ret = -EINVAL;
                goto out_free_domain;
        }

        ret = iommu_attach_group(dev_state->domain, group);
        if (ret != 0)
                goto out_drop_group;

        iommu_group_put(group);

        spin_lock_irqsave(&state_lock, flags);

        if (__get_device_state(devid) != NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                ret = -EBUSY;
                goto out_free_domain;
        }

        list_add_tail(&dev_state->list, &state_list);

        spin_unlock_irqrestore(&state_lock, flags);

        return 0;

out_drop_group:
        iommu_group_put(group);

out_free_domain:
        iommu_domain_free(dev_state->domain);

out_free_states:
        free_page((unsigned long)dev_state->states);

out_free_dev_state:
        kfree(dev_state);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);
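/* Undo amd_iommu_init_device(): unlink the per-device state and free it. */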
void amd_iommu_free_device(struct pci_dev *pdev)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;

        if (!amd_iommu_v2_supported())
                return;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        dev_state = __get_device_state(devid);
        if (dev_state == NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                return;
        }

        list_del(&dev_state->list);

        spin_unlock_irqrestore(&state_lock, flags);

        put_device_state(dev_state);
        free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
                                 amd_iommu_invalid_ppr_cb cb)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;
        int ret;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        ret = -EINVAL;
        dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;

        dev_state->inv_ppr_cb = cb;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
                                    amd_iommu_invalidate_ctx cb)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;
        int ret;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        ret = -EINVAL;
        dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;

        dev_state->inv_ctx_cb = cb;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
static int __init amd_iommu_v2_init(void)
{
        int ret;

        if (!amd_iommu_v2_supported()) {
                pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
                /*
                 * Load anyway to provide the symbols to other modules
                 * which may use AMD IOMMUv2 optionally.
                 */
                return 0;
        }

        ret = -ENOMEM;
        iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
        if (iommu_wq == NULL)
                goto out;

        amd_iommu_register_ppr_notifier(&ppr_nb);

        pr_info("AMD IOMMUv2 loaded and initialized\n");

        return 0;

out:
        return ret;
}
static void __exit amd_iommu_v2_exit(void)
{
        struct device_state *dev_state, *next;
        unsigned long flags;
        LIST_HEAD(freelist);

        if (!amd_iommu_v2_supported())
                return;

        amd_iommu_unregister_ppr_notifier(&ppr_nb);

        flush_workqueue(iommu_wq);

        /*
         * The loop below might call flush_workqueue(), so call
         * destroy_workqueue() after it.
         */
        spin_lock_irqsave(&state_lock, flags);

        list_for_each_entry_safe(dev_state, next, &state_list, list) {
                put_device_state(dev_state);
                list_del(&dev_state->list);
                list_add_tail(&dev_state->list, &freelist);
        }

        spin_unlock_irqrestore(&state_lock, flags);

        /*
         * Since free_device_state waits on the count to be zero,
         * we need to free dev_state outside the spinlock.
         */
        list_for_each_entry_safe(dev_state, next, &freelist, list) {
                list_del(&dev_state->list);
                free_device_state(dev_state);
        }

        destroy_workqueue(iommu_wq);
}
module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);