GNU Linux-libre 4.14.259-gnu1
drivers/iommu/amd_iommu_v2.c
/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>

#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");

#define MAX_DEVICES             0x10000
#define PRI_QUEUE_SIZE          512

struct pri_queue {
        atomic_t inflight;
        bool finish;
        int status;
};

struct pasid_state {
        struct list_head list;                  /* For global state-list */
        atomic_t count;                         /* Reference count */
        unsigned mmu_notifier_count;            /* Counting nested mmu_notifier
                                                   calls */
        struct mm_struct *mm;                   /* mm_struct for the faults */
        struct mmu_notifier mn;                 /* mmu_notifier handle */
        struct pri_queue pri[PRI_QUEUE_SIZE];   /* PRI tag states */
        struct device_state *device_state;      /* Link to our device_state */
        int pasid;                              /* PASID index */
        bool invalid;                           /* Used during setup and
                                                   teardown of the pasid */
        spinlock_t lock;                        /* Protect pri_queues and
                                                   mmu_notifier_count */
        wait_queue_head_t wq;                   /* To wait for count == 0 */
};

struct device_state {
        struct list_head list;
        u16 devid;
        atomic_t count;
        struct pci_dev *pdev;
        struct pasid_state **states;
        struct iommu_domain *domain;
        int pasid_levels;
        int max_pasids;
        amd_iommu_invalid_ppr_cb inv_ppr_cb;
        amd_iommu_invalidate_ctx inv_ctx_cb;
        spinlock_t lock;
        wait_queue_head_t wq;
};

struct fault {
        struct work_struct work;
        struct device_state *dev_state;
        struct pasid_state *state;
        struct mm_struct *mm;
        u64 address;
        u16 devid;
        u16 pasid;
        u16 tag;
        u16 finish;
        u16 flags;
};

static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;

static void free_pasid_states(struct device_state *dev_state);

static u16 device_id(struct pci_dev *pdev)
{
        u16 devid;

        devid = pdev->bus->number;
        devid = (devid << 8) | pdev->devfn;

        return devid;
}
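
/*
 * Example (illustrative only): a device at bus 0x03, slot 0x08, function 2
 * has devfn = (0x08 << 3) | 2 = 0x42, so device_id() returns
 * (0x03 << 8) | 0x42 = 0x0342.
 */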

static struct device_state *__get_device_state(u16 devid)
{
        struct device_state *dev_state;

        list_for_each_entry(dev_state, &state_list, list) {
                if (dev_state->devid == devid)
                        return dev_state;
        }

        return NULL;
}

static struct device_state *get_device_state(u16 devid)
{
        struct device_state *dev_state;
        unsigned long flags;

        spin_lock_irqsave(&state_lock, flags);
        dev_state = __get_device_state(devid);
        if (dev_state != NULL)
                atomic_inc(&dev_state->count);
        spin_unlock_irqrestore(&state_lock, flags);

        return dev_state;
}

static void free_device_state(struct device_state *dev_state)
{
        struct iommu_group *group;

        /*
         * First detach device from domain - No more PRI requests will arrive
         * from that device after it is unbound from the IOMMUv2 domain.
         */
        group = iommu_group_get(&dev_state->pdev->dev);
        if (WARN_ON(!group))
                return;

        iommu_detach_group(dev_state->domain, group);

        iommu_group_put(group);

        /* Everything is down now, free the IOMMUv2 domain */
        iommu_domain_free(dev_state->domain);

        /* Finally get rid of the device-state */
        kfree(dev_state);
}

static void put_device_state(struct device_state *dev_state)
{
        if (atomic_dec_and_test(&dev_state->count))
                wake_up(&dev_state->wq);
}

/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
                                                  int pasid, bool alloc)
{
        struct pasid_state **root, **ptr;
        int level, index;

        level = dev_state->pasid_levels;
        root  = dev_state->states;

        while (true) {

                index = (pasid >> (9 * level)) & 0x1ff;
                ptr   = &root[index];

                if (level == 0)
                        break;

                if (*ptr == NULL) {
                        if (!alloc)
                                return NULL;

                        *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
                        if (*ptr == NULL)
                                return NULL;
                }

                root   = (struct pasid_state **)*ptr;
                level -= 1;
        }

        return ptr;
}
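
/*
 * Example walk (illustrative only): with pasid_levels == 1 and
 * pasid == 0x345, the first iteration picks root index
 * (0x345 >> 9) & 0x1ff == 1 and descends into (or, when 'alloc' is
 * true, creates) that level-1 table; the final iteration returns
 * &table[0x345 & 0x1ff], i.e. slot 0x145 of it.
 */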

static int set_pasid_state(struct device_state *dev_state,
                           struct pasid_state *pasid_state,
                           int pasid)
{
        struct pasid_state **ptr;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, true);

        ret = -ENOMEM;
        if (ptr == NULL)
                goto out_unlock;

        ret = -ENOMEM;
        if (*ptr != NULL)
                goto out_unlock;

        *ptr = pasid_state;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);

        return ret;
}

static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
        struct pasid_state **ptr;
        unsigned long flags;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, true);

        if (ptr == NULL)
                goto out_unlock;

        *ptr = NULL;

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);
}

static struct pasid_state *get_pasid_state(struct device_state *dev_state,
                                           int pasid)
{
        struct pasid_state **ptr, *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev_state->lock, flags);
        ptr = __get_pasid_state_ptr(dev_state, pasid, false);

        if (ptr == NULL)
                goto out_unlock;

        ret = *ptr;
        if (ret)
                atomic_inc(&ret->count);

out_unlock:
        spin_unlock_irqrestore(&dev_state->lock, flags);

        return ret;
}

static void free_pasid_state(struct pasid_state *pasid_state)
{
        kfree(pasid_state);
}

static void put_pasid_state(struct pasid_state *pasid_state)
{
        if (atomic_dec_and_test(&pasid_state->count))
                wake_up(&pasid_state->wq);
}

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
        atomic_dec(&pasid_state->count);
        wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
        free_pasid_state(pasid_state);
}

static void unbind_pasid(struct pasid_state *pasid_state)
{
        struct iommu_domain *domain;

        domain = pasid_state->device_state->domain;

        /*
         * Mark pasid_state as invalid; no more faults will be added to the
         * work queue after this is visible everywhere.
         */
        pasid_state->invalid = true;

        /* Make sure this is visible */
        smp_wmb();

        /* After this the device/pasid can't access the mm anymore */
        amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

        /* Make sure no more pending faults are in the queue */
        flush_workqueue(iommu_wq);
}

static void free_pasid_states_level1(struct pasid_state **tbl)
{
        int i;

        for (i = 0; i < 512; ++i) {
                if (tbl[i] == NULL)
                        continue;

                free_page((unsigned long)tbl[i]);
        }
}

static void free_pasid_states_level2(struct pasid_state **tbl)
{
        struct pasid_state **ptr;
        int i;

        for (i = 0; i < 512; ++i) {
                if (tbl[i] == NULL)
                        continue;

                ptr = (struct pasid_state **)tbl[i];
                free_pasid_states_level1(ptr);
        }
}

static void free_pasid_states(struct device_state *dev_state)
{
        struct pasid_state *pasid_state;
        int i;

        for (i = 0; i < dev_state->max_pasids; ++i) {
                pasid_state = get_pasid_state(dev_state, i);
                if (pasid_state == NULL)
                        continue;

                put_pasid_state(pasid_state);

                /*
                 * This will call the mn_release function and
                 * unbind the PASID
                 */
                mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

                put_pasid_state_wait(pasid_state); /* Reference taken in
                                                      amd_iommu_bind_pasid */

                /* Drop reference taken in amd_iommu_bind_pasid */
                put_device_state(dev_state);
        }

        if (dev_state->pasid_levels == 2)
                free_pasid_states_level2(dev_state->states);
        else if (dev_state->pasid_levels == 1)
                free_pasid_states_level1(dev_state->states);
        else
                BUG_ON(dev_state->pasid_levels != 0);

        free_page((unsigned long)dev_state->states);
}

static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
        return container_of(mn, struct pasid_state, mn);
}

static void __mn_flush_page(struct mmu_notifier *mn,
                            unsigned long address)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;

        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;

        amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}

static int mn_clear_flush_young(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start,
                                unsigned long end)
{
        for (; start < end; start += PAGE_SIZE)
                __mn_flush_page(mn, start);

        return 0;
}

static void mn_invalidate_range(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;

        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;

        if ((start ^ (end - 1)) < PAGE_SIZE)
                amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
                                     start);
        else
                amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}
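
/*
 * The (start ^ (end - 1)) < PAGE_SIZE test above is true exactly when
 * start and end - 1 agree in all bits above the page offset, i.e. the
 * range lies within a single page. Example (illustrative only, 4K
 * pages): start = 0x1000, end = 0x2000 gives 0x1000 ^ 0x1fff = 0xfff,
 * which is < 0x1000, so a single page is flushed; start = 0x1000,
 * end = 0x2001 flushes the whole TLB for the PASID instead.
 */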

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        bool run_inv_ctx_cb;

        might_sleep();

        pasid_state    = mn_to_state(mn);
        dev_state      = pasid_state->device_state;
        run_inv_ctx_cb = !pasid_state->invalid;

        if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
                dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

        unbind_pasid(pasid_state);
}

static const struct mmu_notifier_ops iommu_mn = {
        .release                = mn_release,
        .clear_flush_young      = mn_clear_flush_young,
        .invalidate_range       = mn_invalidate_range,
};

static void set_pri_tag_status(struct pasid_state *pasid_state,
                               u16 tag, int status)
{
        unsigned long flags;

        spin_lock_irqsave(&pasid_state->lock, flags);
        pasid_state->pri[tag].status = status;
        spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void finish_pri_tag(struct device_state *dev_state,
                           struct pasid_state *pasid_state,
                           u16 tag)
{
        unsigned long flags;

        spin_lock_irqsave(&pasid_state->lock, flags);
        if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
            pasid_state->pri[tag].finish) {
                amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
                                       pasid_state->pri[tag].status, tag);
                pasid_state->pri[tag].finish = false;
                pasid_state->pri[tag].status = PPR_SUCCESS;
        }
        spin_unlock_irqrestore(&pasid_state->lock, flags);
}

static void handle_fault_error(struct fault *fault)
{
        int status;

        if (!fault->dev_state->inv_ppr_cb) {
                set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
                return;
        }

        status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
                                              fault->pasid,
                                              fault->address,
                                              fault->flags);
        switch (status) {
        case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
                set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
                break;
        case AMD_IOMMU_INV_PRI_RSP_INVALID:
                set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
                break;
        case AMD_IOMMU_INV_PRI_RSP_FAIL:
                set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
                break;
        default:
                BUG();
        }
}

static bool access_error(struct vm_area_struct *vma, struct fault *fault)
{
        unsigned long requested = 0;

        if (fault->flags & PPR_FAULT_EXEC)
                requested |= VM_EXEC;

        if (fault->flags & PPR_FAULT_READ)
                requested |= VM_READ;

        if (fault->flags & PPR_FAULT_WRITE)
                requested |= VM_WRITE;

        return (requested & ~vma->vm_flags) != 0;
}
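
/*
 * Example (illustrative only): a PPR_FAULT_WRITE request against a VMA
 * mapped with only VM_READ leaves VM_WRITE set in
 * (requested & ~vma->vm_flags), so access_error() returns true and
 * do_fault() answers the request with an error response instead of
 * calling handle_mm_fault().
 */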

static void do_fault(struct work_struct *work)
{
        struct fault *fault = container_of(work, struct fault, work);
        struct vm_area_struct *vma;
        int ret = VM_FAULT_ERROR;
        unsigned int flags = 0;
        struct mm_struct *mm;
        u64 address;

        mm = fault->state->mm;
        address = fault->address;

        if (fault->flags & PPR_FAULT_USER)
                flags |= FAULT_FLAG_USER;
        if (fault->flags & PPR_FAULT_WRITE)
                flags |= FAULT_FLAG_WRITE;
        flags |= FAULT_FLAG_REMOTE;

        down_read(&mm->mmap_sem);
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                /* failed to get a vma in the right range */
                goto out;

        /* Check if we have the right permissions on the vma */
        if (access_error(vma, fault))
                goto out;

        ret = handle_mm_fault(vma, address, flags);
out:
        up_read(&mm->mmap_sem);

        if (ret & VM_FAULT_ERROR)
                /* failed to service fault */
                handle_fault_error(fault);

        finish_pri_tag(fault->dev_state, fault->state, fault->tag);

        put_pasid_state(fault->state);

        kfree(fault);
}

static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
        struct amd_iommu_fault *iommu_fault;
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        unsigned long flags;
        struct fault *fault;
        bool finish;
        u16 tag, devid;
        int ret;
        struct iommu_dev_data *dev_data;
        struct pci_dev *pdev = NULL;

        iommu_fault = data;
        /* Bits 0-8 of the PPR tag are the tag itself, bit 9 is the finish bit */
        tag         = iommu_fault->tag & 0x1ff;
        finish      = (iommu_fault->tag >> 9) & 1;

        devid = iommu_fault->device_id;
        pdev = pci_get_bus_and_slot(PCI_BUS_NUM(devid), devid & 0xff);
        if (!pdev)
                return -ENODEV;
        dev_data = get_dev_data(&pdev->dev);

        /* In kdump kernel pci dev is not initialized yet -> send INVALID */
        ret = NOTIFY_DONE;
        if (translation_pre_enabled(amd_iommu_rlookup_table[devid])
                && dev_data->defer_attach) {
                amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
                                       PPR_INVALID, tag);
                goto out;
        }

        dev_state = get_device_state(iommu_fault->device_id);
        if (dev_state == NULL)
                goto out;

        pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
        if (pasid_state == NULL || pasid_state->invalid) {
                /* We know the device but not the PASID -> send INVALID */
                amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
                                       PPR_INVALID, tag);
                goto out_drop_state;
        }

        spin_lock_irqsave(&pasid_state->lock, flags);
        atomic_inc(&pasid_state->pri[tag].inflight);
        if (finish)
                pasid_state->pri[tag].finish = true;
        spin_unlock_irqrestore(&pasid_state->lock, flags);

        fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
        if (fault == NULL) {
                /* We are OOM - send success and let the device re-fault */
                finish_pri_tag(dev_state, pasid_state, tag);
                goto out_drop_state;
        }

        fault->dev_state = dev_state;
        fault->address   = iommu_fault->address;
        fault->state     = pasid_state;
        fault->tag       = tag;
        fault->finish    = finish;
        fault->pasid     = iommu_fault->pasid;
        fault->flags     = iommu_fault->flags;
        INIT_WORK(&fault->work, do_fault);

        queue_work(iommu_wq, &fault->work);

        ret = NOTIFY_OK;

out_drop_state:

        if (ret != NOTIFY_OK && pasid_state)
                put_pasid_state(pasid_state);

        put_device_state(dev_state);

out:
        /* Drop the reference taken by pci_get_bus_and_slot() above */
        pci_dev_put(pdev);

        return ret;
}

static struct notifier_block ppr_nb = {
        .notifier_call = ppr_notifier,
};

int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
                         struct task_struct *task)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        struct mm_struct *mm;
        u16 devid;
        int ret;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid     = device_id(pdev);
        dev_state = get_device_state(devid);

        if (dev_state == NULL)
                return -EINVAL;

        ret = -EINVAL;
        if (pasid < 0 || pasid >= dev_state->max_pasids)
                goto out;

        ret = -ENOMEM;
        pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
        if (pasid_state == NULL)
                goto out;

        atomic_set(&pasid_state->count, 1);
        init_waitqueue_head(&pasid_state->wq);
        spin_lock_init(&pasid_state->lock);

        mm                        = get_task_mm(task);
        pasid_state->mm           = mm;
        pasid_state->device_state = dev_state;
        pasid_state->pasid        = pasid;
        pasid_state->invalid      = true; /* Stays invalid until setup of
                                             the pasid is complete */
        pasid_state->mn.ops       = &iommu_mn;

        if (pasid_state->mm == NULL)
                goto out_free;

        mmu_notifier_register(&pasid_state->mn, mm);

        ret = set_pasid_state(dev_state, pasid_state, pasid);
        if (ret)
                goto out_unregister;

        ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
                                        __pa(pasid_state->mm->pgd));
        if (ret)
                goto out_clear_state;

        /* Now we are ready to handle faults */
        pasid_state->invalid = false;

        /*
         * Drop the reference to the mm_struct here. We rely on the
         * mmu_notifier release call-back to inform us when the mm
         * is going away.
         */
        mmput(mm);

        return 0;

out_clear_state:
        clear_pasid_state(dev_state, pasid);

out_unregister:
        mmu_notifier_unregister(&pasid_state->mn, mm);
        mmput(mm);

out_free:
        free_pasid_state(pasid_state);

out:
        put_device_state(dev_state);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);

void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
        u16 devid;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return;

        devid = device_id(pdev);
        dev_state = get_device_state(devid);
        if (dev_state == NULL)
                return;

        if (pasid < 0 || pasid >= dev_state->max_pasids)
                goto out;

        pasid_state = get_pasid_state(dev_state, pasid);
        if (pasid_state == NULL)
                goto out;
        /*
         * Drop reference taken here. We are safe because we still hold
         * the reference taken in the amd_iommu_bind_pasid function.
         */
        put_pasid_state(pasid_state);

        /* Clear the pasid state so that the pasid can be re-used */
        clear_pasid_state(dev_state, pasid_state->pasid);

        /*
         * Call mmu_notifier_unregister to drop our reference
         * to pasid_state->mm
         */
        mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

        put_pasid_state_wait(pasid_state); /* Reference taken in
                                              amd_iommu_bind_pasid */
out:
        /* Drop reference taken in this function */
        put_device_state(dev_state);

        /* Drop reference taken in amd_iommu_bind_pasid */
        put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);

int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
        struct device_state *dev_state;
        struct iommu_group *group;
        unsigned long flags;
        int ret, tmp;
        u16 devid;

        might_sleep();

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        if (pasids <= 0 || pasids > (PASID_MASK + 1))
                return -EINVAL;

        devid = device_id(pdev);

        dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
        if (dev_state == NULL)
                return -ENOMEM;

        spin_lock_init(&dev_state->lock);
        init_waitqueue_head(&dev_state->wq);
        dev_state->pdev  = pdev;
        dev_state->devid = devid;

        tmp = pasids;
        for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
                dev_state->pasid_levels += 1;
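
        /*
         * Example (illustrative only): pasids == 512 gives
         * pasid_levels == 0 (a single 512-entry table suffices), while
         * pasids == 65536 gives pasid_levels == 1 (a root table whose
         * entries point to up to 128 level-1 tables).
         */
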
797
798         atomic_set(&dev_state->count, 1);
799         dev_state->max_pasids = pasids;
800
801         ret = -ENOMEM;
802         dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
803         if (dev_state->states == NULL)
804                 goto out_free_dev_state;
805
806         dev_state->domain = iommu_domain_alloc(&pci_bus_type);
807         if (dev_state->domain == NULL)
808                 goto out_free_states;
809
810         amd_iommu_domain_direct_map(dev_state->domain);
811
812         ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
813         if (ret)
814                 goto out_free_domain;
815
816         group = iommu_group_get(&pdev->dev);
817         if (!group) {
818                 ret = -EINVAL;
819                 goto out_free_domain;
820         }
821
822         ret = iommu_attach_group(dev_state->domain, group);
823         if (ret != 0)
824                 goto out_drop_group;
825
826         iommu_group_put(group);
827
828         spin_lock_irqsave(&state_lock, flags);
829
830         if (__get_device_state(devid) != NULL) {
831                 spin_unlock_irqrestore(&state_lock, flags);
832                 ret = -EBUSY;
833                 goto out_free_domain;
834         }
835
836         list_add_tail(&dev_state->list, &state_list);
837
838         spin_unlock_irqrestore(&state_lock, flags);
839
840         return 0;
841
842 out_drop_group:
843         iommu_group_put(group);
844
845 out_free_domain:
846         iommu_domain_free(dev_state->domain);
847
848 out_free_states:
849         free_page((unsigned long)dev_state->states);
850
851 out_free_dev_state:
852         kfree(dev_state);
853
854         return ret;
855 }
856 EXPORT_SYMBOL(amd_iommu_init_device);

void amd_iommu_free_device(struct pci_dev *pdev)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;

        if (!amd_iommu_v2_supported())
                return;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        dev_state = __get_device_state(devid);
        if (dev_state == NULL) {
                spin_unlock_irqrestore(&state_lock, flags);
                return;
        }

        list_del(&dev_state->list);

        spin_unlock_irqrestore(&state_lock, flags);

        /* Get rid of any remaining pasid states */
        free_pasid_states(dev_state);

        put_device_state(dev_state);
        /*
         * Wait until the last reference is dropped before freeing
         * the device state.
         */
        wait_event(dev_state->wq, !atomic_read(&dev_state->count));
        free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
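
/*
 * Typical driver lifecycle (a minimal sketch; the error handling shown
 * is abbreviated and the MY_MAX_PASIDS constant is hypothetical):
 *
 *      ret = amd_iommu_init_device(pdev, MY_MAX_PASIDS);
 *      if (ret)
 *              return ret;
 *
 *      ret = amd_iommu_bind_pasid(pdev, pasid, current);
 *      ...
 *      amd_iommu_unbind_pasid(pdev, pasid);
 *      amd_iommu_free_device(pdev);
 */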

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
                                 amd_iommu_invalid_ppr_cb cb)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;
        int ret;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        ret = -EINVAL;
        dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;

        dev_state->inv_ppr_cb = cb;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);

int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
                                    amd_iommu_invalidate_ctx cb)
{
        struct device_state *dev_state;
        unsigned long flags;
        u16 devid;
        int ret;

        if (!amd_iommu_v2_supported())
                return -ENODEV;

        devid = device_id(pdev);

        spin_lock_irqsave(&state_lock, flags);

        ret = -EINVAL;
        dev_state = __get_device_state(devid);
        if (dev_state == NULL)
                goto out_unlock;

        dev_state->inv_ctx_cb = cb;

        ret = 0;

out_unlock:
        spin_unlock_irqrestore(&state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
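
/*
 * A minimal sketch of wiring up the optional callbacks (the callback
 * body is hypothetical; see the prototypes in linux/amd-iommu.h):
 *
 *      static int my_inv_ppr_cb(struct pci_dev *pdev, int pasid,
 *                               unsigned long address, u16 flags)
 *      {
 *              return AMD_IOMMU_INV_PRI_RSP_INVALID;
 *      }
 *
 *      amd_iommu_set_invalid_ppr_cb(pdev, my_inv_ppr_cb);
 *      amd_iommu_set_invalidate_ctx_cb(pdev, my_inv_ctx_cb);
 */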

static int __init amd_iommu_v2_init(void)
{
        int ret;

        pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");

        if (!amd_iommu_v2_supported()) {
                pr_info("AMD IOMMUv2 functionality not available on this system\n");
                /*
                 * Load anyway to provide the symbols to other modules
                 * which may use AMD IOMMUv2 optionally.
                 */
                return 0;
        }

        spin_lock_init(&state_lock);

        ret = -ENOMEM;
        iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
        if (iommu_wq == NULL)
                goto out;

        amd_iommu_register_ppr_notifier(&ppr_nb);

        return 0;

out:
        return ret;
}

static void __exit amd_iommu_v2_exit(void)
{
        struct device_state *dev_state;
        int i;

        if (!amd_iommu_v2_supported())
                return;

        amd_iommu_unregister_ppr_notifier(&ppr_nb);

        flush_workqueue(iommu_wq);

        /*
         * The loop below might call flush_workqueue(), so call
         * destroy_workqueue() after it
         */
        for (i = 0; i < MAX_DEVICES; ++i) {
                dev_state = get_device_state(i);

                if (dev_state == NULL)
                        continue;

                WARN_ON_ONCE(1);

                put_device_state(dev_state);
                amd_iommu_free_device(dev_state->pdev);
        }

        destroy_workqueue(iommu_wq);
}

module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);