GNU Linux-libre 4.19.295-gnu1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #define pr_fmt(fmt) "kfd2kgd: " fmt
24
25 #include <linux/list.h>
26 #include <linux/pagemap.h>
27 #include <linux/sched/mm.h>
28 #include <drm/drmP.h>
29 #include "amdgpu_object.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32
33 /* Special VM and GART address alignment needed for VI pre-Fiji due to
34  * a HW bug.
35  */
36 #define VI_BO_SIZE_ALIGN (0x8000)
37
38 /* BO flag to indicate a KFD userptr BO */
39 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
40
41 /* Userptr restore delay, just long enough to allow consecutive VM
42  * changes to accumulate
43  */
44 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
45
46 /* Impose limit on how much memory KFD can use */
47 static struct {
48         uint64_t max_system_mem_limit;
49         uint64_t max_userptr_mem_limit;
50         int64_t system_mem_used;
51         int64_t userptr_mem_used;
52         spinlock_t mem_limit_lock;
53 } kfd_mem_limit;
54
55 /* Struct used for amdgpu_amdkfd_bo_validate */
56 struct amdgpu_vm_parser {
57         uint32_t        domain;
58         bool            wait;
59 };
60
61 static const char * const domain_bit_to_string[] = {
62                 "CPU",
63                 "GTT",
64                 "VRAM",
65                 "GDS",
66                 "GWS",
67                 "OA"
68 };
69
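/* ffs() returns the 1-based index of the least significant set bit, so a
 * single-bit AMDGPU_GEM_DOMAIN_* mask indexes the table above; for example
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) resolves to "VRAM".
 */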
70 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
71
72 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
73
74
75 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
76 {
77         return (struct amdgpu_device *)kgd;
78 }
79
80 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
81                 struct kgd_mem *mem)
82 {
83         struct kfd_bo_va_list *entry;
84
85         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
86                 if (entry->bo_va->base.vm == avm)
87                         return false;
88
89         return true;
90 }
91
92 /* Set memory usage limits. Currently, the limits are:
93  *  System (kernel) memory - 3/8th of system RAM
94  *  Userptr memory - 3/4th of system RAM
95  */
96 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
97 {
98         struct sysinfo si;
99         uint64_t mem;
100
101         si_meminfo(&si);
102         mem = si.totalram - si.totalhigh;
103         mem *= si.mem_unit;
104
105         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
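        /* (mem >> 1) - (mem >> 3) is mem/2 - mem/8 = 3*mem/8, and
         * mem - (mem >> 2) is mem - mem/4 = 3*mem/4, i.e. the 3/8 and
         * 3/4 fractions documented above.
         */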
106         kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
107         kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
108         pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
109                 (kfd_mem_limit.max_system_mem_limit >> 20),
110                 (kfd_mem_limit.max_userptr_mem_limit >> 20));
111 }
112
113 static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
114                                               uint64_t size, u32 domain)
115 {
116         size_t acc_size;
117         int ret = 0;
118
119         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
120                                        sizeof(struct amdgpu_bo));
121
122         spin_lock(&kfd_mem_limit.mem_limit_lock);
123         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
124                 if (kfd_mem_limit.system_mem_used + (acc_size + size) >
125                         kfd_mem_limit.max_system_mem_limit) {
126                         ret = -ENOMEM;
127                         goto err_no_mem;
128                 }
129                 kfd_mem_limit.system_mem_used += (acc_size + size);
130         } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
131                 if ((kfd_mem_limit.system_mem_used + acc_size >
132                         kfd_mem_limit.max_system_mem_limit) ||
133                         (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
134                         kfd_mem_limit.max_userptr_mem_limit)) {
135                         ret = -ENOMEM;
136                         goto err_no_mem;
137                 }
138                 kfd_mem_limit.system_mem_used += acc_size;
139                 kfd_mem_limit.userptr_mem_used += size;
140         }
141 err_no_mem:
142         spin_unlock(&kfd_mem_limit.mem_limit_lock);
143         return ret;
144 }
145
146 static void unreserve_system_mem_limit(struct amdgpu_device *adev,
147                                        uint64_t size, u32 domain)
148 {
149         size_t acc_size;
150
151         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
152                                        sizeof(struct amdgpu_bo));
153
154         spin_lock(&kfd_mem_limit.mem_limit_lock);
155         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
156                 kfd_mem_limit.system_mem_used -= (acc_size + size);
157         } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
158                 kfd_mem_limit.system_mem_used -= acc_size;
159                 kfd_mem_limit.userptr_mem_used -= size;
160         }
161         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
162                   "kfd system memory accounting unbalanced");
163         WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
164                   "kfd userptr memory accounting unbalanced");
165
166         spin_unlock(&kfd_mem_limit.mem_limit_lock);
167 }
168
169 void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
170 {
171         spin_lock(&kfd_mem_limit.mem_limit_lock);
172
173         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
174                 kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
175                 kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
176         } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
177                 kfd_mem_limit.system_mem_used -=
178                         (bo->tbo.acc_size + amdgpu_bo_size(bo));
179         }
180         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
181                   "kfd system memory accounting unbalanced");
182         WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
183                   "kfd userptr memory accounting unbalanced");
184
185         spin_unlock(&kfd_mem_limit.mem_limit_lock);
186 }
187
188
189 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
190  *  reservation object.
191  *
192  * @bo: [IN] Remove eviction fence(s) from this BO
193  * @ef: [IN] If ef is specified, then this eviction fence is removed if it
194  *  is present in the shared list.
195  * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
196  *  from BO's reservation object shared list.
197  * @ef_count: [OUT] Number of fences in ef_list.
198  *
199  * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
200  * called to restore the eviction fences and to avoid a memory leak. This is
201  *  useful for shared BOs.
202  * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
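 * NOTE: A typical wait sequence (see amdgpu_amdkfd_bo_validate below)
 *  removes the fences, waits on the BO and then restores them:
 *    amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list, &ef_count);
 *    ttm_bo_wait(&bo->tbo, false, false);
 *    amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);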
203  */
204 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
205                                         struct amdgpu_amdkfd_fence *ef,
206                                         struct amdgpu_amdkfd_fence ***ef_list,
207                                         unsigned int *ef_count)
208 {
209         struct reservation_object *resv = bo->tbo.resv;
210         struct reservation_object_list *old, *new;
211         unsigned int i, j, k;
212
213         if (!ef && !ef_list)
214                 return -EINVAL;
215
216         if (ef_list) {
217                 *ef_list = NULL;
218                 *ef_count = 0;
219         }
220
221         old = reservation_object_get_list(resv);
222         if (!old)
223                 return 0;
224
225         new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
226                       GFP_KERNEL);
227         if (!new)
228                 return -ENOMEM;
229
230         /* Go through all the shared fences in the reservation object and sort
231          * the interesting ones to the end of the list.
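         * Index k grows from the front for fences that are kept, index j
         * shrinks from shared_count for the eviction fences being removed.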
232          */
233         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
234                 struct dma_fence *f;
235
236                 f = rcu_dereference_protected(old->shared[i],
237                                               reservation_object_held(resv));
238
239                 if ((ef && f->context == ef->base.context) ||
240                     (!ef && to_amdgpu_amdkfd_fence(f)))
241                         RCU_INIT_POINTER(new->shared[--j], f);
242                 else
243                         RCU_INIT_POINTER(new->shared[k++], f);
244         }
245         new->shared_max = old->shared_max;
246         new->shared_count = k;
247
248         if (!ef) {
249                 unsigned int count = old->shared_count - j;
250
251                 /* Allocate memory for 'count' eviction fence pointers and
252                  * fill in the ef_list array and ef_count.
253                  */
254                 *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
255                 *ef_count = count;
256
257                 if (!*ef_list) {
258                         kfree(new);
259                         return -ENOMEM;
260                 }
261         }
262
263         /* Install the new fence list, seqcount provides the barriers */
264         preempt_disable();
265         write_seqcount_begin(&resv->seq);
266         RCU_INIT_POINTER(resv->fence, new);
267         write_seqcount_end(&resv->seq);
268         preempt_enable();
269
270         /* Drop the references to the removed fences or move them to ef_list */
271         for (i = j, k = 0; i < old->shared_count; ++i) {
272                 struct dma_fence *f;
273
274                 f = rcu_dereference_protected(new->shared[i],
275                                               reservation_object_held(resv));
276                 if (!ef)
277                         (*ef_list)[k++] = to_amdgpu_amdkfd_fence(f);
278                 else
279                         dma_fence_put(f);
280         }
281         kfree_rcu(old, rcu);
282
283         return 0;
284 }
285
286 /* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
287  *  reservation object.
288  *
289  * @bo: [IN] Add eviction fences to this BO
290  * @ef_list: [IN] List of eviction fences to be added
291  * @ef_count: [IN] Number of fences in ef_list.
292  *
293  * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
294  *  function.
295  */
296 static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
297                                 struct amdgpu_amdkfd_fence **ef_list,
298                                 unsigned int ef_count)
299 {
300         int i;
301
302         if (!ef_list || !ef_count)
303                 return;
304
305         for (i = 0; i < ef_count; i++) {
306                 amdgpu_bo_fence(bo, &ef_list[i]->base, true);
307                 /* Re-adding the fence takes an additional reference. Drop that
308                  * reference.
309                  */
310                 dma_fence_put(&ef_list[i]->base);
311         }
312
313         kfree(ef_list);
314 }
315
316 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
317                                      bool wait)
318 {
319         struct ttm_operation_ctx ctx = { false, false };
320         int ret;
321
322         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
323                  "Called with userptr BO"))
324                 return -EINVAL;
325
326         amdgpu_bo_placement_from_domain(bo, domain);
327
328         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
329         if (ret)
330                 goto validate_fail;
331         if (wait) {
332                 struct amdgpu_amdkfd_fence **ef_list;
333                 unsigned int ef_count;
334
335                 ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
336                                                           &ef_count);
337                 if (ret)
338                         goto validate_fail;
339
340                 ttm_bo_wait(&bo->tbo, false, false);
341                 amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
342         }
343
344 validate_fail:
345         return ret;
346 }
347
348 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
349 {
350         struct amdgpu_vm_parser *p = param;
351
352         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
353 }
354
355 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
356  *
357  * Page directories are not updated here because huge page handling
358  * during page table updates can invalidate page directory entries
359  * again. Page directories are only updated after updating page
360  * tables.
361  */
362 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
363 {
364         struct amdgpu_bo *pd = vm->root.base.bo;
365         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
366         struct amdgpu_vm_parser param;
367         uint64_t addr, flags = AMDGPU_PTE_VALID;
368         int ret;
369
370         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
371         param.wait = false;
372
373         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
374                                         &param);
375         if (ret) {
376                 pr_err("amdgpu: failed to validate PT BOs\n");
377                 return ret;
378         }
379
380         ret = amdgpu_amdkfd_validate(&param, pd);
381         if (ret) {
382                 pr_err("amdgpu: failed to validate PD\n");
383                 return ret;
384         }
385
386         addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
387         amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
388         vm->pd_phys_addr = addr;
389
390         if (vm->use_cpu_for_update) {
391                 ret = amdgpu_bo_kmap(pd, NULL);
392                 if (ret) {
393                         pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
394                         return ret;
395                 }
396         }
397
398         return 0;
399 }
400
401 static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
402                          struct dma_fence *f)
403 {
404         int ret = amdgpu_sync_fence(adev, sync, f, false);
405
406         /* Sync objects can't handle multiple GPUs (contexts) updating
407          * sync->last_vm_update. Fortunately we don't need it for
408          * KFD's purposes, so we can just drop that fence.
409          */
410         if (sync->last_vm_update) {
411                 dma_fence_put(sync->last_vm_update);
412                 sync->last_vm_update = NULL;
413         }
414
415         return ret;
416 }
417
418 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
419 {
420         struct amdgpu_bo *pd = vm->root.base.bo;
421         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
422         int ret;
423
424         ret = amdgpu_vm_update_directories(adev, vm);
425         if (ret)
426                 return ret;
427
428         return sync_vm_fence(adev, sync, vm->last_update);
429 }
430
431 /* add_bo_to_vm - Add a BO to a VM
432  *
433  * Everything that needs to be done only once when a BO is first added
434  * to a VM. It can later be mapped and unmapped many times without
435  * repeating these steps.
436  *
437  * 1. Allocate and initialize BO VA entry data structure
438  * 2. Add BO to the VM
439  * 3. Determine ASIC-specific PTE flags
440  * 4. Alloc page tables and directories if needed
441  * 4a.  Validate new page tables and directories
442  */
443 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
444                 struct amdgpu_vm *vm, bool is_aql,
445                 struct kfd_bo_va_list **p_bo_va_entry)
446 {
447         int ret;
448         struct kfd_bo_va_list *bo_va_entry;
449         struct amdgpu_bo *pd = vm->root.base.bo;
450         struct amdgpu_bo *bo = mem->bo;
451         uint64_t va = mem->va;
452         struct list_head *list_bo_va = &mem->bo_va_list;
453         unsigned long bo_size = bo->tbo.mem.size;
454
455         if (!va) {
456                 pr_err("Invalid VA when adding BO to VM\n");
457                 return -EINVAL;
458         }
459
460         if (is_aql)
461                 va += bo_size;
462
463         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
464         if (!bo_va_entry)
465                 return -ENOMEM;
466
467         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
468                         va + bo_size, vm);
469
470         /* Add BO to VM internal data structures */
471         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
472         if (!bo_va_entry->bo_va) {
473                 ret = -EINVAL;
474                 pr_err("Failed to add BO object to VM. ret == %d\n",
475                                 ret);
476                 goto err_vmadd;
477         }
478
479         bo_va_entry->va = va;
480         bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
481                                                          mem->mapping_flags);
482         bo_va_entry->kgd_dev = (void *)adev;
483         list_add(&bo_va_entry->bo_list, list_bo_va);
484
485         if (p_bo_va_entry)
486                 *p_bo_va_entry = bo_va_entry;
487
488         /* Allocate new page tables if needed and validate them.
489          * Clearing the new page tables and validating them need to wait
490          * on move fences. We don't want that to trigger the eviction
491          * fence, so remove it temporarily.
492          */
493         amdgpu_amdkfd_remove_eviction_fence(pd,
494                                         vm->process_info->eviction_fence,
495                                         NULL, NULL);
496
497         ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
498         if (ret) {
499                 pr_err("Failed to allocate pts, err=%d\n", ret);
500                 goto err_alloc_pts;
501         }
502
503         ret = vm_validate_pt_pd_bos(vm);
504         if (ret) {
505                 pr_err("validate_pt_pd_bos() failed\n");
506                 goto err_alloc_pts;
507         }
508
509         /* Add the eviction fence back */
510         amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
511
512         return 0;
513
514 err_alloc_pts:
515         amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
516         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
517         list_del(&bo_va_entry->bo_list);
518 err_vmadd:
519         kfree(bo_va_entry);
520         return ret;
521 }
522
523 static void remove_bo_from_vm(struct amdgpu_device *adev,
524                 struct kfd_bo_va_list *entry, unsigned long size)
525 {
526         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
527                         entry->va,
528                         entry->va + size, entry);
529         amdgpu_vm_bo_rmv(adev, entry->bo_va);
530         list_del(&entry->bo_list);
531         kfree(entry);
532 }
533
534 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
535                                 struct amdkfd_process_info *process_info,
536                                 bool userptr)
537 {
538         struct ttm_validate_buffer *entry = &mem->validate_list;
539         struct amdgpu_bo *bo = mem->bo;
540
541         INIT_LIST_HEAD(&entry->head);
542         entry->shared = true;
543         entry->bo = &bo->tbo;
544         mutex_lock(&process_info->lock);
545         if (userptr)
546                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
547         else
548                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
549         mutex_unlock(&process_info->lock);
550 }
551
552 /* Initializes user pages. It registers the MMU notifier and validates
553  * the userptr BO in the GTT domain.
554  *
555  * The BO must already be on the userptr_valid_list. Otherwise an
556  * eviction and restore may happen that leaves the new BO unmapped
557  * with the user mode queues running.
558  *
559  * Takes the process_info->lock to protect against concurrent restore
560  * workers.
561  *
562  * Returns 0 for success, negative errno for errors.
563  */
564 static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
565                            uint64_t user_addr)
566 {
567         struct amdkfd_process_info *process_info = mem->process_info;
568         struct amdgpu_bo *bo = mem->bo;
569         struct ttm_operation_ctx ctx = { true, false };
570         int ret = 0;
571
572         mutex_lock(&process_info->lock);
573
574         ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
575         if (ret) {
576                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
577                 goto out;
578         }
579
580         ret = amdgpu_mn_register(bo, user_addr);
581         if (ret) {
582                 pr_err("%s: Failed to register MMU notifier: %d\n",
583                        __func__, ret);
584                 goto out;
585         }
586
587         /* If no restore worker is running concurrently, user_pages
588          * should not be allocated
589          */
590         WARN(mem->user_pages, "Leaking user_pages array");
591
592         mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
593                                            sizeof(struct page *),
594                                            GFP_KERNEL | __GFP_ZERO);
595         if (!mem->user_pages) {
596                 pr_err("%s: Failed to allocate pages array\n", __func__);
597                 ret = -ENOMEM;
598                 goto unregister_out;
599         }
600
601         ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
602         if (ret) {
603                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
604                 goto free_out;
605         }
606
607         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);
608
609         ret = amdgpu_bo_reserve(bo, true);
610         if (ret) {
611                 pr_err("%s: Failed to reserve BO\n", __func__);
612                 goto release_out;
613         }
614         amdgpu_bo_placement_from_domain(bo, mem->domain);
615         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
616         if (ret)
617                 pr_err("%s: failed to validate BO\n", __func__);
618         amdgpu_bo_unreserve(bo);
619
620 release_out:
621         if (ret)
622                 release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
623 free_out:
624         kvfree(mem->user_pages);
625         mem->user_pages = NULL;
626 unregister_out:
627         if (ret)
628                 amdgpu_mn_unregister(bo);
629 out:
630         mutex_unlock(&process_info->lock);
631         return ret;
632 }
633
634 /* Reserving a BO and its page table BOs must happen atomically to
635  * avoid deadlocks. Some operations update multiple VMs at once. Track
636  * all the reservation info in a context structure. Optionally a sync
637  * object can track VM updates.
638  */
639 struct bo_vm_reservation_context {
640         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
641         unsigned int n_vms;                 /* Number of VMs reserved       */
642         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
643         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
644         struct list_head list, duplicates;  /* BO lists                     */
645         struct amdgpu_sync *sync;           /* Pointer to sync object       */
646         bool reserved;                      /* Whether BOs are reserved     */
647 };
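/* Typical flow: reserve_bo_and_vm() or reserve_bo_and_cond_vms() fills in a
 * stack-allocated context, the caller performs its updates, and
 * unreserve_bo_and_vms() backs off the reservations and frees vm_pd.
 */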
648
649 enum bo_vm_match {
650         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
651         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
652         BO_VM_ALL,              /* Match all VMs a BO was added to    */
653 };
654
655 /**
656  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
657  * @mem: KFD BO structure.
658  * @vm: the VM to reserve.
659  * @ctx: the struct that will be used in unreserve_bo_and_vms().
660  */
661 static int reserve_bo_and_vm(struct kgd_mem *mem,
662                               struct amdgpu_vm *vm,
663                               struct bo_vm_reservation_context *ctx)
664 {
665         struct amdgpu_bo *bo = mem->bo;
666         int ret;
667
668         WARN_ON(!vm);
669
670         ctx->reserved = false;
671         ctx->n_vms = 1;
672         ctx->sync = &mem->sync;
673
674         INIT_LIST_HEAD(&ctx->list);
675         INIT_LIST_HEAD(&ctx->duplicates);
676
677         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
678         if (!ctx->vm_pd)
679                 return -ENOMEM;
680
681         ctx->kfd_bo.robj = bo;
682         ctx->kfd_bo.priority = 0;
683         ctx->kfd_bo.tv.bo = &bo->tbo;
684         ctx->kfd_bo.tv.shared = true;
685         ctx->kfd_bo.user_pages = NULL;
686         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
687
688         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
689
690         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
691                                      false, &ctx->duplicates);
692         if (!ret)
693                 ctx->reserved = true;
694         else {
695                 pr_err("Failed to reserve buffers in ttm\n");
696                 kfree(ctx->vm_pd);
697                 ctx->vm_pd = NULL;
698         }
699
700         return ret;
701 }
702
703 /**
704  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
705  * @mem: KFD BO structure.
706  * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
707  * reserved. Otherwise, only the given VM is reserved.
708  * @map_type: the mapping status that will be used to filter the VMs.
709  * @ctx: the struct that will be used in unreserve_bo_and_vms().
710  *
711  * Returns 0 for success, negative for failure.
712  */
713 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
714                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
715                                 struct bo_vm_reservation_context *ctx)
716 {
717         struct amdgpu_bo *bo = mem->bo;
718         struct kfd_bo_va_list *entry;
719         unsigned int i;
720         int ret;
721
722         ctx->reserved = false;
723         ctx->n_vms = 0;
724         ctx->vm_pd = NULL;
725         ctx->sync = &mem->sync;
726
727         INIT_LIST_HEAD(&ctx->list);
728         INIT_LIST_HEAD(&ctx->duplicates);
729
730         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
731                 if ((vm && vm != entry->bo_va->base.vm) ||
732                         (entry->is_mapped != map_type
733                         && map_type != BO_VM_ALL))
734                         continue;
735
736                 ctx->n_vms++;
737         }
738
739         if (ctx->n_vms != 0) {
740                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
741                                      GFP_KERNEL);
742                 if (!ctx->vm_pd)
743                         return -ENOMEM;
744         }
745
746         ctx->kfd_bo.robj = bo;
747         ctx->kfd_bo.priority = 0;
748         ctx->kfd_bo.tv.bo = &bo->tbo;
749         ctx->kfd_bo.tv.shared = true;
750         ctx->kfd_bo.user_pages = NULL;
751         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
752
753         i = 0;
754         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
755                 if ((vm && vm != entry->bo_va->base.vm) ||
756                         (entry->is_mapped != map_type
757                         && map_type != BO_VM_ALL))
758                         continue;
759
760                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
761                                 &ctx->vm_pd[i]);
762                 i++;
763         }
764
765         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
766                                      false, &ctx->duplicates);
767         if (!ret)
768                 ctx->reserved = true;
769         else
770                 pr_err("Failed to reserve buffers in ttm.\n");
771
772         if (ret) {
773                 kfree(ctx->vm_pd);
774                 ctx->vm_pd = NULL;
775         }
776
777         return ret;
778 }
779
780 /**
781  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
782  * @ctx: Reservation context to unreserve
783  * @wait: Optionally wait for a sync object representing pending VM updates
784  * @intr: Whether the wait is interruptible
785  *
786  * Also frees any resources allocated in
787  * reserve_bo_and_(cond_)vm(s). Returns the status from
788  * amdgpu_sync_wait.
789  */
790 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
791                                  bool wait, bool intr)
792 {
793         int ret = 0;
794
795         if (wait)
796                 ret = amdgpu_sync_wait(ctx->sync, intr);
797
798         if (ctx->reserved)
799                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
800         kfree(ctx->vm_pd);
801
802         ctx->sync = NULL;
803
804         ctx->reserved = false;
805         ctx->vm_pd = NULL;
806
807         return ret;
808 }
809
810 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
811                                 struct kfd_bo_va_list *entry,
812                                 struct amdgpu_sync *sync)
813 {
814         struct amdgpu_bo_va *bo_va = entry->bo_va;
815         struct amdgpu_vm *vm = bo_va->base.vm;
816         struct amdgpu_bo *pd = vm->root.base.bo;
817
818         /* Remove eviction fence from PD (and thereby from PTs too as
819          * they share the resv. object). Otherwise, during the PT update
820          * job (see amdgpu_vm_bo_update_mapping), the eviction fence would
821          * get added to the job->sync object and job execution would
822          * trigger the eviction fence.
823          */
824         amdgpu_amdkfd_remove_eviction_fence(pd,
825                                             vm->process_info->eviction_fence,
826                                             NULL, NULL);
827         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
828
829         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
830
831         /* Add the eviction fence back */
832         amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
833
834         sync_vm_fence(adev, sync, bo_va->last_pt_update);
835
836         return 0;
837 }
838
839 static int update_gpuvm_pte(struct amdgpu_device *adev,
840                 struct kfd_bo_va_list *entry,
841                 struct amdgpu_sync *sync)
842 {
843         int ret;
844         struct amdgpu_vm *vm;
845         struct amdgpu_bo_va *bo_va;
846         struct amdgpu_bo *bo;
847
848         bo_va = entry->bo_va;
849         vm = bo_va->base.vm;
850         bo = bo_va->base.bo;
851
852         /* Update the page tables  */
853         ret = amdgpu_vm_bo_update(adev, bo_va, false);
854         if (ret) {
855                 pr_err("amdgpu_vm_bo_update failed\n");
856                 return ret;
857         }
858
859         return sync_vm_fence(adev, sync, bo_va->last_pt_update);
860 }
861
862 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
863                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
864                 bool no_update_pte)
865 {
866         int ret;
867
868         /* Set virtual address for the allocation */
869         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
870                                amdgpu_bo_size(entry->bo_va->base.bo),
871                                entry->pte_flags);
872         if (ret) {
873                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
874                                 entry->va, ret);
875                 return ret;
876         }
877
878         if (no_update_pte)
879                 return 0;
880
881         ret = update_gpuvm_pte(adev, entry, sync);
882         if (ret) {
883                 pr_err("update_gpuvm_pte() failed\n");
884                 goto update_gpuvm_pte_failed;
885         }
886
887         return 0;
888
889 update_gpuvm_pte_failed:
890         unmap_bo_from_gpuvm(adev, entry, sync);
891         return ret;
892 }
893
894 static int process_validate_vms(struct amdkfd_process_info *process_info)
895 {
896         struct amdgpu_vm *peer_vm;
897         int ret;
898
899         list_for_each_entry(peer_vm, &process_info->vm_list_head,
900                             vm_list_node) {
901                 ret = vm_validate_pt_pd_bos(peer_vm);
902                 if (ret)
903                         return ret;
904         }
905
906         return 0;
907 }
908
909 static int process_update_pds(struct amdkfd_process_info *process_info,
910                               struct amdgpu_sync *sync)
911 {
912         struct amdgpu_vm *peer_vm;
913         int ret;
914
915         list_for_each_entry(peer_vm, &process_info->vm_list_head,
916                             vm_list_node) {
917                 ret = vm_update_pds(peer_vm, sync);
918                 if (ret)
919                         return ret;
920         }
921
922         return 0;
923 }
924
925 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
926                        struct dma_fence **ef)
927 {
928         struct amdkfd_process_info *info = NULL;
929         int ret;
930
931         if (!*process_info) {
932                 info = kzalloc(sizeof(*info), GFP_KERNEL);
933                 if (!info)
934                         return -ENOMEM;
935
936                 mutex_init(&info->lock);
937                 INIT_LIST_HEAD(&info->vm_list_head);
938                 INIT_LIST_HEAD(&info->kfd_bo_list);
939                 INIT_LIST_HEAD(&info->userptr_valid_list);
940                 INIT_LIST_HEAD(&info->userptr_inval_list);
941
942                 info->eviction_fence =
943                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
944                                                    current->mm);
945                 if (!info->eviction_fence) {
946                         pr_err("Failed to create eviction fence\n");
947                         ret = -ENOMEM;
948                         goto create_evict_fence_fail;
949                 }
950
951                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
952                 atomic_set(&info->evicted_bos, 0);
953                 INIT_DELAYED_WORK(&info->restore_userptr_work,
954                                   amdgpu_amdkfd_restore_userptr_worker);
955
956                 *process_info = info;
957                 *ef = dma_fence_get(&info->eviction_fence->base);
958         }
959
960         vm->process_info = *process_info;
961
962         /* Validate page directory and attach eviction fence */
963         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
964         if (ret)
965                 goto reserve_pd_fail;
966         ret = vm_validate_pt_pd_bos(vm);
967         if (ret) {
968                 pr_err("validate_pt_pd_bos() failed\n");
969                 goto validate_pd_fail;
970         }
971         ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
972         if (ret)
973                 goto wait_pd_fail;
974         amdgpu_bo_fence(vm->root.base.bo,
975                         &vm->process_info->eviction_fence->base, true);
976         amdgpu_bo_unreserve(vm->root.base.bo);
977
978         /* Update process info */
979         mutex_lock(&vm->process_info->lock);
980         list_add_tail(&vm->vm_list_node,
981                         &(vm->process_info->vm_list_head));
982         vm->process_info->n_vms++;
983         mutex_unlock(&vm->process_info->lock);
984
985         return 0;
986
987 wait_pd_fail:
988 validate_pd_fail:
989         amdgpu_bo_unreserve(vm->root.base.bo);
990 reserve_pd_fail:
991         vm->process_info = NULL;
992         if (info) {
993                 /* Two fence references: one in info and one in *ef */
994                 dma_fence_put(&info->eviction_fence->base);
995                 dma_fence_put(*ef);
996                 *ef = NULL;
997                 *process_info = NULL;
998                 put_pid(info->pid);
999 create_evict_fence_fail:
1000                 mutex_destroy(&info->lock);
1001                 kfree(info);
1002         }
1003         return ret;
1004 }
1005
1006 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
1007                                           void **process_info,
1008                                           struct dma_fence **ef)
1009 {
1010         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1011         struct amdgpu_vm *new_vm;
1012         int ret;
1013
1014         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1015         if (!new_vm)
1016                 return -ENOMEM;
1017
1018         /* Initialize AMDGPU part of the VM */
1019         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
1020         if (ret) {
1021                 pr_err("Failed init vm ret %d\n", ret);
1022                 goto amdgpu_vm_init_fail;
1023         }
1024
1025         /* Initialize KFD part of the VM and process info */
1026         ret = init_kfd_vm(new_vm, process_info, ef);
1027         if (ret)
1028                 goto init_kfd_vm_fail;
1029
1030         *vm = (void *) new_vm;
1031
1032         return 0;
1033
1034 init_kfd_vm_fail:
1035         amdgpu_vm_fini(adev, new_vm);
1036 amdgpu_vm_init_fail:
1037         kfree(new_vm);
1038         return ret;
1039 }
1040
1041 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1042                                            struct file *filp,
1043                                            void **vm, void **process_info,
1044                                            struct dma_fence **ef)
1045 {
1046         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1047         struct amdgpu_fpriv *drv_priv;
1048         struct amdgpu_vm *avm;
1049         int ret;
1050
1051         ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1052         if (ret)
1053                 return ret;
1054         avm = &drv_priv->vm;
1055
1056         /* Already a compute VM? */
1057         if (avm->process_info)
1058                 return -EINVAL;
1059
1060         /* Convert VM into a compute VM */
1061         ret = amdgpu_vm_make_compute(adev, avm);
1062         if (ret)
1063                 return ret;
1064
1065         /* Initialize KFD part of the VM and process info */
1066         ret = init_kfd_vm(avm, process_info, ef);
1067         if (ret)
1068                 return ret;
1069
1070         *vm = (void *)avm;
1071
1072         return 0;
1073 }
1074
1075 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1076                                     struct amdgpu_vm *vm)
1077 {
1078         struct amdkfd_process_info *process_info = vm->process_info;
1079         struct amdgpu_bo *pd = vm->root.base.bo;
1080
1081         if (!process_info)
1082                 return;
1083
1084         /* Release eviction fence from PD */
1085         amdgpu_bo_reserve(pd, false);
1086         amdgpu_bo_fence(pd, NULL, false);
1087         amdgpu_bo_unreserve(pd);
1088
1089         /* Update process info */
1090         mutex_lock(&process_info->lock);
1091         process_info->n_vms--;
1092         list_del(&vm->vm_list_node);
1093         mutex_unlock(&process_info->lock);
1094
1095         /* Release per-process resources when last compute VM is destroyed */
1096         if (!process_info->n_vms) {
1097                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1098                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1099                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1100
1101                 dma_fence_put(&process_info->eviction_fence->base);
1102                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1103                 put_pid(process_info->pid);
1104                 mutex_destroy(&process_info->lock);
1105                 kfree(process_info);
1106         }
1107 }
1108
1109 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1110 {
1111         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1112         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1113
1114         if (WARN_ON(!kgd || !vm))
1115                 return;
1116
1117         pr_debug("Destroying process vm %p\n", vm);
1118
1119         /* Release the VM context */
1120         amdgpu_vm_fini(adev, avm);
1121         kfree(vm);
1122 }
1123
1124 uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1125 {
1126         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1127
1128         return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1129 }
1130
1131 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1132                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1133                 void *vm, struct kgd_mem **mem,
1134                 uint64_t *offset, uint32_t flags)
1135 {
1136         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1137         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1138         uint64_t user_addr = 0;
1139         struct amdgpu_bo *bo;
1140         struct amdgpu_bo_param bp;
1141         int byte_align;
1142         u32 domain, alloc_domain;
1143         u64 alloc_flags;
1144         uint32_t mapping_flags;
1145         int ret;
1146
1147         /*
1148          * Check on which domain to allocate BO
1149          */
1150         if (flags & ALLOC_MEM_FLAGS_VRAM) {
1151                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1152                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
1153                 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1154                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1155                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1156         } else if (flags & ALLOC_MEM_FLAGS_GTT) {
1157                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1158                 alloc_flags = 0;
1159         } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1160                 domain = AMDGPU_GEM_DOMAIN_GTT;
1161                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1162                 alloc_flags = 0;
1163                 if (!offset || !*offset)
1164                         return -EINVAL;
1165                 user_addr = *offset;
1166         } else {
1167                 return -EINVAL;
1168         }
1169
1170         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1171         if (!*mem)
1172                 return -ENOMEM;
1173         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1174         mutex_init(&(*mem)->lock);
1175         (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1176
1177         /* Workaround for AQL queue wraparound bug. Map the same
1178          * memory twice. That means we only actually allocate half
1179          * the memory.
1180          */
1181         if ((*mem)->aql_queue)
1182                 size = size >> 1;
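        /* The second, AQL mapping of the same BO is created at va + size by
         * add_bo_to_vm() when is_aql is true; see bo_va_entry_aql in
         * amdgpu_amdkfd_gpuvm_map_memory_to_gpu().
         */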
1183
1184         /* Workaround for TLB bug on older VI chips */
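        /* (VI_BO_SIZE_ALIGN is 0x8000, i.e. a 32KB size alignment) */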
1185         byte_align = (adev->family == AMDGPU_FAMILY_VI &&
1186                         adev->asic_type != CHIP_FIJI &&
1187                         adev->asic_type != CHIP_POLARIS10 &&
1188                         adev->asic_type != CHIP_POLARIS11) ?
1189                         VI_BO_SIZE_ALIGN : 1;
1190
1191         mapping_flags = AMDGPU_VM_PAGE_READABLE;
1192         if (flags & ALLOC_MEM_FLAGS_WRITABLE)
1193                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
1194         if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
1195                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1196         if (flags & ALLOC_MEM_FLAGS_COHERENT)
1197                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1198         else
1199                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1200         (*mem)->mapping_flags = mapping_flags;
1201
1202         amdgpu_sync_create(&(*mem)->sync);
1203
1204         ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
1205         if (ret) {
1206                 pr_debug("Insufficient system memory\n");
1207                 goto err_reserve_system_mem;
1208         }
1209
1210         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1211                         va, size, domain_string(alloc_domain));
1212
1213         memset(&bp, 0, sizeof(bp));
1214         bp.size = size;
1215         bp.byte_align = byte_align;
1216         bp.domain = alloc_domain;
1217         bp.flags = alloc_flags;
1218         bp.type = ttm_bo_type_device;
1219         bp.resv = NULL;
1220         ret = amdgpu_bo_create(adev, &bp, &bo);
1221         if (ret) {
1222                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1223                                 domain_string(alloc_domain), ret);
1224                 goto err_bo_create;
1225         }
1226         bo->kfd_bo = *mem;
1227         (*mem)->bo = bo;
1228         if (user_addr)
1229                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1230
1231         (*mem)->va = va;
1232         (*mem)->domain = domain;
1233         (*mem)->mapped_to_gpu_memory = 0;
1234         (*mem)->process_info = avm->process_info;
1235         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1236
1237         if (user_addr) {
1238                 ret = init_user_pages(*mem, current->mm, user_addr);
1239                 if (ret) {
1240                         mutex_lock(&avm->process_info->lock);
1241                         list_del(&(*mem)->validate_list.head);
1242                         mutex_unlock(&avm->process_info->lock);
1243                         goto allocate_init_user_pages_failed;
1244                 }
1245         }
1246
1247         if (offset)
1248                 *offset = amdgpu_bo_mmap_offset(bo);
1249
1250         return 0;
1251
1252 allocate_init_user_pages_failed:
1253         amdgpu_bo_unref(&bo);
1254         /* Don't unreserve system mem limit twice */
1255         goto err_reserve_system_mem;
1256 err_bo_create:
1257         unreserve_system_mem_limit(adev, size, alloc_domain);
1258 err_reserve_system_mem:
1259         mutex_destroy(&(*mem)->lock);
1260         kfree(*mem);
1261         return ret;
1262 }
1263
1264 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1265                 struct kgd_dev *kgd, struct kgd_mem *mem)
1266 {
1267         struct amdkfd_process_info *process_info = mem->process_info;
1268         unsigned long bo_size = mem->bo->tbo.mem.size;
1269         struct kfd_bo_va_list *entry, *tmp;
1270         struct bo_vm_reservation_context ctx;
1271         struct ttm_validate_buffer *bo_list_entry;
1272         int ret;
1273
1274         mutex_lock(&mem->lock);
1275
1276         if (mem->mapped_to_gpu_memory > 0) {
1277                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1278                                 mem->va, bo_size);
1279                 mutex_unlock(&mem->lock);
1280                 return -EBUSY;
1281         }
1282
1283         mutex_unlock(&mem->lock);
1284         /* lock is not needed after this, since mem is unused and will
1285          * be freed anyway
1286          */
1287
1288         /* No more MMU notifiers */
1289         amdgpu_mn_unregister(mem->bo);
1290
1291         /* Make sure restore workers don't access the BO any more */
1292         bo_list_entry = &mem->validate_list;
1293         mutex_lock(&process_info->lock);
1294         list_del(&bo_list_entry->head);
1295         mutex_unlock(&process_info->lock);
1296
1297         /* Free user pages if necessary */
1298         if (mem->user_pages) {
1299                 pr_debug("%s: Freeing user_pages array\n", __func__);
1300                 if (mem->user_pages[0])
1301                         release_pages(mem->user_pages,
1302                                         mem->bo->tbo.ttm->num_pages);
1303                 kvfree(mem->user_pages);
1304         }
1305
1306         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1307         if (unlikely(ret))
1308                 return ret;
1309
1310         /* The eviction fence should be removed by the last unmap.
1311          * TODO: Log an error condition if the bo still has the eviction fence
1312          * attached
1313          */
1314         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1315                                         process_info->eviction_fence,
1316                                         NULL, NULL);
1317         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1318                 mem->va + bo_size * (1 + mem->aql_queue));
1319
1320         /* Remove from VM internal data structures */
1321         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1322                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1323                                 entry, bo_size);
1324
1325         ret = unreserve_bo_and_vms(&ctx, false, false);
1326
1327         /* Free the sync object */
1328         amdgpu_sync_free(&mem->sync);
1329
1330         /* Free the BO */
1331         amdgpu_bo_unref(&mem->bo);
1332         mutex_destroy(&mem->lock);
1333         kfree(mem);
1334
1335         return ret;
1336 }
1337
1338 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1339                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1340 {
1341         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1342         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1343         int ret;
1344         struct amdgpu_bo *bo;
1345         uint32_t domain;
1346         struct kfd_bo_va_list *entry;
1347         struct bo_vm_reservation_context ctx;
1348         struct kfd_bo_va_list *bo_va_entry = NULL;
1349         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1350         unsigned long bo_size;
1351         bool is_invalid_userptr = false;
1352
1353         bo = mem->bo;
1354         if (!bo) {
1355                 pr_err("Invalid BO when mapping memory to GPU\n");
1356                 return -EINVAL;
1357         }
1358
1359         /* Make sure restore is not running concurrently. Since we
1360          * don't map invalid userptr BOs, we rely on the next restore
1361          * worker to do the mapping
1362          */
1363         mutex_lock(&mem->process_info->lock);
1364
1365         /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1366          * sure that the MMU notifier is no longer running
1367          * concurrently and the queues are actually stopped
1368          */
1369         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1370                 down_write(&current->mm->mmap_sem);
1371                 is_invalid_userptr = atomic_read(&mem->invalid);
1372                 up_write(&current->mm->mmap_sem);
1373         }
1374
1375         mutex_lock(&mem->lock);
1376
1377         domain = mem->domain;
1378         bo_size = bo->tbo.mem.size;
1379
1380         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1381                         mem->va,
1382                         mem->va + bo_size * (1 + mem->aql_queue),
1383                         vm, domain_string(domain));
1384
1385         ret = reserve_bo_and_vm(mem, vm, &ctx);
1386         if (unlikely(ret))
1387                 goto out;
1388
1389         /* Userptr can be marked as "not invalid", but not actually be
1390          * validated yet (still in the system domain). In that case
1391          * the queues are still stopped and we can leave mapping for
1392          * the next restore worker
1393          */
1394         if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1395                 is_invalid_userptr = true;
1396
1397         if (check_if_add_bo_to_vm(avm, mem)) {
1398                 ret = add_bo_to_vm(adev, mem, avm, false,
1399                                 &bo_va_entry);
1400                 if (ret)
1401                         goto add_bo_to_vm_failed;
1402                 if (mem->aql_queue) {
1403                         ret = add_bo_to_vm(adev, mem, avm,
1404                                         true, &bo_va_entry_aql);
1405                         if (ret)
1406                                 goto add_bo_to_vm_failed_aql;
1407                 }
1408         } else {
1409                 ret = vm_validate_pt_pd_bos(avm);
1410                 if (unlikely(ret))
1411                         goto add_bo_to_vm_failed;
1412         }
1413
1414         if (mem->mapped_to_gpu_memory == 0 &&
1415             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1416                 /* Validate BO only once. The eviction fence gets added to BO
1417                  * the first time it is mapped. Validate will wait for all
1418                  * background evictions to complete.
1419                  */
1420                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1421                 if (ret) {
1422                         pr_debug("Validate failed\n");
1423                         goto map_bo_to_gpuvm_failed;
1424                 }
1425         }
1426
1427         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1428                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1429                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1430                                         entry->va, entry->va + bo_size,
1431                                         entry);
1432
1433                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1434                                               is_invalid_userptr);
1435                         if (ret) {
1436                                 pr_err("Failed to map BO to gpuvm\n");
1437                                 goto map_bo_to_gpuvm_failed;
1438                         }
1439
1440                         ret = vm_update_pds(vm, ctx.sync);
1441                         if (ret) {
1442                                 pr_err("Failed to update page directories\n");
1443                                 goto map_bo_to_gpuvm_failed;
1444                         }
1445
1446                         entry->is_mapped = true;
1447                         mem->mapped_to_gpu_memory++;
1448                         pr_debug("\t INC mapping count %d\n",
1449                                         mem->mapped_to_gpu_memory);
1450                 }
1451         }
1452
1453         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1454                 amdgpu_bo_fence(bo,
1455                                 &avm->process_info->eviction_fence->base,
1456                                 true);
1457         ret = unreserve_bo_and_vms(&ctx, false, false);
1458
1459         goto out;
1460
1461 map_bo_to_gpuvm_failed:
1462         if (bo_va_entry_aql)
1463                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1464 add_bo_to_vm_failed_aql:
1465         if (bo_va_entry)
1466                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1467 add_bo_to_vm_failed:
1468         unreserve_bo_and_vms(&ctx, false, false);
1469 out:
1470         mutex_unlock(&mem->process_info->lock);
1471         mutex_unlock(&mem->lock);
1472         return ret;
1473 }
1474
1475 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1476                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1477 {
1478         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1479         struct amdkfd_process_info *process_info =
1480                 ((struct amdgpu_vm *)vm)->process_info;
1481         unsigned long bo_size = mem->bo->tbo.mem.size;
1482         struct kfd_bo_va_list *entry;
1483         struct bo_vm_reservation_context ctx;
1484         int ret;
1485
1486         mutex_lock(&mem->lock);
1487
1488         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1489         if (unlikely(ret))
1490                 goto out;
1491         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1492         if (ctx.n_vms == 0) {
1493                 ret = -EINVAL;
1494                 goto unreserve_out;
1495         }
1496
1497         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1498         if (unlikely(ret))
1499                 goto unreserve_out;
1500
1501         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1502                 mem->va,
1503                 mem->va + bo_size * (1 + mem->aql_queue),
1504                 vm);
1505
1506         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1507                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1508                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1509                                         entry->va,
1510                                         entry->va + bo_size,
1511                                         entry);
1512
1513                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1514                         if (ret == 0) {
1515                                 entry->is_mapped = false;
1516                         } else {
1517                                 pr_err("failed to unmap VA 0x%llx\n",
1518                                                 mem->va);
1519                                 goto unreserve_out;
1520                         }
1521
1522                         mem->mapped_to_gpu_memory--;
1523                         pr_debug("\t DEC mapping count %d\n",
1524                                         mem->mapped_to_gpu_memory);
1525                 }
1526         }
1527
1528         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1529          * required.
1530          */
1531         if (mem->mapped_to_gpu_memory == 0 &&
1532             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1533                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1534                                                 process_info->eviction_fence,
1535                                                     NULL, NULL);
1536
1537 unreserve_out:
1538         unreserve_bo_and_vms(&ctx, false, false);
1539 out:
1540         mutex_unlock(&mem->lock);
1541         return ret;
1542 }
1543
1544 int amdgpu_amdkfd_gpuvm_sync_memory(
1545                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1546 {
1547         struct amdgpu_sync sync;
1548         int ret;
1549
1550         amdgpu_sync_create(&sync);
1551
1552         mutex_lock(&mem->lock);
1553         amdgpu_sync_clone(&mem->sync, &sync);
1554         mutex_unlock(&mem->lock);
1555
1556         ret = amdgpu_sync_wait(&sync, intr);
1557         amdgpu_sync_free(&sync);
1558         return ret;
1559 }
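/*
 * Note on the locking in amdgpu_amdkfd_gpuvm_sync_memory(): the fences are
 * cloned into a private amdgpu_sync container while mem->lock is held, and
 * the potentially long (and, with intr, interruptible) wait runs after the
 * lock is dropped, so a slow fence does not block other operations that
 * need mem->lock.
 */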
1560
1561 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1562                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1563 {
1564         int ret;
1565         struct amdgpu_bo *bo = mem->bo;
1566
1567         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1568                 pr_err("userptr can't be mapped to kernel\n");
1569                 return -EINVAL;
1570         }
1571
1572         /* Remove kgd_mem from the kfd_bo_list to avoid re-validating
1573          * this BO when BOs are restored after an eviction.
1574          */
1575         mutex_lock(&mem->process_info->lock);
1576
1577         ret = amdgpu_bo_reserve(bo, true);
1578         if (ret) {
1579                 pr_err("Failed to reserve bo. ret %d\n", ret);
1580                 goto bo_reserve_failed;
1581         }
1582
1583         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1584         if (ret) {
1585                 pr_err("Failed to pin bo. ret %d\n", ret);
1586                 goto pin_failed;
1587         }
1588
1589         ret = amdgpu_bo_kmap(bo, kptr);
1590         if (ret) {
1591                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1592                 goto kmap_failed;
1593         }
1594
1595         amdgpu_amdkfd_remove_eviction_fence(
1596                 bo, mem->process_info->eviction_fence, NULL, NULL);
1597         list_del_init(&mem->validate_list.head);
1598
1599         if (size)
1600                 *size = amdgpu_bo_size(bo);
1601
1602         amdgpu_bo_unreserve(bo);
1603
1604         mutex_unlock(&mem->process_info->lock);
1605         return 0;
1606
1607 kmap_failed:
1608         amdgpu_bo_unpin(bo);
1609 pin_failed:
1610         amdgpu_bo_unreserve(bo);
1611 bo_reserve_failed:
1612         mutex_unlock(&mem->process_info->lock);
1613
1614         return ret;
1615 }
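/*
 * Illustrative sketch, not part of this file: a caller that needs a CPU
 * view of a GTT BO (e.g. for a queue descriptor) would use the function
 * above roughly as follows; the variable names are assumptions made for
 * the example:
 *
 *	void *kptr;
 *	uint64_t size;
 *	int r;
 *
 *	r = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kgd, mem, &kptr, &size);
 *	if (!r)
 *		memset(kptr, 0, size);
 */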
1616
1617 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1618                                               struct kfd_vm_fault_info *mem)
1619 {
1620         struct amdgpu_device *adev;
1621
1622         adev = (struct amdgpu_device *)kgd;
1623         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1624                 *mem = *adev->gmc.vm_fault_info;
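                /* Make sure the snapshot above is fully copied before the
                 * updated flag is cleared below; this is assumed to pair with
                 * the GMC interrupt handler, which fills in vm_fault_info
                 * before setting the flag.
                 */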
1625                 mb();
1626                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1627         }
1628         return 0;
1629 }
1630
1631 /* Evict a userptr BO by stopping the queues if necessary
1632  *
1633  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1634  * cannot do any memory allocations, and cannot take any locks that
1635  * are held elsewhere while allocating memory. Therefore this is as
1636  * simple as possible, using atomic counters.
1637  *
1638  * It doesn't do anything to the BO itself. The real work happens in
1639  * restore, where we get updated page addresses. This function only
1640  * ensures that GPU access to the BO is stopped.
1641  */
1642 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1643                                 struct mm_struct *mm)
1644 {
1645         struct amdkfd_process_info *process_info = mem->process_info;
1646         int invalid, evicted_bos;
1647         int r = 0;
1648
1649         invalid = atomic_inc_return(&mem->invalid);
1650         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1651         if (evicted_bos == 1) {
1652                 /* First eviction, stop the queues */
1653                 r = kgd2kfd->quiesce_mm(mm);
1654                 if (r)
1655                         pr_err("Failed to quiesce KFD\n");
1656                 schedule_delayed_work(&process_info->restore_userptr_work,
1657                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1658         }
1659
1660         return r;
1661 }
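/*
 * Rough sketch of the caller side, for illustration only; the real code
 * lives in the amdgpu MMU notifier (amdgpu_mn.c) and the names used here
 * are assumptions:
 *
 *	if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) {
 *		struct kgd_mem *mem = bo->kfd_bo;
 *
 *		amdgpu_amdkfd_evict_userptr(mem, amn->mm);
 *	}
 */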
1662
1663 /* Update invalid userptr BOs
1664  *
1665  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1666  * userptr_inval_list and updates user pages for all BOs that have
1667  * been invalidated since their last update.
1668  */
1669 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1670                                      struct mm_struct *mm)
1671 {
1672         struct kgd_mem *mem, *tmp_mem;
1673         struct amdgpu_bo *bo;
1674         struct ttm_operation_ctx ctx = { false, false };
1675         int invalid, ret;
1676
1677         /* Move all invalidated BOs to the userptr_inval_list and
1678          * release their user pages by migration to the CPU domain
1679          */
1680         list_for_each_entry_safe(mem, tmp_mem,
1681                                  &process_info->userptr_valid_list,
1682                                  validate_list.head) {
1683                 if (!atomic_read(&mem->invalid))
1684                         continue; /* BO is still valid */
1685
1686                 bo = mem->bo;
1687
1688                 if (amdgpu_bo_reserve(bo, true))
1689                         return -EAGAIN;
1690                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1691                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1692                 amdgpu_bo_unreserve(bo);
1693                 if (ret) {
1694                         pr_err("%s: Failed to invalidate userptr BO\n",
1695                                __func__);
1696                         return -EAGAIN;
1697                 }
1698
1699                 list_move_tail(&mem->validate_list.head,
1700                                &process_info->userptr_inval_list);
1701         }
1702
1703         if (list_empty(&process_info->userptr_inval_list))
1704                 return 0; /* All evicted userptr BOs were freed */
1705
1706         /* Go through userptr_inval_list and update any invalid user_pages */
1707         list_for_each_entry(mem, &process_info->userptr_inval_list,
1708                             validate_list.head) {
1709                 invalid = atomic_read(&mem->invalid);
1710                 if (!invalid)
1711                         /* BO hasn't been invalidated since the last
1712                          * revalidation attempt. Keep its BO list.
1713                          */
1714                         continue;
1715
1716                 bo = mem->bo;
1717
1718                 if (!mem->user_pages) {
1719                         mem->user_pages =
1720                                 kvmalloc_array(bo->tbo.ttm->num_pages,
1721                                                  sizeof(struct page *),
1722                                                  GFP_KERNEL | __GFP_ZERO);
1723                         if (!mem->user_pages) {
1724                                 pr_err("%s: Failed to allocate pages array\n",
1725                                        __func__);
1726                                 return -ENOMEM;
1727                         }
1728                 } else if (mem->user_pages[0]) {
1729                         release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
1730                 }
1731
1732                 /* Get updated user pages */
1733                 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
1734                                                    mem->user_pages);
1735                 if (ret) {
1736                         mem->user_pages[0] = NULL;
1737                         pr_info("%s: Failed to get user pages: %d\n",
1738                                 __func__, ret);
1739                         /* Pretend it succeeded. It will fail later
1740                          * with a VM fault if the GPU tries to access
1741                          * it. Better than hanging indefinitely with
1742                          * stalled user mode queues.
1743                          */
1744                 }
1745
1746                 /* Mark the BO as valid unless it was invalidated
1747                  * again concurrently
1748                  */
1749                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1750                         return -EAGAIN;
1751         }
1752
1753         return 0;
1754 }
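/*
 * Summary of the list transitions handled above and in
 * validate_invalid_user_pages() below: a BO sits on userptr_valid_list
 * while its pages are valid, moves to userptr_inval_list once the MMU
 * notifier bumps mem->invalid, and only moves back after its user pages
 * have been re-fetched, the BO has been re-validated and its GPUVM
 * mappings updated. mem->invalid is cleared with a cmpxchg so that a
 * concurrent invalidation forces another restore pass.
 */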
1755
1756 /* Validate invalid userptr BOs
1757  *
1758  * Validates BOs on the userptr_inval_list, and moves them back to the
1759  * userptr_valid_list. Also updates GPUVM page tables with new page
1760  * addresses and waits for the page table updates to complete.
1761  */
1762 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1763 {
1764         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1765         struct list_head resv_list, duplicates;
1766         struct ww_acquire_ctx ticket;
1767         struct amdgpu_sync sync;
1768
1769         struct amdgpu_vm *peer_vm;
1770         struct kgd_mem *mem, *tmp_mem;
1771         struct amdgpu_bo *bo;
1772         struct ttm_operation_ctx ctx = { false, false };
1773         int i, ret;
1774
1775         pd_bo_list_entries = kcalloc(process_info->n_vms,
1776                                      sizeof(struct amdgpu_bo_list_entry),
1777                                      GFP_KERNEL);
1778         if (!pd_bo_list_entries) {
1779                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1780                 return -ENOMEM;
1781         }
1782
1783         INIT_LIST_HEAD(&resv_list);
1784         INIT_LIST_HEAD(&duplicates);
1785
1786         /* Get all the page directory BOs that need to be reserved */
1787         i = 0;
1788         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1789                             vm_list_node)
1790                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1791                                     &pd_bo_list_entries[i++]);
1792         /* Add the userptr_inval_list entries to resv_list */
1793         list_for_each_entry(mem, &process_info->userptr_inval_list,
1794                             validate_list.head) {
1795                 list_add_tail(&mem->resv_list.head, &resv_list);
1796                 mem->resv_list.bo = mem->validate_list.bo;
1797                 mem->resv_list.shared = mem->validate_list.shared;
1798         }
1799
1800         /* Reserve all BOs and page tables for validation */
1801         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1802         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1803         if (ret)
1804                 goto out;
1805
1806         amdgpu_sync_create(&sync);
1807
1808         /* Avoid triggering eviction fences when unmapping invalid
1809          * userptr BOs (waits for all fences, doesn't use
1810          * FENCE_OWNER_VM)
1811          */
1812         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1813                             vm_list_node)
1814                 amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
1815                                                 process_info->eviction_fence,
1816                                                 NULL, NULL);
1817
1818         ret = process_validate_vms(process_info);
1819         if (ret)
1820                 goto unreserve_out;
1821
1822         /* Validate BOs and update GPUVM page tables */
1823         list_for_each_entry_safe(mem, tmp_mem,
1824                                  &process_info->userptr_inval_list,
1825                                  validate_list.head) {
1826                 struct kfd_bo_va_list *bo_va_entry;
1827
1828                 bo = mem->bo;
1829
1830                 /* Copy pages array and validate the BO if we got user pages */
1831                 if (mem->user_pages[0]) {
1832                         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
1833                                                      mem->user_pages);
1834                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1835                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1836                         if (ret) {
1837                                 pr_err("%s: failed to validate BO\n", __func__);
1838                                 goto unreserve_out;
1839                         }
1840                 }
1841
1842                 /* Validate succeeded, now the BO owns the pages, free
1843                  * our copy of the pointer array. Put this BO back on
1844                  * the userptr_valid_list. If we need to revalidate
1845                  * it, we need to start from scratch.
1846                  */
1847                 kvfree(mem->user_pages);
1848                 mem->user_pages = NULL;
1849                 list_move_tail(&mem->validate_list.head,
1850                                &process_info->userptr_valid_list);
1851
1852                 /* Update mapping. If the BO was not validated
1853                  * (because we couldn't get user pages), this will
1854                  * clear the page table entries, which will result in
1855                  * VM faults if the GPU tries to access the invalid
1856                  * memory.
1857                  */
1858                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1859                         if (!bo_va_entry->is_mapped)
1860                                 continue;
1861
1862                         ret = update_gpuvm_pte((struct amdgpu_device *)
1863                                                bo_va_entry->kgd_dev,
1864                                                bo_va_entry, &sync);
1865                         if (ret) {
1866                                 pr_err("%s: update PTE failed\n", __func__);
1867                                 /* make sure this gets validated again */
1868                                 atomic_inc(&mem->invalid);
1869                                 goto unreserve_out;
1870                         }
1871                 }
1872         }
1873
1874         /* Update page directories */
1875         ret = process_update_pds(process_info, &sync);
1876
1877 unreserve_out:
1878         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1879                             vm_list_node)
1880                 amdgpu_bo_fence(peer_vm->root.base.bo,
1881                                 &process_info->eviction_fence->base, true);
1882         ttm_eu_backoff_reservation(&ticket, &resv_list);
1883         amdgpu_sync_wait(&sync, false);
1884         amdgpu_sync_free(&sync);
1885 out:
1886         kfree(pd_bo_list_entries);
1887
1888         return ret;
1889 }
1890
1891 /* Worker callback to restore evicted userptr BOs
1892  *
1893  * Tries to update and validate all userptr BOs. If successful and no
1894  * concurrent evictions happened, the queues are restarted. Otherwise,
1895  * reschedule for another attempt later.
1896  */
1897 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1898 {
1899         struct delayed_work *dwork = to_delayed_work(work);
1900         struct amdkfd_process_info *process_info =
1901                 container_of(dwork, struct amdkfd_process_info,
1902                              restore_userptr_work);
1903         struct task_struct *usertask;
1904         struct mm_struct *mm;
1905         int evicted_bos;
1906
1907         evicted_bos = atomic_read(&process_info->evicted_bos);
1908         if (!evicted_bos)
1909                 return;
1910
1911         /* Reference task and mm in case of concurrent process termination */
1912         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1913         if (!usertask)
1914                 return;
1915         mm = get_task_mm(usertask);
1916         if (!mm) {
1917                 put_task_struct(usertask);
1918                 return;
1919         }
1920
1921         mutex_lock(&process_info->lock);
1922
1923         if (update_invalid_user_pages(process_info, mm))
1924                 goto unlock_out;
1925         /* userptr_inval_list can be empty if all evicted userptr BOs
1926          * have been freed. In that case there is nothing to validate
1927          * and we can just restart the queues.
1928          */
1929         if (!list_empty(&process_info->userptr_inval_list)) {
1930                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1931                         goto unlock_out; /* Concurrent eviction, try again */
1932
1933                 if (validate_invalid_user_pages(process_info))
1934                         goto unlock_out;
1935         }
1936         /* Final check for concurrent eviction and atomic update. If
1937          * another eviction happens after successful update, it will
1938          * be a first eviction that calls quiesce_mm. The eviction
1939          * reference counting inside KFD will handle this case.
1940          */
1941         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1942             evicted_bos)
1943                 goto unlock_out;
1944         evicted_bos = 0;
1945         if (kgd2kfd->resume_mm(mm)) {
1946                 pr_err("%s: Failed to resume KFD\n", __func__);
1947                 /* No recovery from this failure. Probably the CP is
1948                  * hanging. No point trying again.
1949                  */
1950         }
1951 unlock_out:
1952         mutex_unlock(&process_info->lock);
1953         mmput(mm);
1954         put_task_struct(usertask);
1955
1956         /* If validation failed, reschedule another attempt */
1957         if (evicted_bos)
1958                 schedule_delayed_work(&process_info->restore_userptr_work,
1959                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1960 }
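/*
 * For reference: restore_userptr_work is initialized when the process info
 * is created earlier in this file, roughly as
 *
 *	INIT_DELAYED_WORK(&info->restore_userptr_work,
 *			  amdgpu_amdkfd_restore_userptr_worker);
 *
 * and is (re)armed both from amdgpu_amdkfd_evict_userptr() and from the
 * rescheduling path at the end of this worker.
 */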
1961
1962 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1963  *   KFD process identified by process_info
1964  *
1965  * @process_info: amdkfd_process_info of the KFD process
1966  *
1967  * After memory eviction, the restore thread calls this function. It must be
1968  * called while the process is still valid. BO restore involves:
1969  *
1970  * 1.  Release old eviction fence and create new one
1971  * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
1972  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1973  *     BOs that need to be reserved.
1974  * 4.  Reserve all the BOs
1975  * 5.  Validate PD and PT BOs.
1976  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence
1977  * 7.  Add fence to all PD and PT BOs.
1978  * 8.  Unreserve all BOs
1979  */
1980 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1981 {
1982         struct amdgpu_bo_list_entry *pd_bo_list;
1983         struct amdkfd_process_info *process_info = info;
1984         struct amdgpu_vm *peer_vm;
1985         struct kgd_mem *mem;
1986         struct bo_vm_reservation_context ctx;
1987         struct amdgpu_amdkfd_fence *new_fence;
1988         int ret = 0, i;
1989         struct list_head duplicate_save;
1990         struct amdgpu_sync sync_obj;
1991
1992         INIT_LIST_HEAD(&duplicate_save);
1993         INIT_LIST_HEAD(&ctx.list);
1994         INIT_LIST_HEAD(&ctx.duplicates);
1995
1996         pd_bo_list = kcalloc(process_info->n_vms,
1997                              sizeof(struct amdgpu_bo_list_entry),
1998                              GFP_KERNEL);
1999         if (!pd_bo_list)
2000                 return -ENOMEM;
2001
2002         i = 0;
2003         mutex_lock(&process_info->lock);
2004         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2005                         vm_list_node)
2006                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2007
2008         /* Reserve all BOs and page tables/directory. Add all BOs from
2009          * kfd_bo_list to ctx.list
2010          */
2011         list_for_each_entry(mem, &process_info->kfd_bo_list,
2012                             validate_list.head) {
2013
2014                 list_add_tail(&mem->resv_list.head, &ctx.list);
2015                 mem->resv_list.bo = mem->validate_list.bo;
2016                 mem->resv_list.shared = mem->validate_list.shared;
2017         }
2018
2019         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2020                                      false, &duplicate_save);
2021         if (ret) {
2022                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2023                 goto ttm_reserve_fail;
2024         }
2025
2026         amdgpu_sync_create(&sync_obj);
2027
2028         /* Validate PDs and PTs */
2029         ret = process_validate_vms(process_info);
2030         if (ret)
2031                 goto validate_map_fail;
2032
2033         /* Wait for PD/PTs validate to finish */
2034         /* FIXME: I think this isn't needed */
2035         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2036                             vm_list_node) {
2037                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2038
2039                 ttm_bo_wait(&bo->tbo, false, false);
2040         }
2041
2042         /* Validate BOs and map them to GPUVM (update VM page tables). */
2043         list_for_each_entry(mem, &process_info->kfd_bo_list,
2044                             validate_list.head) {
2045
2046                 struct amdgpu_bo *bo = mem->bo;
2047                 uint32_t domain = mem->domain;
2048                 struct kfd_bo_va_list *bo_va_entry;
2049
2050                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2051                 if (ret) {
2052                         pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2053                         goto validate_map_fail;
2054                 }
2055
2056                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2057                                     bo_list) {
2058                         ret = update_gpuvm_pte((struct amdgpu_device *)
2059                                               bo_va_entry->kgd_dev,
2060                                               bo_va_entry,
2061                                               &sync_obj);
2062                         if (ret) {
2063                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2064                                 goto validate_map_fail;
2065                         }
2066                 }
2067         }
2068
2069         /* Update page directories */
2070         ret = process_update_pds(process_info, &sync_obj);
2071         if (ret) {
2072                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2073                 goto validate_map_fail;
2074         }
2075
2076         amdgpu_sync_wait(&sync_obj, false);
2077
2078         /* Release the old eviction fence and create a new one: a fence can
2079          * only go from unsignaled to signaled, so it cannot be reused.
2080          * Use context and mm from the old fence.
2081          */
2082         new_fence = amdgpu_amdkfd_fence_create(
2083                                 process_info->eviction_fence->base.context,
2084                                 process_info->eviction_fence->mm);
2085         if (!new_fence) {
2086                 pr_err("Failed to create eviction fence\n");
2087                 ret = -ENOMEM;
2088                 goto validate_map_fail;
2089         }
2090         dma_fence_put(&process_info->eviction_fence->base);
2091         process_info->eviction_fence = new_fence;
2092         *ef = dma_fence_get(&new_fence->base);
2093
2094         /* Wait for validate to finish and attach new eviction fence */
2095         list_for_each_entry(mem, &process_info->kfd_bo_list,
2096                 validate_list.head)
2097                 ttm_bo_wait(&mem->bo->tbo, false, false);
2098         list_for_each_entry(mem, &process_info->kfd_bo_list,
2099                 validate_list.head)
2100                 amdgpu_bo_fence(mem->bo,
2101                         &process_info->eviction_fence->base, true);
2102
2103         /* Attach eviction fence to PD / PT BOs */
2104         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2105                             vm_list_node) {
2106                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2107
2108                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2109         }
2110
2111 validate_map_fail:
2112         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2113         amdgpu_sync_free(&sync_obj);
2114 ttm_reserve_fail:
2115         mutex_unlock(&process_info->lock);
2116         kfree(pd_bo_list);
2117         return ret;
2118 }
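/*
 * Illustrative sketch, not part of this file: the KFD per-process restore
 * worker is the expected caller, passing its process info and keeping the
 * returned eviction fence (a reference is already taken via dma_fence_get()
 * above). The names p, kgd_process_info and new_ef below are assumptions
 * made for the example:
 *
 *	struct dma_fence *new_ef = NULL;
 *	int r;
 *
 *	r = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
 *						    &new_ef);
 *	if (!r)
 *		p->ef = new_ef;
 */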