/* GNU Linux-libre 4.19.245-gnu1: drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c */
/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"

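/* Resolve the user fence chunk: look up the GEM object named by the chunk,
 * take a reference to it in p->uf_entry and record the fence offset. The BO
 * must be exactly one page, the 8-byte fence value must fit inside it, and
 * userptr-backed BOs are rejected.
 */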
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_cs_chunk_fence *data,
                                      uint32_t *offset)
{
        struct drm_gem_object *gobj;
        unsigned long size;
        int r;

        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
                return -EINVAL;

        p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        p->uf_entry.priority = 0;
        p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
        p->uf_entry.tv.shared = true;
        p->uf_entry.user_pages = NULL;

        drm_gem_object_put_unlocked(gobj);

        size = amdgpu_bo_size(p->uf_entry.robj);
        if (size != PAGE_SIZE || (data->offset + 8) > size) {
                r = -EINVAL;
                goto error_unref;
        }

        if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
                r = -EINVAL;
                goto error_unref;
        }

        *offset = data->offset;

        return 0;

error_unref:
        amdgpu_bo_unref(&p->uf_entry.robj);
        return r;
}

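/* Create the parser's BO list from an AMDGPU_CHUNK_ID_BO_HANDLES chunk
 * instead of a pre-created list handle.
 */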
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_bo_list_in *data)
{
        int r;
        struct drm_amdgpu_bo_list_entry *info = NULL;

        r = amdgpu_bo_create_list_entry_array(data, &info);
        if (r)
                return r;

        r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
                                  &p->bo_list);
        if (r)
                goto error_free;

        kvfree(info);
        return 0;

error_free:
        kvfree(info);

        return r;
}

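/* First stage of command submission: copy the chunk array in from user
 * space, kvmalloc and copy each chunk's data, count the IB chunks and handle
 * the fence and BO-handle chunks, then allocate the job. The context lock is
 * taken here and released in amdgpu_cs_parser_fini().
 */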
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        unsigned size, num_ibs = 0;
        uint32_t uf_offset = 0;
        int i;
        int ret;

        if (cs->in.num_chunks == 0)
                return 0;

        chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (!chunk_array)
                return -ENOMEM;

        p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
        if (!p->ctx) {
                ret = -EINVAL;
                goto free_chunk;
        }

        mutex_lock(&p->ctx->lock);

        /* skip guilty context job */
        if (atomic_read(&p->ctx->guilty) == 1) {
                ret = -ECANCELED;
                goto free_chunk;
        }

        /* get chunks */
        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {
                ret = -EFAULT;
                goto free_chunk;
        }

        p->nchunks = cs->in.num_chunks;
        p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
                                  GFP_KERNEL);
        if (!p->chunks) {
                ret = -ENOMEM;
                goto free_chunk;
        }

        for (i = 0; i < p->nchunks; i++) {
                struct drm_amdgpu_cs_chunk __user *chunk_ptr;
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = u64_to_user_ptr(chunk_array[i]);
                if (copy_from_user(&user_chunk, chunk_ptr,
                                   sizeof(struct drm_amdgpu_cs_chunk))) {
                        ret = -EFAULT;
                        i--;
                        goto free_partial_kdata;
                }
                p->chunks[i].chunk_id = user_chunk.chunk_id;
                p->chunks[i].length_dw = user_chunk.length_dw;

                size = p->chunks[i].length_dw;
                cdata = u64_to_user_ptr(user_chunk.chunk_data);

                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
                if (p->chunks[i].kdata == NULL) {
                        ret = -ENOMEM;
                        i--;
                        goto free_partial_kdata;
                }
                size *= sizeof(uint32_t);
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        ret = -EFAULT;
                        goto free_partial_kdata;
                }

                switch (p->chunks[i].chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
                        ++num_ibs;
                        break;

                case AMDGPU_CHUNK_ID_FENCE:
                        size = sizeof(struct drm_amdgpu_cs_chunk_fence);
                        if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
                                ret = -EINVAL;
                                goto free_partial_kdata;
                        }

                        ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
                                                         &uf_offset);
                        if (ret)
                                goto free_partial_kdata;

                        break;

                case AMDGPU_CHUNK_ID_BO_HANDLES:
                        size = sizeof(struct drm_amdgpu_bo_list_in);
                        if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
                                ret = -EINVAL;
                                goto free_partial_kdata;
                        }

                        ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
                        if (ret)
                                goto free_partial_kdata;

                        break;

                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                        break;

                default:
                        ret = -EINVAL;
                        goto free_partial_kdata;
                }
        }

        ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
        if (ret)
                goto free_all_kdata;

        if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
                ret = -ECANCELED;
                goto free_all_kdata;
        }

        if (p->uf_entry.robj)
                p->job->uf_addr = uf_offset;
        kfree(chunk_array);

        /* Use this opportunity to fill in task info for the vm */
        amdgpu_vm_set_task_info(vm);

        return 0;

free_all_kdata:
        i = p->nchunks - 1;
free_partial_kdata:
        for (; i >= 0; i--)
                kvfree(p->chunks[i].kdata);
        kfree(p->chunks);
        p->chunks = NULL;
        p->nchunks = 0;
free_chunk:
        kfree(chunk_array);

        return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
        if (us <= 0 || !adev->mm_stats.log2_max_MBps)
                return 0;

        /* Since accum_us is incremented by a million per second, just
         * multiply it by the number of MB/s to get the number of bytes.
         */
        return us << adev->mm_stats.log2_max_MBps;
}

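/* Convert bytes to microseconds (the inverse of us_to_bytes). */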
static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
        if (!adev->mm_stats.log2_max_MBps)
                return 0;

        return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
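/* A worked example with an illustrative value: if log2_max_MBps == 6
 * (roughly 64 MB/s), the 200 ms accumulation cap below allows at most
 * 200000 << 6 = 12800000 bytes, i.e. about 12.8 MB of buffer moves per IB.
 */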
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
                                              u64 *max_bytes,
                                              u64 *max_vis_bytes)
{
        s64 time_us, increment_us;
        u64 free_vram, total_vram, used_vram;

        /* Allow a maximum of 200 accumulated ms. This is basically per-IB
         * throttling.
         *
         * It means that in order to get full max MBps, at least 5 IBs per
         * second must be submitted and not more than 200ms apart from each
         * other.
         */
        const s64 us_upper_bound = 200000;

        if (!adev->mm_stats.log2_max_MBps) {
                *max_bytes = 0;
                *max_vis_bytes = 0;
                return;
        }

        total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
        used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
        free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

        spin_lock(&adev->mm_stats.lock);

        /* Increase the amount of accumulated us. */
        time_us = ktime_to_us(ktime_get());
        increment_us = time_us - adev->mm_stats.last_update_us;
        adev->mm_stats.last_update_us = time_us;
        adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
                                      us_upper_bound);

        /* This prevents the short period of low performance when the VRAM
         * usage is low and the driver is in debt or doesn't have enough
         * accumulated us to fill VRAM quickly.
         *
         * The situation can occur in these cases:
         * - a lot of VRAM is freed by userspace
         * - the presence of a big buffer causes a lot of evictions
         *   (solution: split buffers into smaller ones)
         *
         * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
         * accum_us to a positive number.
         */
        if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
                s64 min_us;

                /* Be more aggressive on dGPUs. Try to fill a portion of free
                 * VRAM now.
                 */
                if (!(adev->flags & AMD_IS_APU))
                        min_us = bytes_to_us(adev, free_vram / 4);
                else
                        min_us = 0; /* Reset accum_us on APUs. */

                adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
        }

        /* This is set to 0 if the driver is in debt to disallow (optional)
         * buffer moves.
         */
        *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

        /* Do the same for visible VRAM if half of it is free */
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
                u64 total_vis_vram = adev->gmc.visible_vram_size;
                u64 used_vis_vram =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

                if (used_vis_vram < total_vis_vram) {
                        u64 free_vis_vram = total_vis_vram - used_vis_vram;
                        adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
                                                          increment_us, us_upper_bound);

                        if (free_vis_vram >= total_vis_vram / 2)
                                adev->mm_stats.accum_us_vis =
                                        max(bytes_to_us(adev, free_vis_vram / 2),
                                            adev->mm_stats.accum_us_vis);
                }

                *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
        } else {
                *max_vis_bytes = 0;
        }

        spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                  u64 num_vis_bytes)
{
        spin_lock(&adev->mm_stats.lock);
        adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
        adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
        spin_unlock(&adev->mm_stats.lock);
}

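/* Validate a single BO, preferring bo->preferred_domains while the
 * submission still has migration budget left (p->bytes_moved below the
 * threshold) and falling back to bo->allowed_domains otherwise, or on
 * -ENOMEM.
 */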
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
                                 struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
                .resv = bo->tbo.resv,
                .flags = 0
        };
        uint32_t domain;
        int r;

        if (bo->pin_count)
                return 0;

        /* Don't move this buffer if we have depleted our allowance
         * to move it. Don't move anything if the threshold is zero.
         */
        if (p->bytes_moved < p->bytes_moved_threshold) {
                if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
                        /* And don't move a CPU_ACCESS_REQUIRED BO to limited
                         * visible VRAM if we've depleted our allowance to do
                         * that.
                         */
                        if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
                                domain = bo->preferred_domains;
                        else
                                domain = bo->allowed_domains;
                } else {
                        domain = bo->preferred_domains;
                }
        } else {
                domain = bo->allowed_domains;
        }

retry:
        amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

        p->bytes_moved += ctx.bytes_moved;
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            amdgpu_bo_in_cpu_visible_vram(bo))
                p->bytes_moved_vis += ctx.bytes_moved;

        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
        }

        return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                                struct amdgpu_bo *validated)
{
        uint32_t domain = validated->allowed_domains;
        struct ttm_operation_ctx ctx = { true, false };
        int r;

        if (!p->evictable)
                return false;

        for (; &p->evictable->tv.head != &p->validated;
             p->evictable = list_prev_entry(p->evictable, tv.head)) {

                struct amdgpu_bo_list_entry *candidate = p->evictable;
                struct amdgpu_bo *bo = candidate->robj;
                struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
                bool update_bytes_moved_vis;
                uint32_t other;

                /* If we reached our current BO we can forget it */
                if (candidate->robj == validated)
                        break;

                /* We can't move pinned BOs here */
                if (bo->pin_count)
                        continue;

                other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

                /* Check if this BO is in one of the domains we need space for */
                if (!(other & domain))
                        continue;

                /* Check if we can move this BO somewhere else */
                other = bo->allowed_domains & ~domain;
                if (!other)
                        continue;

                /* Good we can try to move this BO somewhere else */
                update_bytes_moved_vis =
                                !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                                amdgpu_bo_in_cpu_visible_vram(bo);
                amdgpu_bo_placement_from_domain(bo, other);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                p->bytes_moved += ctx.bytes_moved;
                if (update_bytes_moved_vis)
                        p->bytes_moved_vis += ctx.bytes_moved;

                if (unlikely(r))
                        break;

                p->evictable = list_prev_entry(p->evictable, tv.head);
                list_move(&candidate->tv.head, &p->validated);

                return true;
        }

        return false;
}

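/* Validation callback: retry amdgpu_cs_bo_validate() as long as it runs out
 * of memory and amdgpu_cs_try_evict() can still make room, then validate the
 * shadow BO as well if one exists.
 */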
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
        struct amdgpu_cs_parser *p = param;
        int r;

        do {
                r = amdgpu_cs_bo_validate(p, bo);
        } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
        if (r)
                return r;

        if (bo->shadow)
                r = amdgpu_cs_bo_validate(p, bo->shadow);

        return r;
}

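/* Validate every BO on the given list. Userptr BOs owned by another process
 * are rejected with -EPERM; freshly acquired user pages are bound to the TTM
 * object before validation and the temporary page array is freed afterwards.
 */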
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                                   struct list_head *validated)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_bo_list_entry *lobj;
        int r;

        list_for_each_entry(lobj, validated, tv.head) {
                struct amdgpu_bo *bo = lobj->robj;
                bool binding_userptr = false;
                struct mm_struct *usermm;

                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
                if (usermm && usermm != current->mm)
                        return -EPERM;

                /* Check if we have user pages and nobody bound the BO already */
                if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
                    lobj->user_pages) {
                        amdgpu_bo_placement_from_domain(bo,
                                                        AMDGPU_GEM_DOMAIN_CPU);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (r)
                                return r;
                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     lobj->user_pages);
                        binding_userptr = true;
                }

                if (p->evictable == lobj)
                        p->evictable = NULL;

                r = amdgpu_cs_validate(p, bo);
                if (r)
                        return r;

                if (binding_userptr) {
                        kvfree(lobj->user_pages);
                        lobj->user_pages = NULL;
                }
        }
        return 0;
}

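/* Second stage of command submission: build the list of BOs used by the job,
 * reserve them, (re)acquire user pages for userptr BOs (retrying up to ten
 * times if they are concurrently invalidated), validate everything within
 * the move budget and record the GDS/GWS/OA state.
 */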
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct list_head duplicates;
        struct amdgpu_bo *gds;
        struct amdgpu_bo *gws;
        struct amdgpu_bo *oa;
        unsigned tries = 10;
        int r;

        INIT_LIST_HEAD(&p->validated);

        /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
        if (cs->in.bo_list_handle) {
                if (p->bo_list)
                        return -EINVAL;

                r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
                                       &p->bo_list);
                if (r)
                        return r;
        } else if (!p->bo_list) {
                /* Create an empty bo_list when no handle is provided */
                r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
                                          &p->bo_list);
                if (r)
                        return r;
        }

        amdgpu_bo_list_get_list(p->bo_list, &p->validated);
        if (p->bo_list->first_userptr != p->bo_list->num_entries)
                p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);

        INIT_LIST_HEAD(&duplicates);
        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

        if (p->uf_entry.robj && !p->uf_entry.robj->parent)
                list_add(&p->uf_entry.tv.head, &p->validated);

        while (1) {
                struct list_head need_pages;

                r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
                                           &duplicates);
                if (unlikely(r != 0)) {
                        if (r != -ERESTARTSYS)
                                DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
                        goto error_free_pages;
                }

                INIT_LIST_HEAD(&need_pages);
                amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                        struct amdgpu_bo *bo = e->robj;

                        if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
                                 &e->user_invalidated) && e->user_pages) {

                                /* We acquired a page array, but somebody
                                 * invalidated it. Free it and try again
                                 */
                                release_pages(e->user_pages,
                                              bo->tbo.ttm->num_pages);
                                kvfree(e->user_pages);
                                e->user_pages = NULL;
                        }

                        if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
                            !e->user_pages) {
                                list_del(&e->tv.head);
                                list_add(&e->tv.head, &need_pages);

                                amdgpu_bo_unreserve(e->robj);
                        }
                }

                if (list_empty(&need_pages))
                        break;

                /* Unreserve everything again. */
                ttm_eu_backoff_reservation(&p->ticket, &p->validated);

                /* We tried too many times, just abort */
                if (!--tries) {
                        r = -EDEADLK;
                        DRM_ERROR("deadlock in %s\n", __func__);
                        goto error_free_pages;
                }

                /* Fill the page arrays for all userptrs. */
                list_for_each_entry(e, &need_pages, tv.head) {
                        struct ttm_tt *ttm = e->robj->tbo.ttm;

                        e->user_pages = kvmalloc_array(ttm->num_pages,
                                                       sizeof(struct page *),
                                                       GFP_KERNEL | __GFP_ZERO);
                        if (!e->user_pages) {
                                r = -ENOMEM;
                                DRM_ERROR("kvmalloc failure in %s\n", __func__);
                                goto error_free_pages;
                        }

                        r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
                        if (r) {
                                DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
                                kvfree(e->user_pages);
                                e->user_pages = NULL;
                                goto error_free_pages;
                        }
                }

                /* And try again. */
                list_splice(&need_pages, &p->validated);
        }

        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
                                          &p->bytes_moved_vis_threshold);
        p->bytes_moved = 0;
        p->bytes_moved_vis = 0;
        p->evictable = list_last_entry(&p->validated,
                                       struct amdgpu_bo_list_entry,
                                       tv.head);

        r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
                                      amdgpu_cs_validate, p);
        if (r) {
                DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
                goto error_validate;
        }

        r = amdgpu_cs_list_validate(p, &duplicates);
        if (r) {
                DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
                goto error_validate;
        }

        r = amdgpu_cs_list_validate(p, &p->validated);
        if (r) {
                DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
                goto error_validate;
        }

        amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
                                     p->bytes_moved_vis);

        gds = p->bo_list->gds_obj;
        gws = p->bo_list->gws_obj;
        oa = p->bo_list->oa_obj;

        amdgpu_bo_list_for_each_entry(e, p->bo_list)
                e->bo_va = amdgpu_vm_bo_find(vm, e->robj);

        if (gds) {
                p->job->gds_base = amdgpu_bo_gpu_offset(gds);
                p->job->gds_size = amdgpu_bo_size(gds);
        }
        if (gws) {
                p->job->gws_base = amdgpu_bo_gpu_offset(gws);
                p->job->gws_size = amdgpu_bo_size(gws);
        }
        if (oa) {
                p->job->oa_base = amdgpu_bo_gpu_offset(oa);
                p->job->oa_size = amdgpu_bo_size(oa);
        }

        if (!r && p->uf_entry.robj) {
                struct amdgpu_bo *uf = p->uf_entry.robj;

                r = amdgpu_ttm_alloc_gart(&uf->tbo);
                p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
        }

error_validate:
        if (r)
                ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_free_pages:

        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                if (!e->user_pages)
                        continue;

                release_pages(e->user_pages,
                              e->robj->tbo.ttm->num_pages);
                kvfree(e->user_pages);
        }

        return r;
}

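/* Make the job's scheduler sync object wait for the fences of all reserved
 * BOs, honouring per-BO explicit-sync requests.
 */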
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
        struct amdgpu_bo_list_entry *e;
        int r;

        list_for_each_entry(e, &p->validated, tv.head) {
                struct reservation_object *resv = e->robj->tbo.resv;
                r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
                                     amdgpu_bo_explicit_sync(e->robj));

                if (r)
                        return r;
        }
        return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser:     parser structure holding parsing context.
 * @error:      error number
 * @backoff:    indicator to back off the buffer reservation
 *
 * If error is set, then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
                                  bool backoff)
{
        unsigned i;

        if (error && backoff)
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);

        for (i = 0; i < parser->num_post_dep_syncobjs; i++)
                drm_syncobj_put(parser->post_dep_syncobjs[i]);
        kfree(parser->post_dep_syncobjs);

        dma_fence_put(parser->fence);

        if (parser->ctx) {
                mutex_unlock(&parser->ctx->lock);
                amdgpu_ctx_put(parser->ctx);
        }
        if (parser->bo_list)
                amdgpu_bo_list_put(parser->bo_list);

        for (i = 0; i < parser->nchunks; i++)
                kvfree(parser->chunks[i].kdata);
        kfree(parser->chunks);
        if (parser->job)
                amdgpu_job_free(parser->job);
        amdgpu_bo_unref(&parser->uf_entry.robj);
}

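/* Bring the page tables up to date for this submission: clear freed
 * mappings, update the PRT and (under SR-IOV) CSA mappings as well as every
 * BO on the list, and add the resulting page-table-update fences to the
 * job's sync object.
 */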
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_device *adev = p->adev;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo *bo;
        int r;

        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                return r;

        r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
        if (r)
                return r;

        r = amdgpu_sync_fence(adev, &p->job->sync,
                              fpriv->prt_va->last_pt_update, false);
        if (r)
                return r;

        if (amdgpu_sriov_vf(adev)) {
                struct dma_fence *f;

                bo_va = fpriv->csa_va;
                BUG_ON(!bo_va);
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                f = bo_va->last_pt_update;
                r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
                if (r)
                        return r;
        }

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                struct dma_fence *f;

                /* ignore duplicates */
                bo = e->robj;
                if (!bo)
                        continue;

                bo_va = e->bo_va;
                if (bo_va == NULL)
                        continue;

                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                f = bo_va->last_pt_update;
                r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
                if (r)
                        return r;
        }

        r = amdgpu_vm_handle_moved(adev, vm);
        if (r)
                return r;

        r = amdgpu_vm_update_directories(adev, vm);
        if (r)
                return r;

        r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
        if (r)
                return r;

        if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                        /* ignore duplicates */
                        if (!e->robj)
                                continue;

                        amdgpu_vm_bo_invalidate(adev, e->robj, false);
                }
        }

        return r;
}

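/* Third stage: for rings that parse or patch IBs (UVD/VCE VM emulation) map
 * each IB chunk into the kernel and run the ring's parser, then update the
 * page tables and synchronize with the reserved BOs.
 */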
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                                 struct amdgpu_cs_parser *p)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_ring *ring = p->ring;
        int r;

        /* Only for UVD/VCE VM emulation */
        if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
                unsigned i, j;

                for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
                        struct drm_amdgpu_cs_chunk_ib *chunk_ib;
                        struct amdgpu_bo_va_mapping *m;
                        struct amdgpu_bo *aobj = NULL;
                        struct amdgpu_cs_chunk *chunk;
                        uint64_t offset, va_start;
                        struct amdgpu_ib *ib;
                        uint8_t *kptr;

                        chunk = &p->chunks[i];
                        ib = &p->job->ibs[j];
                        chunk_ib = chunk->kdata;

                        if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                                continue;

                        va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
                        r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
                        if (r) {
                                DRM_ERROR("IB va_start is invalid\n");
                                return r;
                        }

                        if ((va_start + chunk_ib->ib_bytes) >
                            (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
                                DRM_ERROR("IB va_start+ib_bytes is invalid\n");
                                return -EINVAL;
                        }

                        /* the IB should be reserved at this point */
                        r = amdgpu_bo_kmap(aobj, (void **)&kptr);
                        if (r)
                                return r;

                        offset = m->start * AMDGPU_GPU_PAGE_SIZE;
                        kptr += va_start - offset;

                        if (p->ring->funcs->parse_cs) {
                                memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
                                amdgpu_bo_kunmap(aobj);

                                r = amdgpu_ring_parse_cs(ring, p, j);
                                if (r)
                                        return r;
                        } else {
                                ib->ptr = (uint32_t *)kptr;
                                r = amdgpu_ring_patch_cs_in_place(ring, p, j);
                                amdgpu_bo_kunmap(aobj);
                                if (r)
                                        return r;
                        }

                        j++;
                }
        }

        if (p->job->vm) {
                p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);

                r = amdgpu_bo_vm_update_pte(p);
                if (r)
                        return r;

                r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
                if (r)
                        return r;
        }

        return amdgpu_cs_sync_rings(p);
}

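/* Walk the chunks once more and turn every AMDGPU_CHUNK_ID_IB into an
 * amdgpu_ib on the job, mapping the requested IP type/instance/ring to a
 * hardware ring. All IBs of one submission must target the same ring, and at
 * most one preemptible CE and one preemptible DE IB are allowed per GFX
 * submission under SR-IOV.
 */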
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                             struct amdgpu_cs_parser *parser)
{
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        int i, j;
        int r, ce_preempt = 0, de_preempt = 0;

        for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
                struct amdgpu_cs_chunk *chunk;
                struct amdgpu_ib *ib;
                struct drm_amdgpu_cs_chunk_ib *chunk_ib;
                struct amdgpu_ring *ring;

                chunk = &parser->chunks[i];
                ib = &parser->job->ibs[j];
                chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

                if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                        continue;

                if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
                        if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
                                if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
                                        ce_preempt++;
                                else
                                        de_preempt++;
                        }

                        /* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
                        if (ce_preempt > 1 || de_preempt > 1)
                                return -EINVAL;
                }

                r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
                                         chunk_ib->ip_instance, chunk_ib->ring, &ring);
                if (r)
                        return r;

                if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
                        parser->job->preamble_status |=
                                AMDGPU_PREAMBLE_IB_PRESENT;

                if (parser->ring && parser->ring != ring)
                        return -EINVAL;

                parser->ring = ring;

                r = amdgpu_ib_get(adev, vm,
                                  ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
                                  ib);
                if (r) {
                        DRM_ERROR("Failed to get ib!\n");
                        return r;
                }

                ib->gpu_addr = chunk_ib->va_start;
                ib->length_dw = chunk_ib->ib_bytes / 4;
                ib->flags = chunk_ib->flags;

                j++;
        }

        /* UVD & VCE fw doesn't support user fences */
        if (parser->job->uf_addr && (
            parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
            parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
                return -EINVAL;

        return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
}

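/* Add the fences named by an AMDGPU_CHUNK_ID_DEPENDENCIES chunk to the job's
 * sync object so the submission waits for them.
 */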
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
                                       struct amdgpu_cs_chunk *chunk)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        unsigned num_deps;
        int i, r;
        struct drm_amdgpu_cs_chunk_dep *deps;

        deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_dep);

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_ring *ring;
                struct amdgpu_ctx *ctx;
                struct dma_fence *fence;

                ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
                if (ctx == NULL)
                        return -EINVAL;

                r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
                                         deps[i].ip_type,
                                         deps[i].ip_instance,
                                         deps[i].ring, &ring);
                if (r) {
                        amdgpu_ctx_put(ctx);
                        return r;
                }

                fence = amdgpu_ctx_get_fence(ctx, ring,
                                             deps[i].handle);
                if (IS_ERR(fence)) {
                        r = PTR_ERR(fence);
                        amdgpu_ctx_put(ctx);
                        return r;
                } else if (fence) {
                        r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
                                              true);
                        dma_fence_put(fence);
                        amdgpu_ctx_put(ctx);
                        if (r)
                                return r;
                }
        }
        return 0;
}

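/* Look up the fence behind a syncobj handle and add it to the job's sync
 * object.
 */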
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
                                                 uint32_t handle)
{
        int r;
        struct dma_fence *fence;

        r = drm_syncobj_find_fence(p->filp, handle, &fence);
        if (r)
                return r;

        r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
        dma_fence_put(fence);

        return r;
}

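/* Handle an AMDGPU_CHUNK_ID_SYNCOBJ_IN chunk: wait on every listed syncobj
 * before the job runs.
 */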
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
                                            struct amdgpu_cs_chunk *chunk)
{
        unsigned num_deps;
        int i, r;
        struct drm_amdgpu_cs_chunk_sem *deps;

        deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);

        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
                if (r)
                        return r;
        }
        return 0;
}

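/* Handle an AMDGPU_CHUNK_ID_SYNCOBJ_OUT chunk: remember the listed syncobjs
 * so they can be signaled with the job's fence later, in
 * amdgpu_cs_post_dependencies().
 */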
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
                                             struct amdgpu_cs_chunk *chunk)
{
        unsigned num_deps;
        int i;
        struct drm_amdgpu_cs_chunk_sem *deps;

        deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);

        p->post_dep_syncobjs = kmalloc_array(num_deps,
                                             sizeof(struct drm_syncobj *),
                                             GFP_KERNEL);
        p->num_post_dep_syncobjs = 0;

        if (!p->post_dep_syncobjs)
                return -ENOMEM;

        for (i = 0; i < num_deps; ++i) {
                p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
                if (!p->post_dep_syncobjs[i])
                        return -EINVAL;
                p->num_post_dep_syncobjs++;
        }
        return 0;
}

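/* Dispatch all dependency-related chunks of the submission. */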
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                                  struct amdgpu_cs_parser *p)
{
        int i, r;

        for (i = 0; i < p->nchunks; ++i) {
                struct amdgpu_cs_chunk *chunk;

                chunk = &p->chunks[i];

                if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
                        r = amdgpu_cs_process_fence_dep(p, chunk);
                        if (r)
                                return r;
                } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
                        r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
                        if (r)
                                return r;
                } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
                        r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
                        if (r)
                                return r;
                }
        }

        return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
        int i;

        for (i = 0; i < p->num_post_dep_syncobjs; ++i)
                drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}

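/* Final stage: hand the job to the GPU scheduler. The MMU notifier lock
 * guards against userptr invalidation between the last check and pushing the
 * job; no memory may be allocated while it is held.
 */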
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_ring *ring = p->ring;
        struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
        enum drm_sched_priority priority;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_job *job;
        uint64_t seq;
        int r;

        job = p->job;
        p->job = NULL;

        r = drm_sched_job_init(&job->base, entity, p->filp);
        if (r)
                goto error_unlock;

        /* No memory allocation is allowed while holding the mn lock */
        amdgpu_mn_lock(p->mn);
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = e->robj;

                if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
                        r = -ERESTARTSYS;
                        goto error_abort;
                }
        }

        job->owner = p->filp;
        p->fence = dma_fence_get(&job->base.s_fence->finished);

        r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
        if (r) {
                dma_fence_put(p->fence);
                dma_fence_put(&job->base.s_fence->finished);
                amdgpu_job_free(job);
                amdgpu_mn_unlock(p->mn);
                return r;
        }

        amdgpu_cs_post_dependencies(p);

        if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
            !p->ctx->preamble_presented) {
                job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
                p->ctx->preamble_presented = true;
        }

        cs->out.handle = seq;
        job->uf_sequence = seq;

        amdgpu_job_free_resources(job);

        trace_amdgpu_cs_ioctl(job);
        amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);

        ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);

        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
        amdgpu_mn_unlock(p->mn);

        return 0;

error_abort:
        dma_fence_put(&job->base.s_fence->finished);
        job->base.s_fence = NULL;
        amdgpu_mn_unlock(p->mn);

error_unlock:
        amdgpu_job_free(job);
        return r;
}

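/**
 * amdgpu_cs_ioctl - main entry point for the command submission ioctl
 * @dev: drm device
 * @data: union drm_amdgpu_cs from userspace
 * @filp: file private
 *
 * Runs the whole pipeline: parser init, IB fill, BO handling, dependencies,
 * VM update and final submission to the scheduler.
 */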
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_cs *cs = data;
        struct amdgpu_cs_parser parser = {};
        bool reserved_buffers = false;
        int i, r;

        if (!adev->accel_working)
                return -EBUSY;

        parser.adev = adev;
        parser.filp = filp;

        r = amdgpu_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser!\n");
                goto out;
        }

        r = amdgpu_cs_ib_fill(adev, &parser);
        if (r)
                goto out;

        r = amdgpu_cs_parser_bos(&parser, data);
        if (r) {
                if (r == -ENOMEM)
                        DRM_ERROR("Not enough memory for command submission!\n");
                else if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to process the buffer list %d!\n", r);
                goto out;
        }

        reserved_buffers = true;

        r = amdgpu_cs_dependencies(adev, &parser);
        if (r) {
                DRM_ERROR("Failed in the dependencies handling %d!\n", r);
                goto out;
        }

        for (i = 0; i < parser.job->num_ibs; i++)
                trace_amdgpu_cs(&parser, i);

        r = amdgpu_cs_ib_vm_chunk(adev, &parser);
        if (r)
                goto out;

        r = amdgpu_cs_submit(&parser, cs);

out:
        amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
        return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *filp)
{
        union drm_amdgpu_wait_cs *wait = data;
        struct amdgpu_device *adev = dev->dev_private;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
        struct amdgpu_ring *ring = NULL;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        long r;

        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
        if (ctx == NULL)
                return -EINVAL;

        r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
                                 wait->in.ip_type, wait->in.ip_instance,
                                 wait->in.ring, &ring);
        if (r) {
                amdgpu_ctx_put(ctx);
                return r;
        }

        fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
        if (IS_ERR(fence)) {
                r = PTR_ERR(fence);
        } else if (fence) {
                r = dma_fence_wait_timeout(fence, true, timeout);
                if (r > 0 && fence->error)
                        r = fence->error;
                dma_fence_put(fence);
        } else {
                r = 1;
        }

        amdgpu_ctx_put(ctx);
        if (r < 0)
                return r;

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r == 0);

        return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
                                             struct drm_file *filp,
                                             struct drm_amdgpu_fence *user)
{
        struct amdgpu_ring *ring;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        int r;

        ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
        if (ctx == NULL)
                return ERR_PTR(-EINVAL);

        r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
                                 user->ip_instance, user->ring, &ring);
        if (r) {
                amdgpu_ctx_put(ctx);
                return ERR_PTR(r);
        }

        fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
        amdgpu_ctx_put(ctx);

        return fence;
}

1420 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1421                                     struct drm_file *filp)
1422 {
1423         struct amdgpu_device *adev = dev->dev_private;
1424         union drm_amdgpu_fence_to_handle *info = data;
1425         struct dma_fence *fence;
1426         struct drm_syncobj *syncobj;
1427         struct sync_file *sync_file;
1428         int fd, r;
1429
1430         fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1431         if (IS_ERR(fence))
1432                 return PTR_ERR(fence);
1433
1434         switch (info->in.what) {
1435         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1436                 r = drm_syncobj_create(&syncobj, 0, fence);
1437                 dma_fence_put(fence);
1438                 if (r)
1439                         return r;
1440                 r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1441                 drm_syncobj_put(syncobj);
1442                 return r;
1443
1444         case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1445                 r = drm_syncobj_create(&syncobj, 0, fence);
1446                 dma_fence_put(fence);
1447                 if (r)
1448                         return r;
1449                 r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
1450                 drm_syncobj_put(syncobj);
1451                 return r;
1452
1453         case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1454                 fd = get_unused_fd_flags(O_CLOEXEC);
1455                 if (fd < 0) {
1456                         dma_fence_put(fence);
1457                         return fd;
1458                 }
1459
1460                 sync_file = sync_file_create(fence);
1461                 dma_fence_put(fence);
1462                 if (!sync_file) {
1463                         put_unused_fd(fd);
1464                         return -ENOMEM;
1465                 }
1466
1467                 fd_install(fd, sync_file->file);
1468                 info->out.handle = fd;
1469                 return 0;
1470
1471         default:
1472                 dma_fence_put(fence);
1473                 return -EINVAL;
1474         }
1475 }
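
/*
 * Userspace sketch, illustrative only: exporting the fence of an earlier
 * submission as a sync_file fd via DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE
 * (names from amdgpu_drm.h, drmIoctl() from libdrm; ctx_id and seq_no
 * would come from the context create and CS ioctls).
 *
 *	union drm_amdgpu_fence_to_handle args = {};
 *
 *	args.in.fence.ctx_id = ctx_id;
 *	args.in.fence.ip_type = AMDGPU_HW_IP_GFX;
 *	args.in.fence.seq_no = seq_no;
 *	args.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &args) == 0)
 *		sync_file_fd = args.out.handle;
 */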
1476
1477 /**
1478  * amdgpu_cs_wait_all_fences - wait on all fences to signal
1479  *
1480  * @adev: amdgpu device
1481  * @filp: file private
1482  * @wait: wait parameters
1483  * @fences: array of drm_amdgpu_fence
1484  */
1485 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1486                                      struct drm_file *filp,
1487                                      union drm_amdgpu_wait_fences *wait,
1488                                      struct drm_amdgpu_fence *fences)
1489 {
1490         uint32_t fence_count = wait->in.fence_count;
1491         unsigned int i;
1492         long r = 1;
1493
1494         for (i = 0; i < fence_count; i++) {
1495                 struct dma_fence *fence;
1496                 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1497
1498                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1499                 if (IS_ERR(fence))
1500                         return PTR_ERR(fence);
1501                 else if (!fence)
1502                         continue;
1503
1504                 r = dma_fence_wait_timeout(fence, true, timeout);
1505                 /* check the fence error before the reference is dropped */
1506                 if (r > 0 && fence->error)
1507                         r = fence->error;
1508                 dma_fence_put(fence);
1509                 if (r < 0)
1510                         return r;
1511
1512                 if (r == 0)
1513                         break;
1514         }
1515
1516         memset(wait, 0, sizeof(*wait));
1517         wait->out.status = (r > 0);
1518
1519         return 0;
1520 }
1521
1522 /**
1523  * amdgpu_cs_wait_any_fence - wait on any fence to signal
1524  *
1525  * @adev: amdgpu device
1526  * @filp: file private
1527  * @wait: wait parameters
1528  * @fences: array of drm_amdgpu_fence
1529  */
1530 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1531                                     struct drm_file *filp,
1532                                     union drm_amdgpu_wait_fences *wait,
1533                                     struct drm_amdgpu_fence *fences)
1534 {
1535         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1536         uint32_t fence_count = wait->in.fence_count;
1537         uint32_t first = ~0;
1538         struct dma_fence **array;
1539         unsigned int i;
1540         long r;
1541
1542         /* Prepare the fence array */
1543         array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1544
1545         if (array == NULL)
1546                 return -ENOMEM;
1547
1548         for (i = 0; i < fence_count; i++) {
1549                 struct dma_fence *fence;
1550
1551                 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1552                 if (IS_ERR(fence)) {
1553                         r = PTR_ERR(fence);
1554                         goto err_free_fence_array;
1555                 } else if (fence) {
1556                         array[i] = fence;
1557                 } else { /* NULL, the fence has already been signaled */
1558                         r = 1;
1559                         first = i;
1560                         goto out;
1561                 }
1562         }
1563
1564         r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1565                                        &first);
1566         if (r < 0)
1567                 goto err_free_fence_array;
1568
1569 out:
1570         memset(wait, 0, sizeof(*wait));
1571         wait->out.status = (r > 0);
1572         wait->out.first_signaled = first;
1573
1574         if (first < fence_count && array[first])
1575                 r = array[first]->error;
1576         else
1577                 r = 0;
1578
1579 err_free_fence_array:
1580         for (i = 0; i < fence_count; i++)
1581                 dma_fence_put(array[i]);
1582         kfree(array);
1583
1584         return r;
1585 }
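
/*
 * A note on the any-wait above, illustrative rather than normative:
 * dma_fence_wait_any_timeout() returns the remaining timeout in jiffies
 * (> 0) once some fence signals and stores that fence's index in @first;
 * it returns 0 on timeout and a negative error code on failure.  A fence
 * that amdgpu_cs_get_fence() reports as NULL (already signaled and
 * retired) short-circuits the wait with r = 1 and first = i, so userspace
 * observes the same out.status and out.first_signaled either way.  On
 * timeout, first_signaled is left at ~0.
 */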
1586
1587 /**
1588  * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1589  *
1590  * @dev: drm device
1591  * @data: data from userspace
1592  * @filp: file private
1593  */
1594 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1595                                 struct drm_file *filp)
1596 {
1597         struct amdgpu_device *adev = dev->dev_private;
1598         union drm_amdgpu_wait_fences *wait = data;
1599         uint32_t fence_count = wait->in.fence_count;
1600         struct drm_amdgpu_fence *fences_user;
1601         struct drm_amdgpu_fence *fences;
1602         int r;
1603
1604         /* Get the fences from userspace */
1605         fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1606                         GFP_KERNEL);
1607         if (fences == NULL)
1608                 return -ENOMEM;
1609
1610         fences_user = u64_to_user_ptr(wait->in.fences);
1611         if (copy_from_user(fences, fences_user,
1612                 sizeof(struct drm_amdgpu_fence) * fence_count)) {
1613                 r = -EFAULT;
1614                 goto err_free_fences;
1615         }
1616
1617         if (wait->in.wait_all)
1618                 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1619         else
1620                 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1621
1622 err_free_fences:
1623         kfree(fences);
1624
1625         return r;
1626 }
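
/*
 * Userspace sketch, illustrative only: waiting on any of two earlier
 * submissions via DRM_IOCTL_AMDGPU_WAIT_FENCES (names from amdgpu_drm.h,
 * drmIoctl() from libdrm).  Note that in.timeout_ns is an absolute
 * timestamp: amdgpu_gem_timeout() subtracts the current time and treats
 * negative values as "wait forever".  now_ns() below is a hypothetical
 * stand-in for clock_gettime(CLOCK_MONOTONIC) in nanoseconds.
 *
 *	struct drm_amdgpu_fence fences[2];	(filled from earlier CS results)
 *	union drm_amdgpu_wait_fences args = {};
 *
 *	args.in.fences = (uint64_t)(uintptr_t)fences;
 *	args.in.fence_count = 2;
 *	args.in.wait_all = 0;
 *	args.in.timeout_ns = now_ns() + 1000000000ull;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args) == 0 &&
 *	    args.out.status)
 *		first = args.out.first_signaled;
 */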
1627
1628 /**
1629  * amdgpu_cs_find_mapping - find bo_va mapping for a VM address
1630  *
1631  * @parser: command submission parser context
1632  * @addr: VM address
1633  * @bo: resulting BO of the mapping found
1634  * @map: resulting mapping found
1635  *
1636  * Search the buffer objects in the command submission context for a certain
1637  * virtual memory address. Returns 0 on success, a negative error code otherwise.
1638  */
1639 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1640                            uint64_t addr, struct amdgpu_bo **bo,
1641                            struct amdgpu_bo_va_mapping **map)
1642 {
1643         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1644         struct ttm_operation_ctx ctx = { false, false };
1645         struct amdgpu_vm *vm = &fpriv->vm;
1646         struct amdgpu_bo_va_mapping *mapping;
1647         int r;
1648
1649         addr /= AMDGPU_GPU_PAGE_SIZE;
1650
1651         mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1652         if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1653                 return -EINVAL;
1654
1655         *bo = mapping->bo_va->base.bo;
1656         *map = mapping;
1657
1658         /* Double check that the BO is reserved by this CS */
1659         if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
1660                 return -EINVAL;
1661
1662         if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1663                 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1664                 amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1665                 r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1666                 if (r)
1667                         return r;
1668         }
1669
1670         return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1671 }
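
/*
 * Illustrative notes, not part of the driver: amdgpu_vm_bo_lookup_mapping()
 * works in GPU page units (AMDGPU_GPU_PAGE_SIZE bytes), hence the division
 * above, and the resolved BO must already be reserved by this CS, which the
 * ticket check enforces.  Forcing AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS before
 * revalidation presumably keeps the BO mappable in one piece for the IB
 * parsers that patch it.  A hypothetical caller resolving a GPU virtual
 * address found in an IB:
 *
 *	r = amdgpu_cs_find_mapping(parser, va, &bo, &mapping);
 *	if (r)
 *		return r;
 *	offset_in_bo = va - mapping->start * AMDGPU_GPU_PAGE_SIZE;
 */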