GNU Linux-libre 4.14.295-gnu1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
32 #include <drm/ttm/ttm_bo_api.h>
33 #include <drm/ttm/ttm_bo_driver.h>
34 #include <drm/ttm/ttm_placement.h>
35 #include <drm/ttm/ttm_module.h>
36 #include <drm/ttm/ttm_page_alloc.h>
37 #include <drm/drmP.h>
38 #include <drm/amdgpu_drm.h>
39 #include <linux/seq_file.h>
40 #include <linux/slab.h>
41 #include <linux/swiotlb.h>
42 #include <linux/swap.h>
43 #include <linux/pagemap.h>
44 #include <linux/debugfs.h>
45 #include "amdgpu.h"
46 #include "amdgpu_trace.h"
47 #include "bif/bif_4_1_d.h"
48
49 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
50
51 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
52                              struct ttm_mem_reg *mem, unsigned num_pages,
53                              uint64_t offset, unsigned window,
54                              struct amdgpu_ring *ring,
55                              uint64_t *addr);
56
57 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
58 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
59
60 /*
61  * Global TTM memory accounting and buffer object state.
62  */
63 static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
64 {
65         return ttm_mem_global_init(ref->object);
66 }
67
68 static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
69 {
70         ttm_mem_global_release(ref->object);
71 }
72
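/*
 * Take a reference on the global TTM memory accounting and BO objects and
 * set up the scheduler entity used to submit buffer move jobs on the
 * buffer_funcs ring.
 */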
73 static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
74 {
75         struct drm_global_reference *global_ref;
76         struct amdgpu_ring *ring;
77         struct amd_sched_rq *rq;
78         int r;
79
80         adev->mman.mem_global_referenced = false;
81         global_ref = &adev->mman.mem_global_ref;
82         global_ref->global_type = DRM_GLOBAL_TTM_MEM;
83         global_ref->size = sizeof(struct ttm_mem_global);
84         global_ref->init = &amdgpu_ttm_mem_global_init;
85         global_ref->release = &amdgpu_ttm_mem_global_release;
86         r = drm_global_item_ref(global_ref);
87         if (r) {
88                 DRM_ERROR("Failed setting up TTM memory accounting "
89                           "subsystem.\n");
90                 goto error_mem;
91         }
92
93         adev->mman.bo_global_ref.mem_glob =
94                 adev->mman.mem_global_ref.object;
95         global_ref = &adev->mman.bo_global_ref.ref;
96         global_ref->global_type = DRM_GLOBAL_TTM_BO;
97         global_ref->size = sizeof(struct ttm_bo_global);
98         global_ref->init = &ttm_bo_global_init;
99         global_ref->release = &ttm_bo_global_release;
100         r = drm_global_item_ref(global_ref);
101         if (r) {
102                 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
103                 goto error_bo;
104         }
105
106         mutex_init(&adev->mman.gtt_window_lock);
107
108         ring = adev->mman.buffer_funcs_ring;
109         rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
110         r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
111                                   rq, amdgpu_sched_jobs);
112         if (r) {
113                 DRM_ERROR("Failed setting up TTM BO move run queue.\n");
114                 goto error_entity;
115         }
116
117         adev->mman.mem_global_referenced = true;
118
119         return 0;
120
121 error_entity:
122         drm_global_item_unref(&adev->mman.bo_global_ref.ref);
123 error_bo:
124         drm_global_item_unref(&adev->mman.mem_global_ref);
125 error_mem:
126         return r;
127 }
128
129 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
130 {
131         if (adev->mman.mem_global_referenced) {
132                 amd_sched_entity_fini(adev->mman.entity.sched,
133                                       &adev->mman.entity);
134                 mutex_destroy(&adev->mman.gtt_window_lock);
135                 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
136                 drm_global_item_unref(&adev->mman.mem_global_ref);
137                 adev->mman.mem_global_referenced = false;
138         }
139 }
140
141 static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
142 {
143         return 0;
144 }
145
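/*
 * Describe each memory domain to TTM: manager functions, GPU offset,
 * caching options and flags for SYSTEM, GTT, VRAM and the on-chip
 * GDS/GWS/OA heaps.
 */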
146 static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
147                                 struct ttm_mem_type_manager *man)
148 {
149         struct amdgpu_device *adev;
150
151         adev = amdgpu_ttm_adev(bdev);
152
153         switch (type) {
154         case TTM_PL_SYSTEM:
155                 /* System memory */
156                 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
157                 man->available_caching = TTM_PL_MASK_CACHING;
158                 man->default_caching = TTM_PL_FLAG_CACHED;
159                 break;
160         case TTM_PL_TT:
161                 man->func = &amdgpu_gtt_mgr_func;
162                 man->gpu_offset = adev->mc.gart_start;
163                 man->available_caching = TTM_PL_MASK_CACHING;
164                 man->default_caching = TTM_PL_FLAG_CACHED;
165                 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
166                 break;
167         case TTM_PL_VRAM:
168                 /* "On-card" video ram */
169                 man->func = &amdgpu_vram_mgr_func;
170                 man->gpu_offset = adev->mc.vram_start;
171                 man->flags = TTM_MEMTYPE_FLAG_FIXED |
172                              TTM_MEMTYPE_FLAG_MAPPABLE;
173                 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
174                 man->default_caching = TTM_PL_FLAG_WC;
175                 break;
176         case AMDGPU_PL_GDS:
177         case AMDGPU_PL_GWS:
178         case AMDGPU_PL_OA:
179                 /* On-chip GDS, GWS and OA memory */
180                 man->func = &ttm_bo_manager_func;
181                 man->gpu_offset = 0;
182                 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
183                 man->available_caching = TTM_PL_FLAG_UNCACHED;
184                 man->default_caching = TTM_PL_FLAG_UNCACHED;
185                 break;
186         default:
187                 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
188                 return -EINVAL;
189         }
190         return 0;
191 }
192
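/*
 * Fill in the placements TTM should use when evicting this BO. VRAM BOs are
 * preferably moved to the CPU-invisible part of VRAM or to GTT; everything
 * else is evicted to CPU (system) memory.
 */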
193 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
194                                 struct ttm_placement *placement)
195 {
196         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
197         struct amdgpu_bo *abo;
198         static const struct ttm_place placements = {
199                 .fpfn = 0,
200                 .lpfn = 0,
201                 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
202         };
203
204         if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
205                 placement->placement = &placements;
206                 placement->busy_placement = &placements;
207                 placement->num_placement = 1;
208                 placement->num_busy_placement = 1;
209                 return;
210         }
211         abo = container_of(bo, struct amdgpu_bo, tbo);
212         switch (bo->mem.mem_type) {
213         case TTM_PL_VRAM:
214                 if (adev->mman.buffer_funcs &&
215                     adev->mman.buffer_funcs_ring &&
216                     adev->mman.buffer_funcs_ring->ready == false) {
217                         amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
218                 } else if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
219                            !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
220                         unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
221                         struct drm_mm_node *node = bo->mem.mm_node;
222                         unsigned long pages_left;
223
224                         for (pages_left = bo->mem.num_pages;
225                              pages_left;
226                              pages_left -= node->size, node++) {
227                                 if (node->start < fpfn)
228                                         break;
229                         }
230
231                         if (!pages_left)
232                                 goto gtt;
233
234                         /* Try evicting to the CPU inaccessible part of VRAM
235                          * first, but only set GTT as busy placement, so this
236                          * BO will be evicted to GTT rather than causing other
237                          * BOs to be evicted from VRAM
238                          */
239                         amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
240                                                          AMDGPU_GEM_DOMAIN_GTT);
241                         abo->placements[0].fpfn = fpfn;
242                         abo->placements[0].lpfn = 0;
243                         abo->placement.busy_placement = &abo->placements[1];
244                         abo->placement.num_busy_placement = 1;
245                 } else {
246 gtt:
247                         amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
248                 }
249                 break;
250         case TTM_PL_TT:
251         default:
252                 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
253         }
254         *placement = abo->placement;
255 }
256
257 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
258 {
259         struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
260
261         if (amdgpu_ttm_tt_get_usermm(bo->ttm))
262                 return -EPERM;
263         return drm_vma_node_verify_access(&abo->gem_base.vma_node,
264                                           filp->private_data);
265 }
266
267 static void amdgpu_move_null(struct ttm_buffer_object *bo,
268                              struct ttm_mem_reg *new_mem)
269 {
270         struct ttm_mem_reg *old_mem = &bo->mem;
271
272         BUG_ON(old_mem->mm_node != NULL);
273         *old_mem = *new_mem;
274         new_mem->mm_node = NULL;
275 }
276
277 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
278                                     struct drm_mm_node *mm_node,
279                                     struct ttm_mem_reg *mem)
280 {
281         uint64_t addr = 0;
282
283         if (mem->mem_type != TTM_PL_TT ||
284             amdgpu_gtt_mgr_is_allocated(mem)) {
285                 addr = mm_node->start << PAGE_SHIFT;
286                 addr += bo->bdev->man[mem->mem_type].gpu_offset;
287         }
288         return addr;
289 }
290
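/*
 * Copy a BO between placements using the GPU buffer-funcs (DMA) engine,
 * walking the drm_mm nodes of both placements and mapping not-yet-allocated
 * GTT ranges through the GART transfer windows as needed.
 */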
291 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
292                             bool evict, bool no_wait_gpu,
293                             struct ttm_mem_reg *new_mem,
294                             struct ttm_mem_reg *old_mem)
295 {
296         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
297         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
298
299         struct drm_mm_node *old_mm, *new_mm;
300         uint64_t old_start, old_size, new_start, new_size;
301         unsigned long num_pages;
302         struct dma_fence *fence = NULL;
303         int r;
304
305         BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
306
307         if (!ring->ready) {
308                 DRM_ERROR("Trying to move memory with ring turned off.\n");
309                 return -EINVAL;
310         }
311
312         old_mm = old_mem->mm_node;
313         old_size = old_mm->size;
314         old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
315
316         new_mm = new_mem->mm_node;
317         new_size = new_mm->size;
318         new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
319
320         num_pages = new_mem->num_pages;
321         mutex_lock(&adev->mman.gtt_window_lock);
322         while (num_pages) {
323                 unsigned long cur_pages = min(min(old_size, new_size),
324                                               (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
325                 uint64_t from = old_start, to = new_start;
326                 struct dma_fence *next;
327
328                 if (old_mem->mem_type == TTM_PL_TT &&
329                     !amdgpu_gtt_mgr_is_allocated(old_mem)) {
330                         r = amdgpu_map_buffer(bo, old_mem, cur_pages,
331                                               old_start, 0, ring, &from);
332                         if (r)
333                                 goto error;
334                 }
335
336                 if (new_mem->mem_type == TTM_PL_TT &&
337                     !amdgpu_gtt_mgr_is_allocated(new_mem)) {
338                         r = amdgpu_map_buffer(bo, new_mem, cur_pages,
339                                               new_start, 1, ring, &to);
340                         if (r)
341                                 goto error;
342                 }
343
344                 r = amdgpu_copy_buffer(ring, from, to,
345                                        cur_pages * PAGE_SIZE,
346                                        bo->resv, &next, false, true);
347                 if (r)
348                         goto error;
349
350                 dma_fence_put(fence);
351                 fence = next;
352
353                 num_pages -= cur_pages;
354                 if (!num_pages)
355                         break;
356
357                 old_size -= cur_pages;
358                 if (!old_size) {
359                         old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
360                         old_size = old_mm->size;
361                 } else {
362                         old_start += cur_pages * PAGE_SIZE;
363                 }
364
365                 new_size -= cur_pages;
366                 if (!new_size) {
367                         new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
368                         new_size = new_mm->size;
369                 } else {
370                         new_start += cur_pages * PAGE_SIZE;
371                 }
372         }
373         mutex_unlock(&adev->mman.gtt_window_lock);
374
375         r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
376         dma_fence_put(fence);
377         return r;
378
379 error:
380         mutex_unlock(&adev->mman.gtt_window_lock);
381
382         if (fence)
383                 dma_fence_wait(fence, false);
384         dma_fence_put(fence);
385         return r;
386 }
387
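/*
 * Move a BO from VRAM to system memory: blit it into a temporary GTT
 * placement first, then let TTM move it to the final system placement.
 */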
388 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
389                                 bool evict, bool interruptible,
390                                 bool no_wait_gpu,
391                                 struct ttm_mem_reg *new_mem)
392 {
393         struct amdgpu_device *adev;
394         struct ttm_mem_reg *old_mem = &bo->mem;
395         struct ttm_mem_reg tmp_mem;
396         struct ttm_place placements;
397         struct ttm_placement placement;
398         int r;
399
400         adev = amdgpu_ttm_adev(bo->bdev);
401         tmp_mem = *new_mem;
402         tmp_mem.mm_node = NULL;
403         placement.num_placement = 1;
404         placement.placement = &placements;
405         placement.num_busy_placement = 1;
406         placement.busy_placement = &placements;
407         placements.fpfn = 0;
408         placements.lpfn = 0;
409         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
410         r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
411                              interruptible, no_wait_gpu);
412         if (unlikely(r)) {
413                 return r;
414         }
415
416         r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
417         if (unlikely(r)) {
418                 goto out_cleanup;
419         }
420
421         r = ttm_tt_bind(bo->ttm, &tmp_mem);
422         if (unlikely(r)) {
423                 goto out_cleanup;
424         }
425         r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
426         if (unlikely(r)) {
427                 goto out_cleanup;
428         }
429         r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
430 out_cleanup:
431         ttm_bo_mem_put(bo, &tmp_mem);
432         return r;
433 }
434
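/*
 * Move a BO from system memory to VRAM: bind it to a temporary GTT
 * placement first, then blit it into the final VRAM placement.
 */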
435 static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
436                                 bool evict, bool interruptible,
437                                 bool no_wait_gpu,
438                                 struct ttm_mem_reg *new_mem)
439 {
440         struct amdgpu_device *adev;
441         struct ttm_mem_reg *old_mem = &bo->mem;
442         struct ttm_mem_reg tmp_mem;
443         struct ttm_placement placement;
444         struct ttm_place placements;
445         int r;
446
447         adev = amdgpu_ttm_adev(bo->bdev);
448         tmp_mem = *new_mem;
449         tmp_mem.mm_node = NULL;
450         placement.num_placement = 1;
451         placement.placement = &placements;
452         placement.num_busy_placement = 1;
453         placement.busy_placement = &placements;
454         placements.fpfn = 0;
455         placements.lpfn = 0;
456         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
457         r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
458                              interruptible, no_wait_gpu);
459         if (unlikely(r)) {
460                 return r;
461         }
462         r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
463         if (unlikely(r)) {
464                 goto out_cleanup;
465         }
466         r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
467         if (unlikely(r)) {
468                 goto out_cleanup;
469         }
470 out_cleanup:
471         ttm_bo_mem_put(bo, &tmp_mem);
472         return r;
473 }
474
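/*
 * Main TTM move callback. Handles trivial moves (NULL ttm, SYSTEM<->TT),
 * dispatches GPU-assisted copies when the copy engine is available and
 * falls back to ttm_bo_move_memcpy otherwise.
 */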
475 static int amdgpu_bo_move(struct ttm_buffer_object *bo,
476                         bool evict, bool interruptible,
477                         bool no_wait_gpu,
478                         struct ttm_mem_reg *new_mem)
479 {
480         struct amdgpu_device *adev;
481         struct amdgpu_bo *abo;
482         struct ttm_mem_reg *old_mem = &bo->mem;
483         int r;
484
485         /* Can't move a pinned BO */
486         abo = container_of(bo, struct amdgpu_bo, tbo);
487         if (WARN_ON_ONCE(abo->pin_count > 0))
488                 return -EINVAL;
489
490         adev = amdgpu_ttm_adev(bo->bdev);
491
492         if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
493                 amdgpu_move_null(bo, new_mem);
494                 return 0;
495         }
496         if ((old_mem->mem_type == TTM_PL_TT &&
497              new_mem->mem_type == TTM_PL_SYSTEM) ||
498             (old_mem->mem_type == TTM_PL_SYSTEM &&
499              new_mem->mem_type == TTM_PL_TT)) {
500                 /* bind is enough */
501                 amdgpu_move_null(bo, new_mem);
502                 return 0;
503         }
504         if (adev->mman.buffer_funcs == NULL ||
505             adev->mman.buffer_funcs_ring == NULL ||
506             !adev->mman.buffer_funcs_ring->ready) {
507                 /* use memcpy */
508                 goto memcpy;
509         }
510
511         if (old_mem->mem_type == TTM_PL_VRAM &&
512             new_mem->mem_type == TTM_PL_SYSTEM) {
513                 r = amdgpu_move_vram_ram(bo, evict, interruptible,
514                                         no_wait_gpu, new_mem);
515         } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
516                    new_mem->mem_type == TTM_PL_VRAM) {
517                 r = amdgpu_move_ram_vram(bo, evict, interruptible,
518                                             no_wait_gpu, new_mem);
519         } else {
520                 r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
521         }
522
523         if (r) {
524 memcpy:
525                 r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
526                 if (r) {
527                         return r;
528                 }
529         }
530
531         if (bo->type == ttm_bo_type_device &&
532             new_mem->mem_type == TTM_PL_VRAM &&
533             old_mem->mem_type != TTM_PL_VRAM) {
534                 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
535                  * accesses the BO after it's moved.
536                  */
537                 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
538         }
539
540         /* update statistics */
541         atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
542         return 0;
543 }
544
545 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
546 {
547         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
548         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
549
550         mem->bus.addr = NULL;
551         mem->bus.offset = 0;
552         mem->bus.size = mem->num_pages << PAGE_SHIFT;
553         mem->bus.base = 0;
554         mem->bus.is_iomem = false;
555         if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
556                 return -EINVAL;
557         switch (mem->mem_type) {
558         case TTM_PL_SYSTEM:
559                 /* system memory */
560                 return 0;
561         case TTM_PL_TT:
562                 break;
563         case TTM_PL_VRAM:
564                 mem->bus.offset = mem->start << PAGE_SHIFT;
565                 /* check if it's visible */
566                 if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
567                         return -EINVAL;
568                 mem->bus.base = adev->mc.aper_base;
569                 mem->bus.is_iomem = true;
570                 break;
571         default:
572                 return -EINVAL;
573         }
574         return 0;
575 }
576
577 static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
578 {
579 }
580
581 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
582                                            unsigned long page_offset)
583 {
584         struct drm_mm_node *mm = bo->mem.mm_node;
585         uint64_t size = mm->size;
586         uint64_t offset = page_offset;
587
588         page_offset = do_div(offset, size);
589         mm += offset;
590         return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
591 }
592
593 /*
594  * TTM backend functions.
595  */
596 struct amdgpu_ttm_gup_task_list {
597         struct list_head        list;
598         struct task_struct      *task;
599 };
600
601 struct amdgpu_ttm_tt {
602         struct ttm_dma_tt       ttm;
603         struct amdgpu_device    *adev;
604         u64                     offset;
605         uint64_t                userptr;
606         struct mm_struct        *usermm;
607         uint32_t                userflags;
608         spinlock_t              guptasklock;
609         struct list_head        guptasks;
610         atomic_t                mmu_invalidations;
611         struct list_head        list;
612 };
613
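/*
 * Pin the user pages backing a userptr BO with get_user_pages(), tracking
 * the tasks currently inside the GUP loop so that MMU invalidations they
 * trigger themselves are not counted against the BO.
 */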
614 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
615 {
616         struct amdgpu_ttm_tt *gtt = (void *)ttm;
617         unsigned int flags = 0;
618         unsigned pinned = 0;
619         int r;
620
621         if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
622                 flags |= FOLL_WRITE;
623
624         if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
625                 /* check that we only use anonymous memory
626                    to prevent problems with writeback */
627                 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
628                 struct vm_area_struct *vma;
629
630                 vma = find_vma(gtt->usermm, gtt->userptr);
631                 if (!vma || vma->vm_file || vma->vm_end < end)
632                         return -EPERM;
633         }
634
635         do {
636                 unsigned num_pages = ttm->num_pages - pinned;
637                 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
638                 struct page **p = pages + pinned;
639                 struct amdgpu_ttm_gup_task_list guptask;
640
641                 guptask.task = current;
642                 spin_lock(&gtt->guptasklock);
643                 list_add(&guptask.list, &gtt->guptasks);
644                 spin_unlock(&gtt->guptasklock);
645
646                 r = get_user_pages(userptr, num_pages, flags, p, NULL);
647
648                 spin_lock(&gtt->guptasklock);
649                 list_del(&guptask.list);
650                 spin_unlock(&gtt->guptasklock);
651
652                 if (r < 0)
653                         goto release_pages;
654
655                 pinned += r;
656
657         } while (pinned < ttm->num_pages);
658
659         return 0;
660
661 release_pages:
662         release_pages(pages, pinned, 0);
663         return r;
664 }
665
666 static void amdgpu_trace_dma_map(struct ttm_tt *ttm)
667 {
668         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
669         struct amdgpu_ttm_tt *gtt = (void *)ttm;
670         unsigned i;
671
672         if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) {
673                 for (i = 0; i < ttm->num_pages; i++) {
674                         trace_amdgpu_ttm_tt_populate(
675                                 adev,
676                                 gtt->ttm.dma_address[i],
677                                 page_to_phys(ttm->pages[i]));
678                 }
679         }
680 }
681
682 static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm)
683 {
684         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
685         struct amdgpu_ttm_tt *gtt = (void *)ttm;
686         unsigned i;
687
688         if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) {
689                 for (i = 0; i < ttm->num_pages; i++) {
690                         trace_amdgpu_ttm_tt_unpopulate(
691                                 adev,
692                                 gtt->ttm.dma_address[i],
693                                 page_to_phys(ttm->pages[i]));
694                 }
695         }
696 }
697
698 /* prepare the sg table with the user pages */
699 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
700 {
701         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
702         struct amdgpu_ttm_tt *gtt = (void *)ttm;
703         unsigned nents;
704         int r;
705
706         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
707         enum dma_data_direction direction = write ?
708                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
709
710         r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
711                                       ttm->num_pages << PAGE_SHIFT,
712                                       GFP_KERNEL);
713         if (r)
714                 goto release_sg;
715
716         r = -ENOMEM;
717         nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
718         if (nents != ttm->sg->nents)
719                 goto release_sg;
720
721         drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
722                                          gtt->ttm.dma_address, ttm->num_pages);
723
724         amdgpu_trace_dma_map(ttm);
725
726         return 0;
727
728 release_sg:
729         kfree(ttm->sg);
730         ttm->sg = NULL;
731         return r;
732 }
733
734 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
735 {
736         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
737         struct amdgpu_ttm_tt *gtt = (void *)ttm;
738         struct sg_page_iter sg_iter;
739
740         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
741         enum dma_data_direction direction = write ?
742                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
743
744         /* double check that we don't free the table twice */
745         if (!ttm->sg || !ttm->sg->sgl)
746                 return;
747
748         /* free the sg table and pages again */
749         dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
750
751         for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
752                 struct page *page = sg_page_iter_page(&sg_iter);
753                 if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
754                         set_page_dirty(page);
755
756                 mark_page_accessed(page);
757                 put_page(page);
758         }
759
760         amdgpu_trace_dma_unmap(ttm);
761
762         sg_free_table(ttm->sg);
763 }
764
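/*
 * ttm_backend_func.bind: pin userptr pages if needed and, once the GTT
 * range is actually allocated, write the page table entries into the GART.
 */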
765 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
766                                    struct ttm_mem_reg *bo_mem)
767 {
768         struct amdgpu_ttm_tt *gtt = (void*)ttm;
769         uint64_t flags;
770         int r = 0;
771
772         if (gtt->userptr) {
773                 r = amdgpu_ttm_tt_pin_userptr(ttm);
774                 if (r) {
775                         DRM_ERROR("failed to pin userptr\n");
776                         return r;
777                 }
778         }
779         if (!ttm->num_pages) {
780                 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
781                      ttm->num_pages, bo_mem, ttm);
782         }
783
784         if (bo_mem->mem_type == AMDGPU_PL_GDS ||
785             bo_mem->mem_type == AMDGPU_PL_GWS ||
786             bo_mem->mem_type == AMDGPU_PL_OA)
787                 return -EINVAL;
788
789         if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
790                 return 0;
791
792         spin_lock(&gtt->adev->gtt_list_lock);
793         flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
794         gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
795         r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
796                 ttm->pages, gtt->ttm.dma_address, flags);
797
798         if (r) {
799                 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
800                           ttm->num_pages, gtt->offset);
801                 goto error_gart_bind;
802         }
803
804         list_add_tail(&gtt->list, &gtt->adev->gtt_list);
805 error_gart_bind:
806         spin_unlock(&gtt->adev->gtt_list_lock);
807         return r;
808 }
809
810 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
811 {
812         struct amdgpu_ttm_tt *gtt = (void *)ttm;
813
814         return gtt && !list_empty(&gtt->list);
815 }
816
817 int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
818 {
819         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
820         struct ttm_tt *ttm = bo->ttm;
821         struct ttm_mem_reg tmp;
822
823         struct ttm_placement placement;
824         struct ttm_place placements;
825         int r;
826
827         if (!ttm || amdgpu_ttm_is_bound(ttm))
828                 return 0;
829
830         tmp = bo->mem;
831         tmp.mm_node = NULL;
832         placement.num_placement = 1;
833         placement.placement = &placements;
834         placement.num_busy_placement = 1;
835         placement.busy_placement = &placements;
836         placements.fpfn = 0;
837         placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
838         placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
839
840         r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
841         if (unlikely(r))
842                 return r;
843
844         r = ttm_bo_move_ttm(bo, true, false, &tmp);
845         if (unlikely(r))
846                 ttm_bo_mem_put(bo, &tmp);
847         else
848                 bo->offset = (bo->mem.start << PAGE_SHIFT) +
849                         bo->bdev->man[bo->mem.mem_type].gpu_offset;
850
851         return r;
852 }
853
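/*
 * Re-bind every BO on the gtt_list so its GART entries are valid again,
 * used when the GART table contents have to be restored.
 */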
854 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
855 {
856         struct amdgpu_ttm_tt *gtt, *tmp;
857         struct ttm_mem_reg bo_mem;
858         uint64_t flags;
859         int r;
860
861         bo_mem.mem_type = TTM_PL_TT;
862         spin_lock(&adev->gtt_list_lock);
863         list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
864                 flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
865                 r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
866                                      gtt->ttm.ttm.pages, gtt->ttm.dma_address,
867                                      flags);
868                 if (r) {
869                         spin_unlock(&adev->gtt_list_lock);
870                         DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
871                                   gtt->ttm.ttm.num_pages, gtt->offset);
872                         return r;
873                 }
874         }
875         spin_unlock(&adev->gtt_list_lock);
876         return 0;
877 }
878
879 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
880 {
881         struct amdgpu_ttm_tt *gtt = (void *)ttm;
882         int r;
883
884         if (gtt->userptr)
885                 amdgpu_ttm_tt_unpin_userptr(ttm);
886
887         if (!amdgpu_ttm_is_bound(ttm))
888                 return 0;
889
890         /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
891         spin_lock(&gtt->adev->gtt_list_lock);
892         r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
893         if (r) {
894                 DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
895                           gtt->ttm.ttm.num_pages, gtt->offset);
896                 goto error_unbind;
897         }
898         list_del_init(&gtt->list);
899 error_unbind:
900         spin_unlock(&gtt->adev->gtt_list_lock);
901         return r;
902 }
903
904 static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
905 {
906         struct amdgpu_ttm_tt *gtt = (void *)ttm;
907
908         ttm_dma_tt_fini(&gtt->ttm);
909         kfree(gtt);
910 }
911
912 static struct ttm_backend_func amdgpu_backend_func = {
913         .bind = &amdgpu_ttm_backend_bind,
914         .unbind = &amdgpu_ttm_backend_unbind,
915         .destroy = &amdgpu_ttm_backend_destroy,
916 };
917
918 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
919                                     unsigned long size, uint32_t page_flags,
920                                     struct page *dummy_read_page)
921 {
922         struct amdgpu_device *adev;
923         struct amdgpu_ttm_tt *gtt;
924
925         adev = amdgpu_ttm_adev(bdev);
926
927         gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
928         if (gtt == NULL) {
929                 return NULL;
930         }
931         gtt->ttm.ttm.func = &amdgpu_backend_func;
932         gtt->adev = adev;
933         if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
934                 kfree(gtt);
935                 return NULL;
936         }
937         INIT_LIST_HEAD(&gtt->list);
938         return &gtt->ttm.ttm;
939 }
940
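/*
 * Allocate and DMA-map the backing pages for a ttm_tt. userptr and
 * SG-imported BOs are handled specially; otherwise the pages come from the
 * TTM page pool (or the swiotlb-aware DMA pool).
 */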
941 static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
942 {
943         struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
944         struct amdgpu_ttm_tt *gtt = (void *)ttm;
945         unsigned i;
946         int r;
947         bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
948
949         if (ttm->state != tt_unpopulated)
950                 return 0;
951
952         if (gtt && gtt->userptr) {
953                 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
954                 if (!ttm->sg)
955                         return -ENOMEM;
956
957                 ttm->page_flags |= TTM_PAGE_FLAG_SG;
958                 ttm->state = tt_unbound;
959                 return 0;
960         }
961
962         if (slave && ttm->sg) {
963                 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
964                                                  gtt->ttm.dma_address, ttm->num_pages);
965                 ttm->state = tt_unbound;
966                 r = 0;
967                 goto trace_mappings;
968         }
969
970 #ifdef CONFIG_SWIOTLB
971         if (swiotlb_nr_tbl()) {
972                 r = ttm_dma_populate(&gtt->ttm, adev->dev);
973                 goto trace_mappings;
974         }
975 #endif
976
977         r = ttm_pool_populate(ttm);
978         if (r) {
979                 return r;
980         }
981
982         for (i = 0; i < ttm->num_pages; i++) {
983                 gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
984                                                        0, PAGE_SIZE,
985                                                        PCI_DMA_BIDIRECTIONAL);
986                 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
987                         while (i--) {
988                                 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
989                                                PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
990                                 gtt->ttm.dma_address[i] = 0;
991                         }
992                         ttm_pool_unpopulate(ttm);
993                         return -EFAULT;
994                 }
995         }
996
997         r = 0;
998 trace_mappings:
999         if (likely(!r))
1000                 amdgpu_trace_dma_map(ttm);
1001         return r;
1002 }
1003
1004 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
1005 {
1006         struct amdgpu_device *adev;
1007         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1008         unsigned i;
1009         bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1010
1011         if (gtt && gtt->userptr) {
1012                 kfree(ttm->sg);
1013                 ttm->sg = NULL;
1014                 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1015                 return;
1016         }
1017
1018         if (slave)
1019                 return;
1020
1021         adev = amdgpu_ttm_adev(ttm->bdev);
1022
1023         amdgpu_trace_dma_unmap(ttm);
1024
1025 #ifdef CONFIG_SWIOTLB
1026         if (swiotlb_nr_tbl()) {
1027                 ttm_dma_unpopulate(&gtt->ttm, adev->dev);
1028                 return;
1029         }
1030 #endif
1031
1032         for (i = 0; i < ttm->num_pages; i++) {
1033                 if (gtt->ttm.dma_address[i]) {
1034                         pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
1035                                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1036                 }
1037         }
1038
1039         ttm_pool_unpopulate(ttm);
1040 }
1041
1042 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
1043                               uint32_t flags)
1044 {
1045         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1046
1047         if (gtt == NULL)
1048                 return -EINVAL;
1049
1050         gtt->userptr = addr;
1051         gtt->usermm = current->mm;
1052         gtt->userflags = flags;
1053         spin_lock_init(&gtt->guptasklock);
1054         INIT_LIST_HEAD(&gtt->guptasks);
1055         atomic_set(&gtt->mmu_invalidations, 0);
1056
1057         return 0;
1058 }
1059
1060 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1061 {
1062         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1063
1064         if (gtt == NULL)
1065                 return NULL;
1066
1067         return gtt->usermm;
1068 }
1069
1070 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1071                                   unsigned long end)
1072 {
1073         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1074         struct amdgpu_ttm_gup_task_list *entry;
1075         unsigned long size;
1076
1077         if (gtt == NULL || !gtt->userptr)
1078                 return false;
1079
1080         size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
1081         if (gtt->userptr > end || gtt->userptr + size <= start)
1082                 return false;
1083
1084         spin_lock(&gtt->guptasklock);
1085         list_for_each_entry(entry, &gtt->guptasks, list) {
1086                 if (entry->task == current) {
1087                         spin_unlock(&gtt->guptasklock);
1088                         return false;
1089                 }
1090         }
1091         spin_unlock(&gtt->guptasklock);
1092
1093         atomic_inc(&gtt->mmu_invalidations);
1094
1095         return true;
1096 }
1097
1098 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
1099                                        int *last_invalidated)
1100 {
1101         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1102         int prev_invalidated = *last_invalidated;
1103
1104         *last_invalidated = atomic_read(&gtt->mmu_invalidations);
1105         return prev_invalidated != *last_invalidated;
1106 }
1107
1108 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1109 {
1110         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1111
1112         if (gtt == NULL)
1113                 return false;
1114
1115         return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1116 }
1117
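/*
 * Compute the GART/VM PTE flags (valid, system, snooped, read/write) for a
 * given placement and caching state.
 */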
1118 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1119                                  struct ttm_mem_reg *mem)
1120 {
1121         uint64_t flags = 0;
1122
1123         if (mem && mem->mem_type != TTM_PL_SYSTEM)
1124                 flags |= AMDGPU_PTE_VALID;
1125
1126         if (mem && mem->mem_type == TTM_PL_TT) {
1127                 flags |= AMDGPU_PTE_SYSTEM;
1128
1129                 if (ttm->caching_state == tt_cached)
1130                         flags |= AMDGPU_PTE_SNOOPED;
1131         }
1132
1133         flags |= adev->gart.gart_pte_flags;
1134         flags |= AMDGPU_PTE_READABLE;
1135
1136         if (!amdgpu_ttm_tt_is_readonly(ttm))
1137                 flags |= AMDGPU_PTE_WRITEABLE;
1138
1139         return flags;
1140 }
1141
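/*
 * Tell TTM whether evicting this BO would actually help satisfy the given
 * placement, e.g. skip VRAM BOs whose nodes do not overlap the requested
 * range.
 */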
1142 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1143                                             const struct ttm_place *place)
1144 {
1145         unsigned long num_pages = bo->mem.num_pages;
1146         struct drm_mm_node *node = bo->mem.mm_node;
1147
1148         if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
1149                 return ttm_bo_eviction_valuable(bo, place);
1150
1151         switch (bo->mem.mem_type) {
1152         case TTM_PL_TT:
1153                 return true;
1154
1155         case TTM_PL_VRAM:
1156                 /* Check each drm MM node individually */
1157                 while (num_pages) {
1158                         if (place->fpfn < (node->start + node->size) &&
1159                             !(place->lpfn && place->lpfn <= node->start))
1160                                 return true;
1161
1162                         num_pages -= node->size;
1163                         ++node;
1164                 }
1165                 break;
1166
1167         default:
1168                 break;
1169         }
1170
1171         return ttm_bo_eviction_valuable(bo, place);
1172 }
1173
1174 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1175                                     unsigned long offset,
1176                                     void *buf, int len, int write)
1177 {
1178         struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
1179         struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1180         struct drm_mm_node *nodes = abo->tbo.mem.mm_node;
1181         uint32_t value = 0;
1182         int ret = 0;
1183         uint64_t pos;
1184         unsigned long flags;
1185
1186         if (bo->mem.mem_type != TTM_PL_VRAM)
1187                 return -EIO;
1188
1189         while (offset >= (nodes->size << PAGE_SHIFT)) {
1190                 offset -= nodes->size << PAGE_SHIFT;
1191                 ++nodes;
1192         }
1193         pos = (nodes->start << PAGE_SHIFT) + offset;
1194
1195         while (len && pos < adev->mc.mc_vram_size) {
1196                 uint64_t aligned_pos = pos & ~(uint64_t)3;
1197                 uint32_t bytes = 4 - (pos & 3);
1198                 uint32_t shift = (pos & 3) * 8;
1199                 uint32_t mask = 0xffffffff << shift;
1200
1201                 if (len < bytes) {
1202                         mask &= 0xffffffff >> (bytes - len) * 8;
1203                         bytes = len;
1204                 }
1205
1206                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1207                 WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1208                 WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
1209                 if (!write || mask != 0xffffffff)
1210                         value = RREG32(mmMM_DATA);
1211                 if (write) {
1212                         value &= ~mask;
1213                         value |= (*(uint32_t *)buf << shift) & mask;
1214                         WREG32(mmMM_DATA, value);
1215                 }
1216                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1217                 if (!write) {
1218                         value = (value & mask) >> shift;
1219                         memcpy(buf, &value, bytes);
1220                 }
1221
1222                 ret += bytes;
1223                 buf = (uint8_t *)buf + bytes;
1224                 pos += bytes;
1225                 len -= bytes;
1226                 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1227                         ++nodes;
1228                         pos = (nodes->start << PAGE_SHIFT);
1229                 }
1230         }
1231
1232         return ret;
1233 }
1234
1235 static struct ttm_bo_driver amdgpu_bo_driver = {
1236         .ttm_tt_create = &amdgpu_ttm_tt_create,
1237         .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1238         .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1239         .invalidate_caches = &amdgpu_invalidate_caches,
1240         .init_mem_type = &amdgpu_init_mem_type,
1241         .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1242         .evict_flags = &amdgpu_evict_flags,
1243         .move = &amdgpu_bo_move,
1244         .verify_access = &amdgpu_verify_access,
1245         .move_notify = &amdgpu_bo_move_notify,
1246         .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1247         .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1248         .io_mem_free = &amdgpu_ttm_io_mem_free,
1249         .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1250         .access_memory = &amdgpu_ttm_access_memory
1251 };
1252
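/*
 * Initialize the memory manager: TTM device and global state, the VRAM,
 * GTT and GDS/GWS/OA heaps, the stolen VGA memory BO and the debugfs
 * entries.
 */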
1253 int amdgpu_ttm_init(struct amdgpu_device *adev)
1254 {
1255         uint64_t gtt_size;
1256         int r;
1257         u64 vis_vram_limit;
1258
1259         r = amdgpu_ttm_global_init(adev);
1260         if (r) {
1261                 return r;
1262         }
1263         /* No other users of this address space, so set it to 0 */
1264         r = ttm_bo_device_init(&adev->mman.bdev,
1265                                adev->mman.bo_global_ref.ref.object,
1266                                &amdgpu_bo_driver,
1267                                adev->ddev->anon_inode->i_mapping,
1268                                DRM_FILE_PAGE_OFFSET,
1269                                adev->need_dma32);
1270         if (r) {
1271                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1272                 return r;
1273         }
1274         adev->mman.initialized = true;
1275         r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
1276                                 adev->mc.real_vram_size >> PAGE_SHIFT);
1277         if (r) {
1278                 DRM_ERROR("Failed initializing VRAM heap.\n");
1279                 return r;
1280         }
1281
1282         /* Reduce size of CPU-visible VRAM if requested */
1283         vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1284         if (amdgpu_vis_vram_limit > 0 &&
1285             vis_vram_limit <= adev->mc.visible_vram_size)
1286                 adev->mc.visible_vram_size = vis_vram_limit;
1287
1288         /* Change the size here instead of the init above so only lpfn is affected */
1289         amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
1290
1291         r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
1292                                     AMDGPU_GEM_DOMAIN_VRAM,
1293                                     &adev->stolen_vga_memory,
1294                                     NULL, NULL);
1295         if (r)
1296                 return r;
1297         DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1298                  (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
1299
1300         if (amdgpu_gtt_size == -1)
1301                 gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1302                                adev->mc.mc_vram_size);
1303         else
1304                 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1305         r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
1306         if (r) {
1307                 DRM_ERROR("Failed initializing GTT heap.\n");
1308                 return r;
1309         }
1310         DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1311                  (unsigned)(gtt_size / (1024 * 1024)));
1312
1313         adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
1314         adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
1315         adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
1316         adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
1317         adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
1318         adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
1319         adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
1320         adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
1321         adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
1322         /* GDS Memory */
1323         if (adev->gds.mem.total_size) {
1324                 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
1325                                    adev->gds.mem.total_size >> PAGE_SHIFT);
1326                 if (r) {
1327                         DRM_ERROR("Failed initializing GDS heap.\n");
1328                         return r;
1329                 }
1330         }
1331
1332         /* GWS */
1333         if (adev->gds.gws.total_size) {
1334                 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
1335                                    adev->gds.gws.total_size >> PAGE_SHIFT);
1336                 if (r) {
1337                         DRM_ERROR("Failed initializing gws heap.\n");
1338                         return r;
1339                 }
1340         }
1341
1342         /* OA */
1343         if (adev->gds.oa.total_size) {
1344                 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
1345                                    adev->gds.oa.total_size >> PAGE_SHIFT);
1346                 if (r) {
1347                         DRM_ERROR("Failed initializing oa heap.\n");
1348                         return r;
1349                 }
1350         }
1351
1352         r = amdgpu_ttm_debugfs_init(adev);
1353         if (r) {
1354                 DRM_ERROR("Failed to init debugfs\n");
1355                 return r;
1356         }
1357         return 0;
1358 }
1359
1360 void amdgpu_ttm_fini(struct amdgpu_device *adev)
1361 {
1362         int r;
1363
1364         if (!adev->mman.initialized)
1365                 return;
1366         amdgpu_ttm_debugfs_fini(adev);
1367         if (adev->stolen_vga_memory) {
1368                 r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
1369                 if (r == 0) {
1370                         amdgpu_bo_unpin(adev->stolen_vga_memory);
1371                         amdgpu_bo_unreserve(adev->stolen_vga_memory);
1372                 }
1373                 amdgpu_bo_unref(&adev->stolen_vga_memory);
1374         }
1375         ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
1376         ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
1377         if (adev->gds.mem.total_size)
1378                 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
1379         if (adev->gds.gws.total_size)
1380                 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
1381         if (adev->gds.oa.total_size)
1382                 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
1383         ttm_bo_device_release(&adev->mman.bdev);
1384         amdgpu_gart_fini(adev);
1385         amdgpu_ttm_global_fini(adev);
1386         adev->mman.initialized = false;
1387         DRM_INFO("amdgpu: ttm finalized\n");
1388 }
1389
1390 /* this should only be called at bootup or when userspace
1391  * isn't running */
1392 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
1393 {
1394         struct ttm_mem_type_manager *man;
1395
1396         if (!adev->mman.initialized)
1397                 return;
1398
1399         man = &adev->mman.bdev.man[TTM_PL_VRAM];
1400         /* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
1401         man->size = size >> PAGE_SHIFT;
1402 }
1403
1404 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
1405 {
1406         struct drm_file *file_priv;
1407         struct amdgpu_device *adev;
1408
1409         if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
1410                 return -EINVAL;
1411
1412         file_priv = filp->private_data;
1413         adev = file_priv->minor->dev->dev_private;
1414         if (adev == NULL)
1415                 return -EINVAL;
1416
1417         return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
1418 }
1419
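/*
 * Map num_pages of a BO into one of the two GART windows reserved for
 * buffer moves and return the GPU address of the mapping. The PTEs are
 * written into the GART table by the copy engine as part of the job.
 */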
1420 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
1421                              struct ttm_mem_reg *mem, unsigned num_pages,
1422                              uint64_t offset, unsigned window,
1423                              struct amdgpu_ring *ring,
1424                              uint64_t *addr)
1425 {
1426         struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
1427         struct amdgpu_device *adev = ring->adev;
1428         struct ttm_tt *ttm = bo->ttm;
1429         struct amdgpu_job *job;
1430         unsigned num_dw, num_bytes;
1431         dma_addr_t *dma_address;
1432         struct dma_fence *fence;
1433         uint64_t src_addr, dst_addr;
1434         uint64_t flags;
1435         int r;
1436
1437         BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
1438                AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
1439
1440         *addr = adev->mc.gart_start;
1441         *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
1442                 AMDGPU_GPU_PAGE_SIZE;
1443
1444         num_dw = adev->mman.buffer_funcs->copy_num_dw;
1445         while (num_dw & 0x7)
1446                 num_dw++;
1447
1448         num_bytes = num_pages * 8;
1449
1450         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
1451         if (r)
1452                 return r;
1453
1454         src_addr = num_dw * 4;
1455         src_addr += job->ibs[0].gpu_addr;
1456
1457         dst_addr = adev->gart.table_addr;
1458         dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
1459         amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
1460                                 dst_addr, num_bytes);
1461
1462         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1463         WARN_ON(job->ibs[0].length_dw > num_dw);
1464
1465         dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
1466         flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
1467         r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
1468                             &job->ibs[0].ptr[num_dw]);
1469         if (r)
1470                 goto error_free;
1471
1472         r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1473                               AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
1474         if (r)
1475                 goto error_free;
1476
1477         dma_fence_put(fence);
1478
1479         return r;
1480
1481 error_free:
1482         amdgpu_job_free(job);
1483         return r;
1484 }
1485
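/*
 * amdgpu_copy_buffer - schedule a GPU copy between two GPU addresses
 *
 * Split the copy into chunks of at most copy_max_bytes, emit them into a
 * single IB and either submit it directly to the ring or hand it to the
 * scheduler entity.  If @resv is given, the job is synchronized against it
 * first.  The fence of the submission is returned through @fence.
 */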
1486 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1487                        uint64_t dst_offset, uint32_t byte_count,
1488                        struct reservation_object *resv,
1489                        struct dma_fence **fence, bool direct_submit,
1490                        bool vm_needs_flush)
1491 {
1492         struct amdgpu_device *adev = ring->adev;
1493         struct amdgpu_job *job;
1494
1495         uint32_t max_bytes;
1496         unsigned num_loops, num_dw;
1497         unsigned i;
1498         int r;
1499
1500         max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
1501         num_loops = DIV_ROUND_UP(byte_count, max_bytes);
1502         num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
1503
1504         /* for IB padding */
1505         while (num_dw & 0x7)
1506                 num_dw++;
1507
1508         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
1509         if (r)
1510                 return r;
1511
1512         job->vm_needs_flush = vm_needs_flush;
1513         if (resv) {
1514                 r = amdgpu_sync_resv(adev, &job->sync, resv,
1515                                      AMDGPU_FENCE_OWNER_UNDEFINED);
1516                 if (r) {
1517                         DRM_ERROR("sync failed (%d).\n", r);
1518                         goto error_free;
1519                 }
1520         }
1521
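        /* emit one copy command for each chunk of at most max_bytes */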
1522         for (i = 0; i < num_loops; i++) {
1523                 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1524
1525                 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
1526                                         dst_offset, cur_size_in_bytes);
1527
1528                 src_offset += cur_size_in_bytes;
1529                 dst_offset += cur_size_in_bytes;
1530                 byte_count -= cur_size_in_bytes;
1531         }
1532
1533         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1534         WARN_ON(job->ibs[0].length_dw > num_dw);
1535         if (direct_submit) {
1536                 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
1537                                        NULL, fence);
1538                 job->fence = dma_fence_get(*fence);
1539                 if (r)
1540                         DRM_ERROR("Error scheduling IBs (%d)\n", r);
1541                 amdgpu_job_free(job);
1542         } else {
1543                 r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1544                                       AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1545                 if (r)
1546                         goto error_free;
1547         }
1548
1549         return r;
1550
1551 error_free:
1552         amdgpu_job_free(job);
1553         return r;
1554 }
1555
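/*
 * amdgpu_fill_buffer - fill a buffer object with a 64-bit pattern
 *
 * Fill the memory backing @bo with @src_data.  GTT BOs are bound first so
 * that their GART addresses are valid, then the drm_mm nodes backing the
 * BO are walked and one SDMA_OP_PTEPDE write is emitted per chunk of at
 * most fill_max_bytes.
 */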
1556 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
1557                        uint64_t src_data,
1558                        struct reservation_object *resv,
1559                        struct dma_fence **fence)
1560 {
1561         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL */
1563         uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
1564         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
1565
1566         struct drm_mm_node *mm_node;
1567         unsigned long num_pages;
1568         unsigned int num_loops, num_dw;
1569
1570         struct amdgpu_job *job;
1571         int r;
1572
1573         if (!ring->ready) {
1574                 DRM_ERROR("Trying to clear memory with ring turned off.\n");
1575                 return -EINVAL;
1576         }
1577
1578         if (bo->tbo.mem.mem_type == TTM_PL_TT) {
1579                 r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
1580                 if (r)
1581                         return r;
1582         }
1583
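        /* first pass over the drm_mm nodes: count the fill commands needed */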
1584         num_pages = bo->tbo.num_pages;
1585         mm_node = bo->tbo.mem.mm_node;
1586         num_loops = 0;
1587         while (num_pages) {
1588                 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1589
1590                 num_loops += DIV_ROUND_UP(byte_count, max_bytes);
1591                 num_pages -= mm_node->size;
1592                 ++mm_node;
1593         }
1594
1595         /* 10 double words for each SDMA_OP_PTEPDE cmd */
1596         num_dw = num_loops * 10;
1597
1598         /* for IB padding */
1599         num_dw += 64;
1600
1601         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
1602         if (r)
1603                 return r;
1604
1605         if (resv) {
1606                 r = amdgpu_sync_resv(adev, &job->sync, resv,
1607                                      AMDGPU_FENCE_OWNER_UNDEFINED);
1608                 if (r) {
1609                         DRM_ERROR("sync failed (%d).\n", r);
1610                         goto error_free;
1611                 }
1612         }
1613
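        /* second pass: emit one SDMA_OP_PTEPDE write per chunk of at most
         * max_bytes for each node
         */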
1614         num_pages = bo->tbo.num_pages;
1615         mm_node = bo->tbo.mem.mm_node;
1616
1617         while (num_pages) {
1618                 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
1619                 uint64_t dst_addr;
1620
1621                 WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");
1622
1623                 dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
1624                 while (byte_count) {
1625                         uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
1626
1627                         amdgpu_vm_set_pte_pde(adev, &job->ibs[0],
1628                                         dst_addr, 0,
1629                                         cur_size_in_bytes >> 3, 0,
1630                                         src_data);
1631
1632                         dst_addr += cur_size_in_bytes;
1633                         byte_count -= cur_size_in_bytes;
1634                 }
1635
1636                 num_pages -= mm_node->size;
1637                 ++mm_node;
1638         }
1639
1640         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1641         WARN_ON(job->ibs[0].length_dw > num_dw);
1642         r = amdgpu_job_submit(job, ring, &adev->mman.entity,
1643                               AMDGPU_FENCE_OWNER_UNDEFINED, fence);
1644         if (r)
1645                 goto error_free;
1646
1647         return 0;
1648
1649 error_free:
1650         amdgpu_job_free(job);
1651         return r;
1652 }
1653
1654 #if defined(CONFIG_DEBUG_FS)
1655
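/*
 * amdgpu_mm_dump_table - debugfs dump of a TTM memory manager
 *
 * Print the state of the VRAM or GTT manager selected by the debugfs
 * entry into the seq_file.
 */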
1656 static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
1657 {
1658         struct drm_info_node *node = (struct drm_info_node *)m->private;
1659         unsigned ttm_pl = *(int *)node->info_ent->data;
1660         struct drm_device *dev = node->minor->dev;
1661         struct amdgpu_device *adev = dev->dev_private;
1662         struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
1663         struct drm_printer p = drm_seq_file_printer(m);
1664
1665         man->func->debug(man, &p);
1666         return 0;
1667 }
1668
1669 static int ttm_pl_vram = TTM_PL_VRAM;
1670 static int ttm_pl_tt = TTM_PL_TT;
1671
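/* debugfs tables exposing the VRAM and GTT managers and the TTM page pools */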
1672 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
1673         {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
1674         {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
1675         {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
1676 #ifdef CONFIG_SWIOTLB
1677         {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
1678 #endif
1679 };
1680
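/*
 * amdgpu_ttm_vram_read - read() handler for the amdgpu_vram debugfs file
 *
 * Read VRAM one dword at a time through the MM_INDEX/MM_DATA register
 * pair and copy the values to user space.  @size and the file position
 * must be dword aligned.
 */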
1681 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
1682                                     size_t size, loff_t *pos)
1683 {
1684         struct amdgpu_device *adev = file_inode(f)->i_private;
1685         ssize_t result = 0;
1686         int r;
1687
1688         if (size & 0x3 || *pos & 0x3)
1689                 return -EINVAL;
1690
1691         if (*pos >= adev->mc.mc_vram_size)
1692                 return -ENXIO;
1693
1694         while (size) {
1695                 unsigned long flags;
1696                 uint32_t value;
1697
1698                 if (*pos >= adev->mc.mc_vram_size)
1699                         return result;
1700
1701                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1702                 WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
1703                 WREG32(mmMM_INDEX_HI, *pos >> 31);
1704                 value = RREG32(mmMM_DATA);
1705                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1706
1707                 r = put_user(value, (uint32_t *)buf);
1708                 if (r)
1709                         return r;
1710
1711                 result += 4;
1712                 buf += 4;
1713                 *pos += 4;
1714                 size -= 4;
1715         }
1716
1717         return result;
1718 }
1719
1720 static const struct file_operations amdgpu_ttm_vram_fops = {
1721         .owner = THIS_MODULE,
1722         .read = amdgpu_ttm_vram_read,
1723         .llseek = default_llseek
1724 };
1725
1726 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
1727
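/*
 * amdgpu_ttm_gtt_read - read() handler for the amdgpu_gtt debugfs file
 *
 * Copy out the contents of the pages currently bound in the GART;
 * unbound entries read back as zeroes.
 */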
1728 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
1729                                    size_t size, loff_t *pos)
1730 {
1731         struct amdgpu_device *adev = file_inode(f)->i_private;
1732         ssize_t result = 0;
1733         int r;
1734
1735         while (size) {
1736                 loff_t p = *pos / PAGE_SIZE;
1737                 unsigned off = *pos & ~PAGE_MASK;
1738                 size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
1739                 struct page *page;
1740                 void *ptr;
1741
1742                 if (p >= adev->gart.num_cpu_pages)
1743                         return result;
1744
1745                 page = adev->gart.pages[p];
1746                 if (page) {
1747                         ptr = kmap(page);
1748                         ptr += off;
1749
1750                         r = copy_to_user(buf, ptr, cur_size);
1751                         kunmap(adev->gart.pages[p]);
                } else {
                        r = clear_user(buf, cur_size);
                }
1754
1755                 if (r)
1756                         return -EFAULT;
1757
1758                 result += cur_size;
1759                 buf += cur_size;
1760                 *pos += cur_size;
1761                 size -= cur_size;
1762         }
1763
1764         return result;
1765 }
1766
1767 static const struct file_operations amdgpu_ttm_gtt_fops = {
1768         .owner = THIS_MODULE,
1769         .read = amdgpu_ttm_gtt_read,
1770         .llseek = default_llseek
1771 };
1772
1773 #endif
1774
1775 #endif
1776
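/*
 * amdgpu_ttm_debugfs_init - create the TTM related debugfs files
 *
 * Create the amdgpu_vram file (and amdgpu_gtt when
 * CONFIG_DRM_AMDGPU_GART_DEBUGFS is enabled) and register the memory
 * manager and page pool tables declared above.  The DMA page pool entry
 * is dropped when swiotlb is not in use.
 */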
1777 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
1778 {
1779 #if defined(CONFIG_DEBUG_FS)
1780         unsigned count;
1781
1782         struct drm_minor *minor = adev->ddev->primary;
1783         struct dentry *ent, *root = minor->debugfs_root;
1784
1785         ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
1786                                   adev, &amdgpu_ttm_vram_fops);
1787         if (IS_ERR(ent))
1788                 return PTR_ERR(ent);
1789         i_size_write(ent->d_inode, adev->mc.mc_vram_size);
1790         adev->mman.vram = ent;
1791
1792 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
1793         ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
1794                                   adev, &amdgpu_ttm_gtt_fops);
1795         if (IS_ERR(ent))
1796                 return PTR_ERR(ent);
1797         i_size_write(ent->d_inode, adev->mc.gart_size);
1798         adev->mman.gtt = ent;
1799
1800 #endif
1801         count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
1802
1803 #ifdef CONFIG_SWIOTLB
1804         if (!swiotlb_nr_tbl())
1805                 --count;
1806 #endif
1807
1808         return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
1809 #else
1810
1811         return 0;
1812 #endif
1813 }
1814
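/*
 * amdgpu_ttm_debugfs_fini - remove the debugfs files created by
 * amdgpu_ttm_debugfs_init()
 */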
1815 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
1816 {
1817 #if defined(CONFIG_DEBUG_FS)
1818
1819         debugfs_remove(adev->mman.vram);
1820         adev->mman.vram = NULL;
1821
1822 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
1823         debugfs_remove(adev->mman.gtt);
1824         adev->mman.gtt = NULL;
1825 #endif
1826
1827 #endif
1828 }