2 * Copyright 2009 Jerome Glisse.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions of the Software.
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
32 #include <drm/ttm/ttm_bo_api.h>
33 #include <drm/ttm/ttm_bo_driver.h>
34 #include <drm/ttm/ttm_placement.h>
35 #include <drm/ttm/ttm_module.h>
36 #include <drm/ttm/ttm_page_alloc.h>
38 #include <drm/amdgpu_drm.h>
39 #include <linux/seq_file.h>
40 #include <linux/slab.h>
41 #include <linux/swiotlb.h>
42 #include <linux/swap.h>
43 #include <linux/pagemap.h>
44 #include <linux/debugfs.h>
45 #include <linux/iommu.h>
47 #include "amdgpu_object.h"
48 #include "amdgpu_trace.h"
49 #include "amdgpu_amdkfd.h"
50 #include "bif/bif_4_1_d.h"
52 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
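/*
 * Illustrative note: with 4 KiB pages this evaluates to 0x100000, i.e. the
 * fake mmap offsets handed out for BOs start at the 4 GiB mark of the file
 * offset space; amdgpu_mmap() below rejects anything below this offset.
 */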
54 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
55 struct ttm_mem_reg *mem, unsigned num_pages,
56 uint64_t offset, unsigned window,
57 struct amdgpu_ring *ring, uint64_t *addr);
60 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
61 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
68 * amdgpu_ttm_mem_global_init - Initialize and acquire reference to memory object
71 * @ref: Object for initialization.
73 * This is called by drm_global_item_ref() when an object is being initialized.
76 static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
78 return ttm_mem_global_init(ref->object);
82 * amdgpu_ttm_mem_global_release - Drop reference to a memory object
84 * @ref: Object being removed
86 * This is called by drm_global_item_unref() when an object is being released.
89 static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
91 ttm_mem_global_release(ref->object);
95 * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
97 * @adev: AMDGPU device for which the global structures need to be registered.
99 * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
102 static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
104 struct drm_global_reference *global_ref;
107 /* ensure reference is false in case init fails */
108 adev->mman.mem_global_referenced = false;
110 global_ref = &adev->mman.mem_global_ref;
111 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
112 global_ref->size = sizeof(struct ttm_mem_global);
113 global_ref->init = &amdgpu_ttm_mem_global_init;
114 global_ref->release = &amdgpu_ttm_mem_global_release;
115 r = drm_global_item_ref(global_ref);
117 DRM_ERROR("Failed setting up TTM memory accounting "
122 adev->mman.bo_global_ref.mem_glob =
123 adev->mman.mem_global_ref.object;
124 global_ref = &adev->mman.bo_global_ref.ref;
125 global_ref->global_type = DRM_GLOBAL_TTM_BO;
126 global_ref->size = sizeof(struct ttm_bo_global);
127 global_ref->init = &ttm_bo_global_init;
128 global_ref->release = &ttm_bo_global_release;
129 r = drm_global_item_ref(global_ref);
131 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
135 mutex_init(&adev->mman.gtt_window_lock);
137 adev->mman.mem_global_referenced = true;
142 drm_global_item_unref(&adev->mman.mem_global_ref);
147 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
149 if (adev->mman.mem_global_referenced) {
150 mutex_destroy(&adev->mman.gtt_window_lock);
151 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
152 drm_global_item_unref(&adev->mman.mem_global_ref);
153 adev->mman.mem_global_referenced = false;
157 static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
163 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of memory request
166 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
167 * @type: The type of memory requested
168 * @man: The memory type manager for each domain
170 * This is called by ttm_bo_init_mm() when a buffer object is being initialized.
173 static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
174 struct ttm_mem_type_manager *man)
176 struct amdgpu_device *adev;
178 adev = amdgpu_ttm_adev(bdev);
183 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
184 man->available_caching = TTM_PL_MASK_CACHING;
185 man->default_caching = TTM_PL_FLAG_CACHED;
189 man->func = &amdgpu_gtt_mgr_func;
190 man->gpu_offset = adev->gmc.gart_start;
191 man->available_caching = TTM_PL_MASK_CACHING;
192 man->default_caching = TTM_PL_FLAG_CACHED;
193 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
196 /* "On-card" video ram */
197 man->func = &amdgpu_vram_mgr_func;
198 man->gpu_offset = adev->gmc.vram_start;
199 man->flags = TTM_MEMTYPE_FLAG_FIXED |
200 TTM_MEMTYPE_FLAG_MAPPABLE;
201 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
202 man->default_caching = TTM_PL_FLAG_WC;
207 /* On-chip GDS memory */
208 man->func = &ttm_bo_manager_func;
210 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
211 man->available_caching = TTM_PL_FLAG_UNCACHED;
212 man->default_caching = TTM_PL_FLAG_UNCACHED;
215 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
222 * amdgpu_evict_flags - Compute placement flags
224 * @bo: The buffer object to evict
225 * @placement: Possible destination(s) for evicted BO
227 * Fill in placement data when ttm_bo_evict() is called
229 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
230 struct ttm_placement *placement)
232 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
233 struct amdgpu_bo *abo;
234 static const struct ttm_place placements = {
237 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
240 /* Don't handle scatter gather BOs */
241 if (bo->type == ttm_bo_type_sg) {
242 placement->num_placement = 0;
243 placement->num_busy_placement = 0;
247 /* Object isn't an AMDGPU object so ignore */
248 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
249 placement->placement = &placements;
250 placement->busy_placement = &placements;
251 placement->num_placement = 1;
252 placement->num_busy_placement = 1;
256 abo = ttm_to_amdgpu_bo(bo);
257 switch (bo->mem.mem_type) {
259 if (!adev->mman.buffer_funcs_enabled) {
260 /* Move to system memory */
261 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
262 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
263 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
264 amdgpu_bo_in_cpu_visible_vram(abo)) {
266 /* Try evicting to the CPU inaccessible part of VRAM
267 * first, but only set GTT as busy placement, so this
268 * BO will be evicted to GTT rather than causing other
269 * BOs to be evicted from VRAM
271 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
272 AMDGPU_GEM_DOMAIN_GTT);
273 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
274 abo->placements[0].lpfn = 0;
275 abo->placement.busy_placement = &abo->placements[1];
276 abo->placement.num_busy_placement = 1;
278 /* Move to GTT memory */
279 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
284 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
286 *placement = abo->placement;
290 * amdgpu_verify_access - Verify access for a mmap call
292 * @bo: The buffer object to map
293 * @filp: The file pointer from the process performing the mmap
295 * This is called by ttm_bo_mmap() to verify whether a process
296 * has the right to mmap a BO to their process space.
298 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
300 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
303 * Don't verify access for KFD BOs. They don't have a GEM
304 * object associated with them.
309 if (amdgpu_ttm_tt_get_usermm(bo->ttm))
311 return drm_vma_node_verify_access(&abo->gem_base.vma_node, filp->private_data);
316 * amdgpu_move_null - Register memory for a buffer object
318 * @bo: The bo to assign the memory to
319 * @new_mem: The memory to be assigned.
321 * Assign the memory from new_mem to the memory of the buffer object bo.
323 static void amdgpu_move_null(struct ttm_buffer_object *bo,
324 struct ttm_mem_reg *new_mem)
326 struct ttm_mem_reg *old_mem = &bo->mem;
328 BUG_ON(old_mem->mm_node != NULL);
330 new_mem->mm_node = NULL;
334 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
336 * @bo: The bo to assign the memory to.
337 * @mm_node: Memory manager node for drm allocator.
338 * @mem: The region where the bo resides.
341 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
342 struct drm_mm_node *mm_node,
343 struct ttm_mem_reg *mem)
347 if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
348 addr = mm_node->start << PAGE_SHIFT;
349 addr += bo->bdev->man[mem->mem_type].gpu_offset;
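/*
 * Worked example (hypothetical values, not taken from real hardware): for a
 * GTT node with mm_node->start == 0x10, 4 KiB pages and a GTT manager
 * gpu_offset of 0x8000000000, the result is
 * (0x10 << 12) + 0x8000000000 == 0x8000010000.
 */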
355 * amdgpu_find_mm_node - Helper that finds the drm_mm_node corresponding to
356 * @offset and adjusts @offset to lie within the returned drm_mm_node
358 * @mem: The region where the bo resides.
359 * @offset: The offset that drm_mm_node is used for finding.
362 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
363 unsigned long *offset)
365 struct drm_mm_node *mm_node = mem->mm_node;
367 while (*offset >= (mm_node->size << PAGE_SHIFT)) {
368 *offset -= (mm_node->size << PAGE_SHIFT);
375 * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
377 * The function copies @size bytes from {src->mem + src->offset} to
378 * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
379 * move, or different BOs for a BO-to-BO copy.
381 * @f: Returns the last fence if multiple jobs are submitted.
383 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
384 struct amdgpu_copy_mem *src,
385 struct amdgpu_copy_mem *dst,
387 struct reservation_object *resv,
388 struct dma_fence **f)
390 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
391 struct drm_mm_node *src_mm, *dst_mm;
392 uint64_t src_node_start, dst_node_start, src_node_size,
393 dst_node_size, src_page_offset, dst_page_offset;
394 struct dma_fence *fence = NULL;
396 const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
397 AMDGPU_GPU_PAGE_SIZE);
399 if (!adev->mman.buffer_funcs_enabled) {
400 DRM_ERROR("Trying to move memory with ring turned off.\n");
404 src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
405 src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
407 src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
408 src_page_offset = src_node_start & (PAGE_SIZE - 1);
410 dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
411 dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
413 dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
414 dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
416 mutex_lock(&adev->mman.gtt_window_lock);
419 unsigned long cur_size;
420 uint64_t from = src_node_start, to = dst_node_start;
421 struct dma_fence *next;
423 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
424 * begins at an offset, then adjust the size accordingly
426 cur_size = min3(min(src_node_size, dst_node_size), size, GTT_MAX_BYTES);
428 if (cur_size + src_page_offset > GTT_MAX_BYTES ||
429 cur_size + dst_page_offset > GTT_MAX_BYTES)
430 cur_size -= max(src_page_offset, dst_page_offset);
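/*
 * Illustrative example: if cur_size came out equal to GTT_MAX_BYTES but
 * src_page_offset is non-zero, the mapped range would spill past one GTT
 * window, so cur_size is shrunk by the larger of the two page offsets to
 * keep offset + size inside a single window.
 */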
432 /* Map only what needs to be accessed. Map src to window 0 and dst to window 1. */
435 if (src->mem->mem_type == TTM_PL_TT &&
436 !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
437 r = amdgpu_map_buffer(src->bo, src->mem,
438 PFN_UP(cur_size + src_page_offset),
439 src_node_start, 0, ring,
443 /* Adjust the offset because amdgpu_map_buffer returns
444 * start of mapped page
446 from += src_page_offset;
449 if (dst->mem->mem_type == TTM_PL_TT &&
450 !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
451 r = amdgpu_map_buffer(dst->bo, dst->mem,
452 PFN_UP(cur_size + dst_page_offset),
453 dst_node_start, 1, ring,
457 to += dst_page_offset;
460 r = amdgpu_copy_buffer(ring, from, to, cur_size,
461 resv, &next, false, true);
465 dma_fence_put(fence);
472 src_node_size -= cur_size;
473 if (!src_node_size) {
474 src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
476 src_node_size = (src_mm->size << PAGE_SHIFT);
478 src_node_start += cur_size;
479 src_page_offset = src_node_start & (PAGE_SIZE - 1);
481 dst_node_size -= cur_size;
482 if (!dst_node_size) {
483 dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
485 dst_node_size = (dst_mm->size << PAGE_SHIFT);
487 dst_node_start += cur_size;
488 dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
492 mutex_unlock(&adev->mman.gtt_window_lock);
494 *f = dma_fence_get(fence);
495 dma_fence_put(fence);
500 * amdgpu_move_blit - Copy an entire buffer to another buffer
502 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
503 * help move buffers to and from VRAM.
505 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
506 bool evict, bool no_wait_gpu,
507 struct ttm_mem_reg *new_mem,
508 struct ttm_mem_reg *old_mem)
510 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
511 struct amdgpu_copy_mem src, dst;
512 struct dma_fence *fence = NULL;
522 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
523 new_mem->num_pages << PAGE_SHIFT,
528 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
529 dma_fence_put(fence);
534 dma_fence_wait(fence, false);
535 dma_fence_put(fence);
540 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
542 * Called by amdgpu_bo_move().
544 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
545 struct ttm_operation_ctx *ctx,
546 struct ttm_mem_reg *new_mem)
548 struct amdgpu_device *adev;
549 struct ttm_mem_reg *old_mem = &bo->mem;
550 struct ttm_mem_reg tmp_mem;
551 struct ttm_place placements;
552 struct ttm_placement placement;
555 adev = amdgpu_ttm_adev(bo->bdev);
557 /* create space/pages for new_mem in GTT space */
559 tmp_mem.mm_node = NULL;
560 placement.num_placement = 1;
561 placement.placement = &placements;
562 placement.num_busy_placement = 1;
563 placement.busy_placement = &placements;
566 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
567 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
572 /* set caching flags */
573 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
578 /* Bind the memory to the GTT space */
579 r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
584 /* blit VRAM to GTT */
585 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
590 /* move BO (in tmp_mem) to new_mem */
591 r = ttm_bo_move_ttm(bo, ctx, new_mem);
593 ttm_bo_mem_put(bo, &tmp_mem);
598 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
600 * Called by amdgpu_bo_move().
602 static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
603 struct ttm_operation_ctx *ctx,
604 struct ttm_mem_reg *new_mem)
606 struct amdgpu_device *adev;
607 struct ttm_mem_reg *old_mem = &bo->mem;
608 struct ttm_mem_reg tmp_mem;
609 struct ttm_placement placement;
610 struct ttm_place placements;
613 adev = amdgpu_ttm_adev(bo->bdev);
615 /* make space in GTT for old_mem buffer */
617 tmp_mem.mm_node = NULL;
618 placement.num_placement = 1;
619 placement.placement = &placements;
620 placement.num_busy_placement = 1;
621 placement.busy_placement = &placements;
624 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
625 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
630 /* move/bind old memory to GTT space */
631 r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
637 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
642 ttm_bo_mem_put(bo, &tmp_mem);
647 * amdgpu_bo_move - Move a buffer object to a new memory location
649 * Called by ttm_bo_handle_move_mem()
651 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
652 struct ttm_operation_ctx *ctx,
653 struct ttm_mem_reg *new_mem)
655 struct amdgpu_device *adev;
656 struct amdgpu_bo *abo;
657 struct ttm_mem_reg *old_mem = &bo->mem;
660 /* Can't move a pinned BO */
661 abo = ttm_to_amdgpu_bo(bo);
662 if (WARN_ON_ONCE(abo->pin_count > 0))
665 adev = amdgpu_ttm_adev(bo->bdev);
667 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
668 amdgpu_move_null(bo, new_mem);
671 if ((old_mem->mem_type == TTM_PL_TT &&
672 new_mem->mem_type == TTM_PL_SYSTEM) ||
673 (old_mem->mem_type == TTM_PL_SYSTEM &&
674 new_mem->mem_type == TTM_PL_TT)) {
676 amdgpu_move_null(bo, new_mem);
680 if (!adev->mman.buffer_funcs_enabled)
683 if (old_mem->mem_type == TTM_PL_VRAM &&
684 new_mem->mem_type == TTM_PL_SYSTEM) {
685 r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
686 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
687 new_mem->mem_type == TTM_PL_VRAM) {
688 r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
690 r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
696 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
702 if (bo->type == ttm_bo_type_device &&
703 new_mem->mem_type == TTM_PL_VRAM &&
704 old_mem->mem_type != TTM_PL_VRAM) {
705 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
706 * accesses the BO after it's moved.
708 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
711 /* update statistics */
712 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
717 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
719 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
721 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
723 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
724 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
725 struct drm_mm_node *mm_node = mem->mm_node;
727 mem->bus.addr = NULL;
729 mem->bus.size = mem->num_pages << PAGE_SHIFT;
731 mem->bus.is_iomem = false;
732 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
734 switch (mem->mem_type) {
741 mem->bus.offset = mem->start << PAGE_SHIFT;
742 /* check if it's visible */
743 if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
745 /* Only physically contiguous buffers apply. In a contiguous
746 * buffer, size of the first mm_node would match the number of
747 * pages in ttm_mem_reg.
749 if (adev->mman.aper_base_kaddr &&
750 (mm_node->size == mem->num_pages))
751 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
754 mem->bus.base = adev->gmc.aper_base;
755 mem->bus.is_iomem = true;
763 static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
767 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
768 unsigned long page_offset)
770 struct drm_mm_node *mm;
771 unsigned long offset = (page_offset << PAGE_SHIFT);
773 mm = amdgpu_find_mm_node(&bo->mem, &offset);
774 return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
775 (offset >> PAGE_SHIFT);
779 * TTM backend functions.
781 struct amdgpu_ttm_gup_task_list {
782 struct list_head list;
783 struct task_struct *task;
786 struct amdgpu_ttm_tt {
787 struct ttm_dma_tt ttm;
790 struct task_struct *usertask;
792 spinlock_t guptasklock;
793 struct list_head guptasks;
794 atomic_t mmu_invalidations;
795 uint32_t last_set_pages;
799 * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
802 * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
803 * This provides a wrapper around the get_user_pages() call to provide
804 * device accessible pages that back user memory.
806 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
808 struct amdgpu_ttm_tt *gtt = (void *)ttm;
809 struct mm_struct *mm = gtt->usertask->mm;
810 unsigned int flags = 0;
814 if (!mm) /* Happens during process shutdown */
817 if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
820 down_read(&mm->mmap_sem);
822 if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
824 * check that we only use anonymous memory to prevent problems
827 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
828 struct vm_area_struct *vma;
830 vma = find_vma(mm, gtt->userptr);
831 if (!vma || vma->vm_file || vma->vm_end < end) {
832 up_read(&mm->mmap_sem);
837 /* loop enough times using contiguous pages of memory */
839 unsigned num_pages = ttm->num_pages - pinned;
840 uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
841 struct page **p = pages + pinned;
842 struct amdgpu_ttm_gup_task_list guptask;
844 guptask.task = current;
845 spin_lock(&gtt->guptasklock);
846 list_add(&guptask.list, &gtt->guptasks);
847 spin_unlock(&gtt->guptasklock);
849 if (mm == current->mm)
850 r = get_user_pages(userptr, num_pages, flags, p, NULL);
852 r = get_user_pages_remote(gtt->usertask,
853 mm, userptr, num_pages,
854 flags, p, NULL, NULL);
856 spin_lock(&gtt->guptasklock);
857 list_del(&guptask.list);
858 spin_unlock(&gtt->guptasklock);
865 } while (pinned < ttm->num_pages);
867 up_read(&mm->mmap_sem);
871 release_pages(pages, pinned);
872 up_read(&mm->mmap_sem);
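/*
 * Hedged caller sketch (simplified, not the exact code in amdgpu_gem.c or
 * amdgpu_cs.c): callers allocate a page array sized for the ttm_tt, acquire
 * the user pages and then publish them with amdgpu_ttm_tt_set_user_pages():
 *
 *	struct page **pages = kvmalloc_array(ttm->num_pages,
 *					     sizeof(struct page *),
 *					     GFP_KERNEL);
 *	int r = amdgpu_ttm_tt_get_user_pages(ttm, pages);
 *	if (!r)
 *		amdgpu_ttm_tt_set_user_pages(ttm, pages);
 *	kvfree(pages);
 */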
877 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
879 * Called by amdgpu_cs_list_validate(). This creates the page list
880 * that backs user memory and will ultimately be mapped into the device
883 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
885 struct amdgpu_ttm_tt *gtt = (void *)ttm;
888 gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
889 for (i = 0; i < ttm->num_pages; ++i) {
891 put_page(ttm->pages[i]);
893 ttm->pages[i] = pages ? pages[i] : NULL;
898 * amdgpu_ttm_tt_mark_user_page - Mark pages as dirty
900 * Called while unpinning userptr pages
902 void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
904 struct amdgpu_ttm_tt *gtt = (void *)ttm;
907 for (i = 0; i < ttm->num_pages; ++i) {
908 struct page *page = ttm->pages[i];
913 if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
914 set_page_dirty(page);
916 mark_page_accessed(page);
921 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
923 * Called by amdgpu_ttm_backend_bind()
925 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
927 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
928 struct amdgpu_ttm_tt *gtt = (void *)ttm;
932 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
933 enum dma_data_direction direction = write ?
934 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
936 /* Allocate an SG array and squash pages into it */
937 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
938 ttm->num_pages << PAGE_SHIFT,
943 /* Map SG to device */
945 nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
946 if (nents != ttm->sg->nents)
949 /* convert SG to linear array of pages and dma addresses */
950 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
951 gtt->ttm.dma_address, ttm->num_pages);
962 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
964 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
966 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
967 struct amdgpu_ttm_tt *gtt = (void *)ttm;
969 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
970 enum dma_data_direction direction = write ?
971 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
973 /* double check that we don't free the table twice */
974 if (!ttm->sg || !ttm->sg->sgl)
977 /* unmap the pages mapped to the device */
978 dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
980 /* mark the pages as dirty */
981 amdgpu_ttm_tt_mark_user_pages(ttm);
983 sg_free_table(ttm->sg);
986 int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
987 struct ttm_buffer_object *tbo,
990 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
991 struct ttm_tt *ttm = tbo->ttm;
992 struct amdgpu_ttm_tt *gtt = (void *)ttm;
995 if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
996 uint64_t page_idx = 1;
998 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
999 ttm->pages, gtt->ttm.dma_address, flags);
1001 goto gart_bind_fail;
1003 /* Patch mtype of the second part BO */
1004 flags &= ~AMDGPU_PTE_MTYPE_MASK;
1005 flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);
1007 r = amdgpu_gart_bind(adev,
1008 gtt->offset + (page_idx << PAGE_SHIFT),
1009 ttm->num_pages - page_idx,
1010 &ttm->pages[page_idx],
1011 &(gtt->ttm.dma_address[page_idx]), flags);
1013 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1014 ttm->pages, gtt->ttm.dma_address, flags);
1019 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1020 ttm->num_pages, gtt->offset);
1026 * amdgpu_ttm_backend_bind - Bind GTT memory
1028 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
1029 * This handles binding GTT memory to the device address space.
1031 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
1032 struct ttm_mem_reg *bo_mem)
1034 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1035 struct amdgpu_ttm_tt *gtt = (void*)ttm;
1040 r = amdgpu_ttm_tt_pin_userptr(ttm);
1042 DRM_ERROR("failed to pin userptr\n");
1046 if (!ttm->num_pages) {
1047 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
1048 ttm->num_pages, bo_mem, ttm);
1051 if (bo_mem->mem_type == AMDGPU_PL_GDS ||
1052 bo_mem->mem_type == AMDGPU_PL_GWS ||
1053 bo_mem->mem_type == AMDGPU_PL_OA)
1056 if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
1057 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
1061 /* compute PTE flags relevant to this BO memory */
1062 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
1064 /* bind pages into GART page tables */
1065 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
1066 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1067 ttm->pages, gtt->ttm.dma_address, flags);
1070 DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1071 ttm->num_pages, gtt->offset);
1076 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
1078 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
1080 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1081 struct ttm_operation_ctx ctx = { false, false };
1082 struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
1083 struct ttm_mem_reg tmp;
1084 struct ttm_placement placement;
1085 struct ttm_place placements;
1089 if (bo->mem.mem_type != TTM_PL_TT ||
1090 amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
1093 /* allocate GTT space */
1096 placement.num_placement = 1;
1097 placement.placement = &placements;
1098 placement.num_busy_placement = 1;
1099 placement.busy_placement = &placements;
1100 placements.fpfn = 0;
1101 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
1102 placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
1105 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
1109 /* compute PTE flags for this buffer object */
1110 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
1113 gtt->offset = (u64)tmp.start << PAGE_SHIFT;
1114 r = amdgpu_ttm_gart_bind(adev, bo, flags);
1116 ttm_bo_mem_put(bo, &tmp);
1120 ttm_bo_mem_put(bo, &bo->mem);
1122 bo->offset = (bo->mem.start << PAGE_SHIFT) +
1123 bo->bdev->man[bo->mem.mem_type].gpu_offset;
1129 * amdgpu_ttm_recover_gart - Rebind GTT pages
1131 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1132 * rebind GTT pages during a GPU reset.
1134 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1136 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1143 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
1144 r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1150 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1152 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1155 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
1157 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1158 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1161 /* if the pages have userptr pinning then clear that first */
1163 amdgpu_ttm_tt_unpin_userptr(ttm);
1165 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1168 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1169 r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1171 DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
1172 gtt->ttm.ttm.num_pages, gtt->offset);
1176 static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
1178 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1181 put_task_struct(gtt->usertask);
1183 ttm_dma_tt_fini(&gtt->ttm);
1187 static struct ttm_backend_func amdgpu_backend_func = {
1188 .bind = &amdgpu_ttm_backend_bind,
1189 .unbind = &amdgpu_ttm_backend_unbind,
1190 .destroy = &amdgpu_ttm_backend_destroy,
1194 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1196 * @bo: The buffer object to create a GTT ttm_tt object around
1198 * Called by ttm_tt_create().
1200 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1201 uint32_t page_flags)
1203 struct amdgpu_device *adev;
1204 struct amdgpu_ttm_tt *gtt;
1206 adev = amdgpu_ttm_adev(bo->bdev);
1208 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1212 gtt->ttm.ttm.func = &amdgpu_backend_func;
1214 /* allocate space for the uninitialized page entries */
1215 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
1219 return &gtt->ttm.ttm;
1223 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1225 * Map the pages of a ttm_tt object to an address space visible
1226 * to the underlying device.
1228 static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
1229 struct ttm_operation_ctx *ctx)
1231 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
1232 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1233 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1235 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1236 if (gtt && gtt->userptr) {
1237 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1241 ttm->page_flags |= TTM_PAGE_FLAG_SG;
1242 ttm->state = tt_unbound;
1246 if (slave && ttm->sg) {
1247 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1248 gtt->ttm.dma_address,
1250 ttm->state = tt_unbound;
1254 #ifdef CONFIG_SWIOTLB
1255 if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1256 return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
1260 /* fall back to generic helper to populate the page array
1261 * and map them to the device */
1262 return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
1266 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1268 * Unmaps pages of a ttm_tt object from the device address space and
1269 * unpopulates the page array backing it.
1271 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
1273 struct amdgpu_device *adev;
1274 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1275 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1277 if (gtt && gtt->userptr) {
1278 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1281 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1288 adev = amdgpu_ttm_adev(ttm->bdev);
1290 #ifdef CONFIG_SWIOTLB
1291 if (adev->need_swiotlb && swiotlb_nr_tbl()) {
1292 ttm_dma_unpopulate(&gtt->ttm, adev->dev);
1297 /* fall back to generic helper to unmap and unpopulate array */
1298 ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
1302 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current task
1305 * @ttm: The ttm_tt object to bind this userptr object to
1306 * @addr: The address in the current tasks VM space to use
1307 * @flags: Requirements of userptr object.
1309 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages to the current task
1312 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
1315 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1320 gtt->userptr = addr;
1321 gtt->userflags = flags;
1324 put_task_struct(gtt->usertask);
1325 gtt->usertask = current->group_leader;
1326 get_task_struct(gtt->usertask);
1328 spin_lock_init(&gtt->guptasklock);
1329 INIT_LIST_HEAD(&gtt->guptasks);
1330 atomic_set(&gtt->mmu_invalidations, 0);
1331 gtt->last_set_pages = 0;
1337 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1339 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1341 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1346 if (gtt->usertask == NULL)
1349 return gtt->usertask->mm;
1353 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1354 * address range for the current task.
1357 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1360 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1361 struct amdgpu_ttm_gup_task_list *entry;
1364 if (gtt == NULL || !gtt->userptr)
1367 * Return false if no part of the ttm_tt object lies within start and end
1370 size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
1371 if (gtt->userptr > end || gtt->userptr + size <= start)
1374 * Search the list of tasks that hold this mapping and see
1375 * if current is one of them. If it is, return false.
1377 spin_lock(&gtt->guptasklock);
1378 list_for_each_entry(entry, &gtt->guptasks, list) {
1379 if (entry->task == current) {
1380 spin_unlock(&gtt->guptasklock);
1384 spin_unlock(&gtt->guptasklock);
1386 atomic_inc(&gtt->mmu_invalidations);
1392 * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
1394 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
1395 int *last_invalidated)
1397 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1398 int prev_invalidated = *last_invalidated;
1400 *last_invalidated = atomic_read(&gtt->mmu_invalidations);
1401 return prev_invalidated != *last_invalidated;
1405 * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
1406 * been invalidated since the last time they've been set?
1408 bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
1410 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1412 if (gtt == NULL || !gtt->userptr)
1415 return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
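/*
 * The two counters form a simple generation scheme: every MMU notifier
 * invalidation bumps mmu_invalidations (see amdgpu_ttm_tt_affect_userptr()),
 * while amdgpu_ttm_tt_set_user_pages() samples it into last_set_pages. If
 * the values differ here, the user pages were invalidated after they were
 * last set and must be re-acquired via amdgpu_ttm_tt_get_user_pages().
 */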
1419 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1421 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1423 struct amdgpu_ttm_tt *gtt = (void *)ttm;
1428 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1432 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1434 * @ttm: The ttm_tt object to compute the flags for
1435 * @mem: The memory registry backing this ttm_tt object
1437 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1438 struct ttm_mem_reg *mem)
1442 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1443 flags |= AMDGPU_PTE_VALID;
1445 if (mem && mem->mem_type == TTM_PL_TT) {
1446 flags |= AMDGPU_PTE_SYSTEM;
1448 if (ttm->caching_state == tt_cached)
1449 flags |= AMDGPU_PTE_SNOOPED;
1452 flags |= adev->gart.gart_pte_flags;
1453 flags |= AMDGPU_PTE_READABLE;
1455 if (!amdgpu_ttm_tt_is_readonly(ttm))
1456 flags |= AMDGPU_PTE_WRITEABLE;
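/*
 * Example (illustrative): a cached, writable GTT mapping ends up with
 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 * AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE, plus whatever ASIC specific
 * bits adev->gart.gart_pte_flags carries.
 */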
1462 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1465 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1466 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1467 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1468 * used to clean out a memory space.
1470 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1471 const struct ttm_place *place)
1473 unsigned long num_pages = bo->mem.num_pages;
1474 struct drm_mm_node *node = bo->mem.mm_node;
1475 struct reservation_object_list *flist;
1476 struct dma_fence *f;
1479 /* If bo is a KFD BO, check if the bo belongs to the current process.
1480 * If true, then return false as any KFD process needs all its BOs to
1481 * be resident to run successfully
1483 flist = reservation_object_get_list(bo->resv);
1485 for (i = 0; i < flist->shared_count; ++i) {
1486 f = rcu_dereference_protected(flist->shared[i],
1487 reservation_object_held(bo->resv));
1488 if (amdkfd_fence_check_mm(f, current->mm))
1493 switch (bo->mem.mem_type) {
1498 /* Check each drm MM node individually */
1500 if (place->fpfn < (node->start + node->size) &&
1501 !(place->lpfn && place->lpfn <= node->start))
1504 num_pages -= node->size;
1513 return ttm_bo_eviction_valuable(bo, place);
1517 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1519 * @bo: The buffer object to read/write
1520 * @offset: Offset into buffer object
1521 * @buf: Secondary buffer to write/read from
1522 * @len: Length in bytes of access
1523 * @write: true if writing
1525 * This is used to access VRAM that backs a buffer object via MMIO
1526 * access for debugging purposes.
1528 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1529 unsigned long offset,
1530 void *buf, int len, int write)
1532 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1533 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1534 struct drm_mm_node *nodes;
1538 unsigned long flags;
1540 if (bo->mem.mem_type != TTM_PL_VRAM)
1543 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
1544 pos = (nodes->start << PAGE_SHIFT) + offset;
1546 while (len && pos < adev->gmc.mc_vram_size) {
1547 uint64_t aligned_pos = pos & ~(uint64_t)3;
1548 uint32_t bytes = 4 - (pos & 3);
1549 uint32_t shift = (pos & 3) * 8;
1550 uint32_t mask = 0xffffffff << shift;
1553 mask &= 0xffffffff >> (bytes - len) * 8;
1557 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1558 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1559 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1560 if (!write || mask != 0xffffffff)
1561 value = RREG32_NO_KIQ(mmMM_DATA);
1564 value |= (*(uint32_t *)buf << shift) & mask;
1565 WREG32_NO_KIQ(mmMM_DATA, value);
1567 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1569 value = (value & mask) >> shift;
1570 memcpy(buf, &value, bytes);
1574 buf = (uint8_t *)buf + bytes;
1577 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1579 pos = (nodes->start << PAGE_SHIFT);
1586 static struct ttm_bo_driver amdgpu_bo_driver = {
1587 .ttm_tt_create = &amdgpu_ttm_tt_create,
1588 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1589 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1590 .invalidate_caches = &amdgpu_invalidate_caches,
1591 .init_mem_type = &amdgpu_init_mem_type,
1592 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1593 .evict_flags = &amdgpu_evict_flags,
1594 .move = &amdgpu_bo_move,
1595 .verify_access = &amdgpu_verify_access,
1596 .move_notify = &amdgpu_bo_move_notify,
1597 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1598 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1599 .io_mem_free = &amdgpu_ttm_io_mem_free,
1600 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1601 .access_memory = &amdgpu_ttm_access_memory
1605 * Firmware Reservation functions
1608 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1610 * @adev: amdgpu_device pointer
1612 * free fw reserved vram if it has been reserved.
1614 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1616 amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
1617 NULL, &adev->fw_vram_usage.va);
1621 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1623 * @adev: amdgpu_device pointer
1625 * create bo vram reservation from fw.
1627 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1629 struct ttm_operation_ctx ctx = { false, false };
1630 struct amdgpu_bo_param bp;
1633 u64 vram_size = adev->gmc.visible_vram_size;
1634 u64 offset = adev->fw_vram_usage.start_offset;
1635 u64 size = adev->fw_vram_usage.size;
1636 struct amdgpu_bo *bo;
1638 memset(&bp, 0, sizeof(bp));
1639 bp.size = adev->fw_vram_usage.size;
1640 bp.byte_align = PAGE_SIZE;
1641 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
1642 bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
1643 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1644 bp.type = ttm_bo_type_kernel;
1646 adev->fw_vram_usage.va = NULL;
1647 adev->fw_vram_usage.reserved_bo = NULL;
1649 if (adev->fw_vram_usage.size > 0 &&
1650 adev->fw_vram_usage.size <= vram_size) {
1652 r = amdgpu_bo_create(adev, &bp,
1653 &adev->fw_vram_usage.reserved_bo);
1657 r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
1661 /* remove the original mem node and create a new one at the requested position */
1664 bo = adev->fw_vram_usage.reserved_bo;
1665 offset = ALIGN(offset, PAGE_SIZE);
1666 for (i = 0; i < bo->placement.num_placement; ++i) {
1667 bo->placements[i].fpfn = offset >> PAGE_SHIFT;
1668 bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
1671 ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
1672 r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
1673 &bo->tbo.mem, &ctx);
1677 r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
1678 AMDGPU_GEM_DOMAIN_VRAM,
1679 adev->fw_vram_usage.start_offset,
1680 (adev->fw_vram_usage.start_offset +
1681 adev->fw_vram_usage.size));
1684 r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
1685 &adev->fw_vram_usage.va);
1689 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1694 amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
1696 amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
1698 amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
1700 adev->fw_vram_usage.va = NULL;
1701 adev->fw_vram_usage.reserved_bo = NULL;
1705 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1706 * gtt/vram related fields.
1708 * This initializes all of the memory space pools that the TTM layer
1709 * will need such as the GTT space (system memory mapped to the device),
1710 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1711 * can be mapped per VMID.
1713 int amdgpu_ttm_init(struct amdgpu_device *adev)
1719 /* initialize global references for vram/gtt */
1720 r = amdgpu_ttm_global_init(adev);
1724 /* No other users of the address space, so set it to 0 */
1725 r = ttm_bo_device_init(&adev->mman.bdev,
1726 adev->mman.bo_global_ref.ref.object,
1728 adev->ddev->anon_inode->i_mapping,
1729 DRM_FILE_PAGE_OFFSET,
1732 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1735 adev->mman.initialized = true;
1737 /* We opt to avoid OOM on system page allocations */
1738 adev->mman.bdev.no_retry = true;
1740 /* Initialize VRAM pool with all of VRAM divided into pages */
1741 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
1742 adev->gmc.real_vram_size >> PAGE_SHIFT);
1744 DRM_ERROR("Failed initializing VRAM heap.\n");
1748 /* Reduce size of CPU-visible VRAM if requested */
1749 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1750 if (amdgpu_vis_vram_limit > 0 &&
1751 vis_vram_limit <= adev->gmc.visible_vram_size)
1752 adev->gmc.visible_vram_size = vis_vram_limit;
1754 /* Change the size here instead of the init above so only lpfn is affected */
1755 amdgpu_ttm_set_buffer_funcs_status(adev, false);
1757 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1758 adev->gmc.visible_vram_size);
1762 * The reserved VRAM for firmware must be pinned to the specified
1763 * place in VRAM, so reserve it early.
1765 r = amdgpu_ttm_fw_reserve_vram_init(adev);
1770 /* allocate memory as required for VGA
1771 * This is used for VGA emulation and pre-OS scanout buffers to
1772 * avoid display artifacts while transitioning between pre-OS and driver. */
1774 if (adev->gmc.stolen_size) {
1775 r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
1776 AMDGPU_GEM_DOMAIN_VRAM,
1777 &adev->stolen_vga_memory,
1782 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1783 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1785 /* Compute GTT size: either based on 3/4 of the system RAM size
1786 * or whatever the user passed on module init */
1787 if (amdgpu_gtt_size == -1) {
1791 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1792 adev->gmc.mc_vram_size),
1793 ((uint64_t)si.totalram * si.mem_unit * 3/4));
1796 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
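/*
 * Worked example (hypothetical board, assuming the default of
 * AMDGPU_DEFAULT_GTT_SIZE_MB == 3072): with 8 GiB of VRAM and 16 GiB of
 * system RAM the default GTT size is min(max(3 GiB, 8 GiB), 12 GiB) = 8 GiB,
 * i.e. GTT is sized to cover VRAM but capped at 3/4 of system RAM.
 */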
1798 /* Initialize GTT memory pool */
1799 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
1801 DRM_ERROR("Failed initializing GTT heap.\n");
1804 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1805 (unsigned)(gtt_size / (1024 * 1024)));
1807 /* Initialize various on-chip memory pools */
1808 adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
1809 adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
1810 adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
1811 adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
1812 adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
1813 adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
1814 adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
1815 adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
1816 adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
1818 if (adev->gds.mem.total_size) {
1819 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
1820 adev->gds.mem.total_size >> PAGE_SHIFT);
1822 DRM_ERROR("Failed initializing GDS heap.\n");
1828 if (adev->gds.gws.total_size) {
1829 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
1830 adev->gds.gws.total_size >> PAGE_SHIFT);
1832 DRM_ERROR("Failed initializing gws heap.\n");
1838 if (adev->gds.oa.total_size) {
1839 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
1840 adev->gds.oa.total_size >> PAGE_SHIFT);
1842 DRM_ERROR("Failed initializing oa heap.\n");
1847 /* Register debugfs entries for amdgpu_ttm */
1848 r = amdgpu_ttm_debugfs_init(adev);
1850 DRM_ERROR("Failed to init debugfs\n");
1857 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
1859 void amdgpu_ttm_late_init(struct amdgpu_device *adev)
1861 /* return the VGA stolen memory (if any) back to VRAM */
1862 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
1866 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1868 void amdgpu_ttm_fini(struct amdgpu_device *adev)
1870 if (!adev->mman.initialized)
1873 amdgpu_ttm_debugfs_fini(adev);
1874 amdgpu_ttm_fw_reserve_vram_fini(adev);
1875 if (adev->mman.aper_base_kaddr)
1876 iounmap(adev->mman.aper_base_kaddr);
1877 adev->mman.aper_base_kaddr = NULL;
1879 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
1880 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
1881 if (adev->gds.mem.total_size)
1882 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
1883 if (adev->gds.gws.total_size)
1884 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
1885 if (adev->gds.oa.total_size)
1886 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
1887 ttm_bo_device_release(&adev->mman.bdev);
1888 amdgpu_ttm_global_fini(adev);
1889 adev->mman.initialized = false;
1890 DRM_INFO("amdgpu: ttm finalized\n");
1894 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1896 * @adev: amdgpu_device pointer
1897 * @enable: true when we can use buffer functions.
1899 * Enable/disable use of buffer functions during suspend/resume. This should
1900 * only be called at bootup or when userspace isn't running.
1902 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1904 struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
1908 if (!adev->mman.initialized || adev->in_gpu_reset ||
1909 adev->mman.buffer_funcs_enabled == enable)
1913 struct amdgpu_ring *ring;
1914 struct drm_sched_rq *rq;
1916 ring = adev->mman.buffer_funcs_ring;
1917 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
1918 r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
1920 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1925 drm_sched_entity_destroy(&adev->mman.entity);
1926 dma_fence_put(man->move);
1930 /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
1932 size = adev->gmc.real_vram_size;
1934 size = adev->gmc.visible_vram_size;
1935 man->size = size >> PAGE_SHIFT;
1936 adev->mman.buffer_funcs_enabled = enable;
1939 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
1941 struct drm_file *file_priv;
1942 struct amdgpu_device *adev;
1944 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
1947 file_priv = filp->private_data;
1948 adev = file_priv->minor->dev->dev_private;
1952 return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
1955 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
1956 struct ttm_mem_reg *mem, unsigned num_pages,
1957 uint64_t offset, unsigned window,
1958 struct amdgpu_ring *ring, uint64_t *addr)
1961 struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
1962 struct amdgpu_device *adev = ring->adev;
1963 struct ttm_tt *ttm = bo->ttm;
1964 struct amdgpu_job *job;
1965 unsigned num_dw, num_bytes;
1966 dma_addr_t *dma_address;
1967 struct dma_fence *fence;
1968 uint64_t src_addr, dst_addr;
1972 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
1973 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
1975 *addr = adev->gmc.gart_start;
1976 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
1977 AMDGPU_GPU_PAGE_SIZE;
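/*
 * Each copy window is AMDGPU_GTT_MAX_TRANSFER_SIZE GPU pages wide; assuming
 * (for illustration) 512 pages of 4 KiB each, window 1 would start 2 MiB
 * past gart_start.
 */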
1979 num_dw = adev->mman.buffer_funcs->copy_num_dw;
1980 while (num_dw & 0x7)
1983 num_bytes = num_pages * 8;
1985 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
1989 src_addr = num_dw * 4;
1990 src_addr += job->ibs[0].gpu_addr;
1992 dst_addr = adev->gart.table_addr;
1993 dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
1994 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
1995 dst_addr, num_bytes);
1997 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
1998 WARN_ON(job->ibs[0].length_dw > num_dw);
2000 dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
2001 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
2002 r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
2003 &job->ibs[0].ptr[num_dw]);
2007 r = amdgpu_job_submit(job, &adev->mman.entity,
2008 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
2012 dma_fence_put(fence);
2017 amdgpu_job_free(job);
2021 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2022 uint64_t dst_offset, uint32_t byte_count,
2023 struct reservation_object *resv,
2024 struct dma_fence **fence, bool direct_submit,
2025 bool vm_needs_flush)
2027 struct amdgpu_device *adev = ring->adev;
2028 struct amdgpu_job *job;
2031 unsigned num_loops, num_dw;
2035 if (direct_submit && !ring->ready) {
2036 DRM_ERROR("Trying to move memory with ring turned off.\n");
2040 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2041 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2042 num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
2044 /* for IB padding */
2045 while (num_dw & 0x7)
2048 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
2052 job->vm_needs_flush = vm_needs_flush;
2054 r = amdgpu_sync_resv(adev, &job->sync, resv,
2055 AMDGPU_FENCE_OWNER_UNDEFINED,
2058 DRM_ERROR("sync failed (%d).\n", r);
2063 for (i = 0; i < num_loops; i++) {
2064 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2066 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2067 dst_offset, cur_size_in_bytes);
2069 src_offset += cur_size_in_bytes;
2070 dst_offset += cur_size_in_bytes;
2071 byte_count -= cur_size_in_bytes;
2074 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2075 WARN_ON(job->ibs[0].length_dw > num_dw);
2077 r = amdgpu_job_submit_direct(job, ring, fence);
2079 r = amdgpu_job_submit(job, &adev->mman.entity,
2080 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2087 amdgpu_job_free(job);
2088 DRM_ERROR("Error scheduling IBs (%d)\n", r);
2092 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2094 struct reservation_object *resv,
2095 struct dma_fence **fence)
2097 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2098 uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2099 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2101 struct drm_mm_node *mm_node;
2102 unsigned long num_pages;
2103 unsigned int num_loops, num_dw;
2105 struct amdgpu_job *job;
2108 if (!adev->mman.buffer_funcs_enabled) {
2109 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2113 if (bo->tbo.mem.mem_type == TTM_PL_TT) {
2114 r = amdgpu_ttm_alloc_gart(&bo->tbo);
2119 num_pages = bo->tbo.num_pages;
2120 mm_node = bo->tbo.mem.mm_node;
2123 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
2125 num_loops += DIV_ROUND_UP(byte_count, max_bytes);
2126 num_pages -= mm_node->size;
2129 num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
2131 /* for IB padding */
2134 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
2139 r = amdgpu_sync_resv(adev, &job->sync, resv,
2140 AMDGPU_FENCE_OWNER_UNDEFINED, false);
2142 DRM_ERROR("sync failed (%d).\n", r);
2147 num_pages = bo->tbo.num_pages;
2148 mm_node = bo->tbo.mem.mm_node;
2151 uint32_t byte_count = mm_node->size << PAGE_SHIFT;
2154 dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
2155 while (byte_count) {
2156 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2158 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
2159 dst_addr, cur_size_in_bytes);
2161 dst_addr += cur_size_in_bytes;
2162 byte_count -= cur_size_in_bytes;
2165 num_pages -= mm_node->size;
2169 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2170 WARN_ON(job->ibs[0].length_dw > num_dw);
2171 r = amdgpu_job_submit(job, &adev->mman.entity,
2172 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2179 amdgpu_job_free(job);
2183 #if defined(CONFIG_DEBUG_FS)
2185 static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
2187 struct drm_info_node *node = (struct drm_info_node *)m->private;
2188 unsigned ttm_pl = *(int *)node->info_ent->data;
2189 struct drm_device *dev = node->minor->dev;
2190 struct amdgpu_device *adev = dev->dev_private;
2191 struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
2192 struct drm_printer p = drm_seq_file_printer(m);
2194 man->func->debug(man, &p);
2198 static int ttm_pl_vram = TTM_PL_VRAM;
2199 static int ttm_pl_tt = TTM_PL_TT;
2201 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
2202 {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
2203 {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
2204 {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
2205 #ifdef CONFIG_SWIOTLB
2206 {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
2211 * amdgpu_ttm_vram_read - Linear read access to VRAM
2213 * Accesses VRAM via MMIO for debugging purposes.
2215 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2216 size_t size, loff_t *pos)
2218 struct amdgpu_device *adev = file_inode(f)->i_private;
2222 if (size & 0x3 || *pos & 0x3)
2225 if (*pos >= adev->gmc.mc_vram_size)
2229 unsigned long flags;
2232 if (*pos >= adev->gmc.mc_vram_size)
2235 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2236 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2237 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2238 value = RREG32_NO_KIQ(mmMM_DATA);
2239 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2241 r = put_user(value, (uint32_t *)buf);
2255 * amdgpu_ttm_vram_write - Linear write access to VRAM
2257 * Accesses VRAM via MMIO for debugging purposes.
2259 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2260 size_t size, loff_t *pos)
2262 struct amdgpu_device *adev = file_inode(f)->i_private;
2266 if (size & 0x3 || *pos & 0x3)
2269 if (*pos >= adev->gmc.mc_vram_size)
2273 unsigned long flags;
2276 if (*pos >= adev->gmc.mc_vram_size)
2279 r = get_user(value, (uint32_t *)buf);
2283 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2284 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2285 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2286 WREG32_NO_KIQ(mmMM_DATA, value);
2287 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2298 static const struct file_operations amdgpu_ttm_vram_fops = {
2299 .owner = THIS_MODULE,
2300 .read = amdgpu_ttm_vram_read,
2301 .write = amdgpu_ttm_vram_write,
2302 .llseek = default_llseek,
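/*
 * Example usage from userspace (illustrative, the path depends on the DRM
 * minor number):
 *
 *	dd if=/sys/kernel/debug/dri/0/amdgpu_vram bs=4 count=1 | xxd
 *
 * reads the first dword of VRAM through the MM_INDEX/MM_DATA aperture used
 * by amdgpu_ttm_vram_read() above.
 */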
2305 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2308 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
2310 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
2311 size_t size, loff_t *pos)
2313 struct amdgpu_device *adev = file_inode(f)->i_private;
2318 loff_t p = *pos / PAGE_SIZE;
2319 unsigned off = *pos & ~PAGE_MASK;
2320 size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
2324 if (p >= adev->gart.num_cpu_pages)
2327 page = adev->gart.pages[p];
2332 r = copy_to_user(buf, ptr, cur_size);
2333 kunmap(adev->gart.pages[p]);
2335 r = clear_user(buf, cur_size);
2349 static const struct file_operations amdgpu_ttm_gtt_fops = {
2350 .owner = THIS_MODULE,
2351 .read = amdgpu_ttm_gtt_read,
2352 .llseek = default_llseek
2358 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2360 * This function is used to read memory that has been mapped to the
2361 * GPU and the known addresses are not physical addresses but instead
2362 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2364 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2365 size_t size, loff_t *pos)
2367 struct amdgpu_device *adev = file_inode(f)->i_private;
2368 struct iommu_domain *dom;
2372 /* retrieve the IOMMU domain if any for this device */
2373 dom = iommu_get_domain_for_dev(adev->dev);
2376 phys_addr_t addr = *pos & PAGE_MASK;
2377 loff_t off = *pos & ~PAGE_MASK;
2378 size_t bytes = PAGE_SIZE - off;
2383 bytes = bytes < size ? bytes : size;
2385 /* Translate the bus address to a physical address. If
2386 * the domain is NULL it means there is no IOMMU active
2387 * and the address translation is the identity
2389 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
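/*
 * e.g. a DMA address handed out by dma_map_page() while an IOMMU is active
 * is an IOVA that iommu_iova_to_phys() maps back to the backing physical
 * page; without an IOMMU the address is already physical and is used as-is.
 */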
2391 pfn = addr >> PAGE_SHIFT;
2392 if (!pfn_valid(pfn))
2395 p = pfn_to_page(pfn);
2396 if (p->mapping != adev->mman.bdev.dev_mapping)
2400 r = copy_to_user(buf, ptr + off, bytes);
2414 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2416 * This function is used to write memory that has been mapped to the
2417 * GPU and the known addresses are not physical addresses but instead
2418 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2420 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2421 size_t size, loff_t *pos)
2423 struct amdgpu_device *adev = file_inode(f)->i_private;
2424 struct iommu_domain *dom;
2428 dom = iommu_get_domain_for_dev(adev->dev);
2431 phys_addr_t addr = *pos & PAGE_MASK;
2432 loff_t off = *pos & ~PAGE_MASK;
2433 size_t bytes = PAGE_SIZE - off;
2438 bytes = bytes < size ? bytes : size;
2440 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2442 pfn = addr >> PAGE_SHIFT;
2443 if (!pfn_valid(pfn))
2446 p = pfn_to_page(pfn);
2447 if (p->mapping != adev->mman.bdev.dev_mapping)
2451 r = copy_from_user(ptr + off, buf, bytes);
2464 static const struct file_operations amdgpu_ttm_iomem_fops = {
2465 .owner = THIS_MODULE,
2466 .read = amdgpu_iomem_read,
2467 .write = amdgpu_iomem_write,
2468 .llseek = default_llseek
2471 static const struct {
2473 const struct file_operations *fops;
2475 } ttm_debugfs_entries[] = {
2476 { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
2477 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2478 { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2480 { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2485 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2487 #if defined(CONFIG_DEBUG_FS)
2490 struct drm_minor *minor = adev->ddev->primary;
2491 struct dentry *ent, *root = minor->debugfs_root;
2493 for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
2494 ent = debugfs_create_file(
2495 ttm_debugfs_entries[count].name,
2496 S_IFREG | S_IRUGO, root,
2498 ttm_debugfs_entries[count].fops);
2500 return PTR_ERR(ent);
2501 if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
2502 i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
2503 else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
2504 i_size_write(ent->d_inode, adev->gmc.gart_size);
2505 adev->mman.debugfs_entries[count] = ent;
2508 count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
2510 #ifdef CONFIG_SWIOTLB
2511 if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
2515 return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2521 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
2523 #if defined(CONFIG_DEBUG_FS)
2526 for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
2527 debugfs_remove(adev->mman.debugfs_entries[i]);