/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
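
/*
 * Build a scatter/gather table describing the backing pages of the buffer
 * object (BO) so that an importing device can map them.
 */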
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
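
/*
 * Map the whole BO into the kernel's address space through TTM and return
 * the virtual address for the dma-buf vmap interface.
 */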
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}
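
/* Tear down the kernel mapping created by amdgpu_gem_prime_vmap(). */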
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
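
/*
 * Import a dma-buf: create a GTT-domain BO backed by the exporter's sg
 * table and sharing the exporter's reservation object.
 */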
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	int ret;

	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);

	bo->prime_shared_count = 1;
	return &bo->gem_base;
}
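
/*
 * Pin a prime shared BO into GTT so that importers see a stable mapping.
 */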
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	long ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * Wait for all shared fences to complete before we switch to future
	 * use of exclusive fence on this prime shared bo.
	 */
	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						  MAX_SCHEDULE_TIMEOUT);
	if (unlikely(ret < 0)) {
		DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
		amdgpu_bo_unreserve(bo);
		return ret;
	}

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (likely(ret == 0))
		bo->prime_shared_count++;

	amdgpu_bo_unreserve(bo);
	return ret;
}
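
/* Drop the importer's pin and the shared-count reference on the BO. */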
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return;

	amdgpu_bo_unpin(bo);
	if (bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);
}
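
/*
 * Return the BO's reservation object, used for fencing of the shared
 * buffer.
 */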
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}
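
/*
 * Export a BO as a dma-buf; BOs backed by userptr pages are refused.
 */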
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(dev, gobj, flags);
}