/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"

#include <linux/delay.h>

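/*
 * Page offset at which TTM starts handing out fake mmap offsets for
 * buffer objects; passed to ttm_bo_device_init() below.
 */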
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

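/*
 * Walk back from the ttm_bo_device embedded in virtio_gpu_mman to the
 * owning virtio_gpu_device.
 */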
static struct
virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
{
        struct virtio_gpu_mman *mman;
        struct virtio_gpu_device *vgdev;

        mman = container_of(bdev, struct virtio_gpu_mman, bdev);
        vgdev = container_of(mman, struct virtio_gpu_device, mman);
        return vgdev;
}

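/*
 * TTM keeps a single memory-accounting object and a single BO global state
 * object shared by all drivers; drm_global_item_ref() reference-counts them
 * and calls these hooks for the first and last user.
 */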
static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
{
        struct drm_global_reference *global_ref;
        int r;

        vgdev->mman.mem_global_referenced = false;
        global_ref = &vgdev->mman.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &virtio_gpu_ttm_mem_global_init;
        global_ref->release = &virtio_gpu_ttm_mem_global_release;

        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
                return r;
        }

        vgdev->mman.bo_global_ref.mem_glob =
                vgdev->mman.mem_global_ref.object;
        global_ref = &vgdev->mman.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                drm_global_item_unref(&vgdev->mman.mem_global_ref);
                return r;
        }

        vgdev->mman.mem_global_referenced = true;
        return 0;
}

static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
{
        if (vgdev->mman.mem_global_referenced) {
                drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
                drm_global_item_unref(&vgdev->mman.mem_global_ref);
                vgdev->mman.mem_global_referenced = false;
        }
}

#if 0
/*
 * Disabled: this wrapper adds nothing over the stock TTM fault handler and
 * looks like a leftover debug hack, perhaps meant to log page faults.
 */
static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;

static int virtio_gpu_ttm_fault(struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo;
        struct virtio_gpu_device *vgdev;
        int r;

        bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
        if (bo == NULL)
                return VM_FAULT_NOPAGE;
        vgdev = virtio_gpu_get_vgdev(bo->bdev);
        r = ttm_vm_ops->fault(vmf);
        return r;
}
#endif

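/*
 * mmap() entry point for the DRM device file: sanity-check the per-device
 * state and hand the VMA to TTM, which installs its own vm_ops and fault
 * handler.
 */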
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct virtio_gpu_device *vgdev;
        int r;

        file_priv = filp->private_data;
        vgdev = file_priv->minor->dev->dev_private;
        if (vgdev == NULL) {
                DRM_ERROR(
                 "filp->private_data->minor->dev->dev_private == NULL\n");
                return -EINVAL;
        }
        r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
#if 0
        if (unlikely(r != 0))
                return r;
        if (unlikely(ttm_vm_ops == NULL)) {
                ttm_vm_ops = vma->vm_ops;
                virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
                virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;
        }
        vma->vm_ops = &virtio_gpu_ttm_vm_ops;
        return 0;
#else
        return r;
#endif
}

static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
                                        uint32_t flags)
{
        return 0;
}

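/*
 * virtio-gpu has no aperture or GPU address space to carve ranges out of:
 * buffers stay in guest system pages and are attached to host resources via
 * virtio commands.  The manager below therefore only hands out a dummy,
 * non-NULL mm_node so TTM considers the allocation successful.
 */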
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                               struct ttm_buffer_object *bo,
                               const struct ttm_place *place,
                               struct ttm_mem_reg *mem)
{
        mem->mm_node = (void *)1;
        return 0;
}

static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
                                struct ttm_mem_reg *mem)
{
        mem->mm_node = (void *)NULL;
}

static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
                           unsigned long p_size)
{
        return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
                             struct drm_printer *printer)
{
}

static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
        .init = ttm_bo_man_init,
        .takedown = ttm_bo_man_takedown,
        .get_node = ttm_bo_man_get_node,
        .put_node = ttm_bo_man_put_node,
        .debug = ttm_bo_man_debug
};

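/*
 * Both memory types the driver exposes (SYSTEM and TT) are mappable, cached
 * guest RAM; TT additionally uses the dummy range manager above.
 */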
static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                    struct ttm_mem_type_manager *man)
{
        struct virtio_gpu_device *vgdev;

        vgdev = virtio_gpu_get_vgdev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
                man->func = &virtio_gpu_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }
        return 0;
}

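/* On eviction the only valid destination is system memory. */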
static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        static const struct ttm_place placements = {
                .fpfn  = 0,
                .lpfn  = 0,
                .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
        };

        placement->placement = &placements;
        placement->busy_placement = &placements;
        placement->num_placement = 1;
        placement->num_busy_placement = 1;
}

static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
                                    struct file *filp)
{
        return 0;
}

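/*
 * No placement is backed by I/O memory, so there is never a bus window to
 * reserve or free.
 */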
static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                         struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case TTM_PL_TT:
                /* system memory */
                return 0;
        default:
                return -EINVAL;
        }
        return 0;
}

static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                       struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct virtio_gpu_ttm_tt {
        struct ttm_dma_tt               ttm;
        struct virtio_gpu_device        *vgdev;
        u64                             offset;
};

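/*
 * Bind/unbind are effectively no-ops here: bind only records the placement
 * offset (and warns if the ttm has no pages); the actual attach/detach of
 * pages to the host resource is driven from the move_notify hook below.
 */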
static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
                                       struct ttm_mem_reg *bo_mem)
{
        struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

        gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
        if (!ttm->num_pages)
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);

        /* Not implemented */
        return 0;
}

static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
        /* Not implemented */
        return 0;
}

static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
        struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

        ttm_dma_tt_fini(&gtt->ttm);
        kfree(gtt);
}

static struct ttm_backend_func virtio_gpu_backend_func = {
        .bind = &virtio_gpu_ttm_backend_bind,
        .unbind = &virtio_gpu_ttm_backend_unbind,
        .destroy = &virtio_gpu_ttm_backend_destroy,
};

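/* Allocate our DMA-capable ttm_tt wrapper and wire up the backend ops. */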
static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
                                               uint32_t page_flags)
{
        struct virtio_gpu_device *vgdev;
        struct virtio_gpu_ttm_tt *gtt;

        vgdev = virtio_gpu_get_vgdev(bo->bdev);
        gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
        if (gtt == NULL)
                return NULL;
        gtt->ttm.ttm.func = &virtio_gpu_backend_func;
        gtt->vgdev = vgdev;
        if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
                kfree(gtt);
                return NULL;
        }
        return &gtt->ttm.ttm;
}

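/*
 * Both placements are plain guest RAM, so a "move" never copies data: wait
 * for any pending GPU work, then simply re-label the placement.
 */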
static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
                                 struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
}

static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                              struct ttm_operation_ctx *ctx,
                              struct ttm_mem_reg *new_mem)
{
        int ret;

        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        virtio_gpu_move_null(bo, new_mem);
        return 0;
}

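/*
 * Keep the host in sync with placement changes: invalidate the resource's
 * backing store when the buffer leaves TT (or is being destroyed), and
 * re-attach its pages when it moves into TT.
 */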
static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
                                      bool evict,
                                      struct ttm_mem_reg *new_mem)
{
        struct virtio_gpu_object *bo;
        struct virtio_gpu_device *vgdev;

        bo = container_of(tbo, struct virtio_gpu_object, tbo);
        vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

        if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
                if (bo->hw_res_handle)
                        virtio_gpu_cmd_resource_inval_backing(vgdev,
                                                           bo->hw_res_handle);

        } else if (new_mem->placement & TTM_PL_FLAG_TT) {
                if (bo->hw_res_handle) {
                        virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
                                                 NULL);
                }
        }
}

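/* Drop the cached scatter/gather table before TTM swaps the pages out. */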
static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
{
        struct virtio_gpu_object *bo;
        struct virtio_gpu_device *vgdev;

        bo = container_of(tbo, struct virtio_gpu_object, tbo);
        vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

        if (bo->pages)
                virtio_gpu_object_free_sg_table(bo);
}

static struct ttm_bo_driver virtio_gpu_bo_driver = {
        .ttm_tt_create = &virtio_gpu_ttm_tt_create,
        .invalidate_caches = &virtio_gpu_invalidate_caches,
        .init_mem_type = &virtio_gpu_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = &virtio_gpu_evict_flags,
        .move = &virtio_gpu_bo_move,
        .verify_access = &virtio_gpu_verify_access,
        .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
        .io_mem_free = &virtio_gpu_ttm_io_mem_free,
        .move_notify = &virtio_gpu_bo_move_notify,
        .swap_notify = &virtio_gpu_bo_swap_notify,
};

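/*
 * Driver-level TTM setup: take the global references, initialize the BO
 * device, and create the (zero-sized, dummy-managed) TT memory region.
 */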
int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
{
        int r;

        r = virtio_gpu_ttm_global_init(vgdev);
        if (r)
                return r;
        /* No other users of the address space, so set it to 0 */
        r = ttm_bo_device_init(&vgdev->mman.bdev,
                               vgdev->mman.bo_global_ref.ref.object,
                               &virtio_gpu_bo_driver,
                               vgdev->ddev->anon_inode->i_mapping,
                               DRM_FILE_PAGE_OFFSET, 0);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver (%d).\n", r);
                goto err_dev_init;
        }

        r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                goto err_mm_init;
        }
        return 0;

err_mm_init:
        ttm_bo_device_release(&vgdev->mman.bdev);
err_dev_init:
        virtio_gpu_ttm_global_fini(vgdev);
        return r;
}

void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
{
        ttm_bo_device_release(&vgdev->mman.bdev);
        virtio_gpu_ttm_global_fini(vgdev);
        DRM_INFO("virtio_gpu: ttm finalized\n");
}