/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_resource_free(bo, &bo->mem);
}

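/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context controlling the waits and page allocation.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Move a buffer between TTM-backed placements. If the buffer is currently
 * bound outside system memory, wait for it to be idle and unbind it first.
 * The caching attributes of the pages are then adjusted and, unless the
 * destination is system memory, the TTM is populated and bound at the new
 * placement. On success @new_mem is consumed and assigned to @bo.
 *
 * Returns:
 * 0 on success, a negative error code on failure.
 */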
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_resource *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                pr_err("Failed to expire sync object before unbinding TTM\n");
                        return ret;
                }

                ttm_bo_tt_unbind(bo);
                ttm_bo_free_old_node(bo);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_populate(bo->bdev, ttm, ctx);
                if (unlikely(ret != 0))
                        return ret;

                ret = ttm_bo_tt_bind(bo, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        ttm_bo_assign_mem(bo, new_mem);
        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

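/**
 * ttm_mem_io_reserve
 *
 * @bdev: A pointer to a struct ttm_bo_device.
 * @mem: The resource whose I/O space should be reserved.
 *
 * Ask the driver to reserve the I/O space backing @mem, unless it is
 * already reserved. Drivers without an io_mem_reserve callback treat
 * this as a successful no-op.
 *
 * Returns:
 * 0 on success, a negative error code on failure.
 */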
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_resource *mem)
{
        if (mem->bus.offset || mem->bus.addr)
                return 0;

        mem->bus.is_iomem = false;
        if (!bdev->driver->io_mem_reserve)
                return 0;

        return bdev->driver->io_mem_reserve(bdev, mem);
}

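/**
 * ttm_mem_io_free
 *
 * @bdev: A pointer to a struct ttm_bo_device.
 * @mem: The resource whose I/O space should be released.
 *
 * Undo a successful ttm_mem_io_reserve() and clear the bus placement
 * information in @mem. Calling this on an unreserved resource is a no-op.
 */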
void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_resource *mem)
{
        if (!mem->bus.offset && !mem->bus.addr)
                return;

        if (bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);

        mem->bus.offset = 0;
        mem->bus.addr = NULL;
}

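/*
 * Map the I/O space backing @mem into kernel address space, reserving it
 * first if necessary. For resources that are not I/O memory, *virtual is
 * left NULL and 0 is returned, so callers can use the NULL pointer to
 * recognize system-memory placements.
 */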
static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
                                struct ttm_resource *mem,
                                void **virtual)
{
        int ret;
        void *addr;

        *virtual = NULL;
        ret = ttm_mem_io_reserve(bdev, mem);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.offset, bus_size);
                else
                        addr = ioremap(mem->bus.offset, bus_size);
                if (!addr) {
                        ttm_mem_io_free(bdev, mem);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
                                 struct ttm_resource *mem,
                                 void *virtual)
{
        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        ttm_mem_io_free(bdev, mem);
}

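/*
 * Copy one page between two I/O mappings, one 32-bit word at a time.
 * The two helpers below handle the mixed cases where only one side of
 * the copy is I/O memory and the other side is a TTM page that needs a
 * temporary kernel mapping with the right caching attributes.
 */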
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = kmap_atomic_prot(d, prot);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

        kunmap_atomic(dst);

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = kmap_atomic_prot(s, prot);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

        kunmap_atomic(src);

        return 0;
}

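/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context controlling the waits and page allocation.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move: wait for the buffer to be idle, map both placements and
 * copy the contents page by page with the CPU. When source and destination
 * overlap within the same memory type, the copy direction is reversed so
 * that pages are not overwritten before they have been read.
 *
 * Returns:
 * 0 on success, a negative error code on failure.
 */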
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *old_mem = &bo->mem;
        struct ttm_resource old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages * PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm) {
                ret = ttm_tt_populate(bdev, ttm, ctx);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else {
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                }
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;

        ttm_bo_assign_mem(bo, new_mem);

        if (!man->use_tt)
                ttm_bo_tt_destroy(bo);

out1:
        ttm_resource_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_resource_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_resource_free(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        struct ttm_transfer_obj *fbo;

        fbo = container_of(bo, struct ttm_transfer_obj, base);
        ttm_bo_put(fbo->bo);
        kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * 0 on success, a negative error code on failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_transfer_obj *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        fbo->base = *bo;
        fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

        ttm_bo_get(bo);
        fbo->bo = bo;

        /*
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        atomic_inc(&ttm_bo_glob.bo_count);
        INIT_LIST_HEAD(&fbo->base.ddestroy);
        INIT_LIST_HEAD(&fbo->base.lru);
        INIT_LIST_HEAD(&fbo->base.swap);
        fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.base.vma_node);

        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.acc_size = 0;
        if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;

        dma_resv_init(&fbo->base.base._resv);
        fbo->base.base.dev = NULL;
        ret = dma_resv_trylock(&fbo->base.base._resv);
        WARN_ON(!ret);

        *new_obj = &fbo->base;
        return 0;
}

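/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map (TTM_PL_FLAG_*).
 * @tmp: Page protection flags for a normal, cached mapping.
 *
 * Derive the page protection for a mapping from the TTM caching flags:
 * cached mappings are returned unchanged, while write-combined and
 * uncached placements get the corresponding architecture-specific
 * pgprot bits applied.
 */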
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

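/*
 * Map a range of an I/O-memory placement. If the driver premapped the
 * whole resource (bus.addr set), just offset into that mapping; otherwise
 * ioremap the range with caching attributes matching the placement.
 */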
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap(bo->mem.bus.offset + offset,
                                               size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = &bo->mem;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        int ret;

        BUG_ON(!ttm);

        ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
        if (ret)
                return ret;

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

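/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: Object describing the resulting mapping.
 *
 * Set up a kernel virtual mapping of the buffer object's data, using
 * ioremap for I/O-memory placements and kmap or vmap for system-memory
 * placements. The mapping is recorded in @map, which is later passed to
 * ttm_bo_kunmap() to tear it down.
 *
 * Returns:
 * 0 on success, -EINVAL if the range is out of bounds, or a negative
 * error code from reserving or mapping the memory.
 */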
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        unsigned long offset, size;
        int ret;

        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;

        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem)
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);

        offset = start_page << PAGE_SHIFT;
        size = num_pages << PAGE_SHIFT;
        return ttm_bo_ioremap(bo, offset, size, map);
}
EXPORT_SYMBOL(ttm_bo_kmap);

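/*
 * A minimal usage sketch (not taken from the kernel; the caller and its
 * already reserved, idle buffer object are assumed): map the first page,
 * clear it through the mapping and unmap again.
 *
 *        struct ttm_bo_kmap_obj map;
 *        bool is_iomem;
 *        void *virtual;
 *        int ret;
 *
 *        ret = ttm_bo_kmap(bo, 0, 1, &map);
 *        if (ret)
 *                return ret;
 *
 *        virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *        if (is_iomem)
 *                memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
 *        else
 *                memset(virtual, 0, PAGE_SIZE);
 *        ttm_bo_kunmap(&map);
 */

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Tear down a mapping set up by ttm_bo_kmap(), using the unmap call that
 * matches how the memory was mapped, and release the I/O reservation.
 */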
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

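/*
 * Synchronous tail of a move: wait for the buffer to be idle, destroy the
 * TTM if the destination does not need one, and free the old node.
 */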
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
                                 bool dst_use_tt)
{
        int ret;

        ret = ttm_bo_wait(bo, false, false);
        if (ret)
                return ret;

        if (!dst_use_tt)
                ttm_bo_tt_destroy(bo);
        ttm_bo_free_old_node(bo);
        return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
                                struct dma_fence *fence,
                                bool dst_use_tt)
{
        struct ttm_buffer_object *ghost_obj;
        int ret;

        /*
         * This should help pipeline ordinary buffer moves.
         *
         * Hang old buffer memory on a new buffer object,
         * and leave it to be released when the GPU
         * operation has completed.
         */

        dma_fence_put(bo->moving);
        bo->moving = dma_fence_get(fence);

        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
        if (ret)
                return ret;

        dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

        /*
         * If we're not moving to fixed memory, the TTM object
         * needs to stay alive. Otherwise hang it on the ghost
         * bo to be unbound and destroyed.
         */

        if (dst_use_tt)
                ghost_obj->ttm = NULL;
        else
                bo->ttm = NULL;

        dma_resv_unlock(&ghost_obj->base._resv);
        ttm_bo_put(ghost_obj);
        return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
                                       struct dma_fence *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);

        /*
         * The BO doesn't have a TTM that we need to bind/unbind. Just
         * remember this eviction and free up the allocation.
         */
        spin_lock(&from->move_lock);
        if (!from->move || dma_fence_is_later(fence, from->move)) {
                dma_fence_put(from->move);
                from->move = dma_fence_get(fence);
        }
        spin_unlock(&from->move_lock);

        ttm_bo_free_old_node(bo);

        dma_fence_put(bo->moving);
        bo->moving = dma_fence_get(fence);
}

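/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when the move is complete.
 * @evict: Whether this is an eviction.
 * @pipeline: Whether the eviction may be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Finish up an accelerated move: attach @fence to the buffer, hand the old
 * placement either to a ghost object (ordinary moves) or to the source
 * manager's move fence (pipelined evictions), or simply wait for idle,
 * and finally assign @new_mem to @bo.
 *
 * Returns:
 * 0 on success, a negative error code on failure.
 */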
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
                              bool pipeline,
                              struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        int ret = 0;

        dma_resv_add_excl_fence(bo->base.resv, fence);
        if (!evict)
                ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
        else if (!from->use_tt && pipeline)
                ttm_bo_move_pipeline_evict(bo, fence);
        else
                ret = ttm_bo_wait_free_node(bo, man->use_tt);

        if (ret)
                return ret;

        ttm_bo_assign_mem(bo, new_mem);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

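/**
 * ttm_bo_pipeline_gutting
 *
 * @bo: The buffer object to be gutted.
 *
 * Hand the current contents of @bo over to a ghost object that is kept
 * alive until all its fences have signaled, and leave @bo behind as an
 * empty system-memory buffer without a TTM.
 *
 * Returns:
 * 0 on success, a negative error code on failure.
 */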
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
        struct ttm_buffer_object *ghost;
        int ret;

        ret = ttm_buffer_object_transfer(bo, &ghost);
        if (ret)
                return ret;

        ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret)
                ttm_bo_wait(bo, false, false);

        memset(&bo->mem, 0, sizeof(bo->mem));
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->ttm = NULL;

        dma_resv_unlock(&ghost->base._resv);
        ttm_bo_put(ghost);

        return 0;
}