GNU Linux-libre 4.19.268-gnu1
drivers/gpu/drm/ttm/ttm_tt.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        uint32_t page_flags = 0;

        reservation_object_assert_held(bo->resv);

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        if (bdev->no_retry)
                page_flags |= TTM_PAGE_FLAG_NO_RETRY;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                break;
        case ttm_bo_type_kernel:
                break;
        case ttm_bo_type_sg:
                page_flags |= TTM_PAGE_FLAG_SG;
                break;
        default:
                bo->ttm = NULL;
                pr_err("Illegal buffer object type\n");
                return -EINVAL;
        }

        bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
        if (unlikely(bo->ttm == NULL))
                return -ENOMEM;

        return 0;
}
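
/*
 * A minimal sketch of the driver hook invoked above, assuming a hypothetical
 * driver whose tt needs no extra state (the foo_* names, including the
 * foo_backend_func table sketched further below, are illustrative only, not
 * a real driver):
 *
 * static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
 *                                         uint32_t page_flags)
 * {
 *         struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *         if (!tt)
 *                 return NULL;
 *         tt->func = &foo_backend_func;    // bind/unbind/destroy hooks
 *         if (ttm_tt_init(tt, bo, page_flags)) {
 *                 kfree(tt);
 *                 return NULL;
 *         }
 *         return tt;
 * }
 */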

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
                        GFP_KERNEL | __GFP_ZERO);
        if (!ttm->pages)
                return -ENOMEM;
        return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
                                          sizeof(*ttm->ttm.pages) +
                                          sizeof(*ttm->dma_address),
                                          GFP_KERNEL | __GFP_ZERO);
        if (!ttm->ttm.pages)
                return -ENOMEM;
        ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
        return 0;
}
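
/*
 * Note on the helper above: pages[] and dma_address[] share a single
 * kvmalloc allocation, laid out back to back, so one kvfree() of
 * ttm.pages releases both (see ttm_dma_tt_fini() below):
 *
 *   [ struct page *pages[num_pages] | dma_addr_t dma_address[num_pages] ]
 *   ^ ttm->ttm.pages                  ^ ttm->dma_address
 */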

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
                                          sizeof(*ttm->dma_address),
                                          GFP_KERNEL | __GFP_ZERO);
        if (!ttm->dma_address)
                return -ENOMEM;
        return 0;
}

static int ttm_tt_set_page_caching(struct page *p,
                                   enum ttm_caching_state c_old,
                                   enum ttm_caching_state c_new)
{
        int ret = 0;

        if (PageHighMem(p))
                return 0;

        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */

                ret = ttm_set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                ret = ttm_set_pages_wc(p, 1);
        else if (c_new == tt_uncached)
                ret = ttm_set_pages_uc(p, 1);

        return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        /* Roll back: convert the pages we already switched back to the
         * previous caching state. */
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
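
/*
 * A minimal usage sketch (hypothetical caller, mirroring what a buffer-move
 * path would do): adjust the kernel-map caching to the new placement before
 * binding the tt there.
 *
 *     ret = ttm_tt_set_placement_caching(bo->ttm, new_mem->placement);
 *     if (ret)
 *             return ret;
 *     ret = ttm_tt_bind(bo->ttm, new_mem, ctx);
 */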

void ttm_tt_destroy(struct ttm_tt *ttm)
{
        if (ttm == NULL)
                return;

        ttm_tt_unbind(ttm);

        if (ttm->state == tt_unbound)
                ttm_tt_unpopulate(ttm);

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        ttm->swap_storage = NULL;
        ttm->func->destroy(ttm);
}

void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                        uint32_t page_flags)
{
        ttm->bdev = bo->bdev;
        ttm->num_pages = bo->num_pages;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;
        ttm->sg = bo->sg;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                uint32_t page_flags)
{
        ttm_tt_init_fields(ttm, bo, page_flags);

        if (ttm_tt_alloc_page_directory(ttm)) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
        kvfree(ttm->pages);
        ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
                    uint32_t page_flags)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        ttm_tt_init_fields(ttm, bo, page_flags);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);
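
/*
 * A minimal sketch of the matching teardown a driver would hang off its
 * backend destroy hook, assuming it allocated a bare struct ttm_dma_tt and
 * initialized it with ttm_dma_tt_init() above (hypothetical foo_* naming
 * as before):
 *
 * static void foo_ttm_backend_destroy(struct ttm_tt *ttm)
 * {
 *         struct ttm_dma_tt *dma_tt = container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *         ttm_dma_tt_fini(dma_tt);
 *         kfree(dma_tt);
 * }
 */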

int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
                   uint32_t page_flags)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;
        int ret;

        ttm_tt_init_fields(ttm, bo, page_flags);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (page_flags & TTM_PAGE_FLAG_SG)
                ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
        else
                ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
        if (ret) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        if (ttm->pages)
                kvfree(ttm->pages);
        else
                kvfree(ttm_dma->dma_address);
        ttm->pages = NULL;
        ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;

        if (ttm->state == tt_bound) {
                ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
                struct ttm_operation_ctx *ctx)
{
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        ret = ttm_tt_populate(ttm, ctx);
        if (ret)
                return ret;

        ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
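
/*
 * The bind/unbind hooks used above come from the per-tt function table set
 * up at creation time. A minimal sketch of such a table for a hypothetical
 * foo driver (the actual GART programming is hardware specific and elided):
 *
 * static int foo_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 * {
 *         // map ttm->pages into the GPU aperture at bo_mem->start
 *         return 0;
 * }
 *
 * static int foo_ttm_unbind(struct ttm_tt *ttm)
 * {
 *         // tear that mapping down again
 *         return 0;
 * }
 *
 * static struct ttm_backend_func foo_backend_func = {
 *         .bind = foo_ttm_bind,
 *         .unbind = foo_ttm_unbind,
 *         .destroy = foo_ttm_backend_destroy,      // see sketch above
 * };
 */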

/*
 * Copy the contents of a swapped-out tt back from its shmem swap storage
 * into the (already populated) pages, then drop the swap file unless it is
 * a persistent one supplied by the caller.
 */
int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                gfp_t gfp_mask = mapping_gfp_mask(swap_space);

                gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
                from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL))
                        goto out_err;

                copy_highpage(to_page, from_page);
                put_page(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        return ret;
}

/*
 * Copy the tt's pages out to shmem-backed storage (so the kernel can swap
 * them), then release the pages themselves via ttm_tt_unpopulate(). When no
 * persistent_swap_storage is supplied, an anonymous shmem file is created
 * here and released again on swapin.
 */
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        if (!persistent_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (IS_ERR(swap_storage)) {
                        pr_err("Failed allocating swap storage\n");
                        return PTR_ERR(swap_storage);
                }
        } else {
                swap_storage = persistent_swap_storage;
        }

        swap_space = swap_storage->f_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                gfp_t gfp_mask = mapping_gfp_mask(swap_space);

                gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;

                to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
                if (IS_ERR(to_page)) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
                copy_highpage(to_page, from_page);
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                put_page(to_page);
        }

        ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;
out_err:
        if (!persistent_swap_storage)
                fput(swap_storage);

        return ret;
}

static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
        pgoff_t i;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i)
                ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
}

int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (ttm->bdev->driver->ttm_tt_populate)
                ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
        else
                ret = ttm_pool_populate(ttm, ctx);
        if (!ret)
                ttm_tt_add_mapping(ttm);
        return ret;
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
        pgoff_t i;
        struct page **page = ttm->pages;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i) {
                (*page)->mapping = NULL;
                (*page++)->index = 0;
        }
}

void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        if (ttm->state == tt_unpopulated)
                return;

        ttm_tt_clear_mapping(ttm);
        if (ttm->bdev->driver->ttm_tt_unpopulate)
                ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        else
                ttm_pool_unpopulate(ttm);
}
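
/*
 * A minimal sketch of the optional driver hooks consulted above, deferring
 * straight to the default page-pool helpers (hypothetical foo_* driver; a
 * real driver would typically also DMA-map the pages in populate and unmap
 * them in unpopulate):
 *
 * static int foo_ttm_tt_populate(struct ttm_tt *ttm,
 *                                struct ttm_operation_ctx *ctx)
 * {
 *         return ttm_pool_populate(ttm, ctx);
 * }
 *
 * static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
 * {
 *         ttm_pool_unpopulate(ttm);
 * }
 */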