// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
static const struct ttm_place vram_placement_flags = {
	.mem_type = TTM_PL_VRAM
};

static const struct ttm_place sys_placement_flags = {
	.mem_type = TTM_PL_SYSTEM
};

static const struct ttm_place gmr_placement_flags = {
	.mem_type = VMW_PL_GMR
};

static const struct ttm_place mob_placement_flags = {
	.mem_type = VMW_PL_MOB
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{ .mem_type = TTM_PL_VRAM },
	{ .mem_type = VMW_PL_GMR }
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{ .mem_type = VMW_PL_GMR },
	{ .mem_type = TTM_PL_VRAM }
};

static const struct ttm_place vmw_sys_placement_flags = {
	.mem_type = VMW_PL_SYSTEM
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_pt_sys_placement = {
	.num_placement = 1,
	.placement = &vmw_sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vmw_sys_placement_flags
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{ .mem_type = TTM_PL_SYSTEM },
	{ .mem_type = VMW_PL_GMR },
	{ .mem_type = VMW_PL_MOB }
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
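/*
 * Usage sketch (illustrative only, not driver code): placements like
 * those above are handed to TTM when (re)validating a buffer object.
 * For example, migrating a buffer to system memory could look like:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret = ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
 *
 * TTM tries the .placement list first and falls back to the
 * .busy_placement list under memory pressure.
 */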
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
 * __vmw_piter_non_sg_next - Helper functions to advance
 * a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Pointer offset used to update current array position
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
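/*
 * Usage sketch (illustrative only, not driver code): because of the
 * __sg_page_iter_start convention noted above, the iterator starts one
 * step before the first page and must be advanced before each use.
 * vmw_piter_next() and vmw_piter_dma_addr() are the inline wrappers in
 * vmwgfx_drv.h that dispatch through the pointers selected here:
 *
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *		...
 *	}
 */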
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}
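/*
 * Ownership sketch implied by the comment above (illustrative only,
 * assuming a cache-coherent platform where dma_sync_sg_for_cpu() is a
 * NOP, as on the x86 systems this driver targets):
 *
 *	vmw_ttm_map_for_dma(vmw_tt);	device may DMA to/from the pages;
 *					CPU writes are illegal under a
 *					strict reading of the DMA API
 *	vmw_ttm_unmap_from_dma(vmw_tt);	CPU regains full ownership
 */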
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping mode and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	int ret = 0;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = NULL;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vsgt->sgt = &vmw_tt->sgt;
		ret = sg_alloc_table_from_pages_segment(
			&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
			(unsigned long)vsgt->num_pages << PAGE_SHIFT,
			dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
		if (ret)
			goto out_sg_alloc_fail;

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;
		break;
	default:
		break;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	return ret;
}
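/*
 * Note (illustrative only): in the vmw_dma_alloc_coherent mode no sg
 * table is built; the addresses in dma_ttm.dma_address are used as-is.
 * The function is also idempotent, per the doc comment above:
 *
 *	ret = vmw_ttm_map_dma(vmw_tt);	builds and maps the sg table
 *	ret = vmw_ttm_map_dma(vmw_tt);	already mapped: returns 0
 */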
/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}
/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

	return &vmw_tt->vsgt;
}
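/*
 * Usage sketch (illustrative only, not driver code): with the buffer
 * object reserved or pinned, the returned table is typically walked
 * with the piter helpers above. write_pte() below is a hypothetical
 * stand-in for whatever consumes the addresses (e.g. the MOB page
 * table code):
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter))
 *		write_pte(vmw_piter_dma_addr(&viter));
 */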
static int vmw_ttm_bind(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob = vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}
		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	case VMW_PL_SYSTEM:
		/* Nothing to be done for a system bind */
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}
static void vmw_ttm_unbind(struct ttm_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	case VMW_PL_SYSTEM:
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}
static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	vmw_ttm_unmap_dma(vmw_be);
	ttm_tt_fini(ttm);
	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}
static int vmw_ttm_populate(struct ttm_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	/* TODO: maybe completely drop this ? */
	if (ttm_tt_is_populated(ttm))
		return 0;

	ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);

	return ret;
}
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm);

	vmw_ttm_unbind(bdev, ttm);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	ttm_pool_free(&bdev->pool, ttm);
}
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				     ttm_cached);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				  ttm_cached);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}
static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}
static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_cached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The old memory where we move from
 * @new_mem: The struct ttm_resource indicating to what memory
 *	     region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_resource *old_mem,
			    struct ttm_resource *new_mem)
{
	vmw_bo_move_notify(bo, new_mem);
	vmw_query_move_notify(bo, old_mem, new_mem);
}
/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}
static bool vmw_memtype_is_system(uint32_t mem_type)
{
	return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
}
static int vmw_move(struct ttm_buffer_object *bo,
		    bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop)
{
	struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
	int ret;

	if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
		if (ret)
			return ret;
	}

	vmw_move_notify(bo, bo->resource, new_mem);

	if (old_man->use_tt && new_man->use_tt) {
		if (vmw_memtype_is_system(bo->resource->mem_type)) {
			ttm_bo_move_null(bo, new_mem);
			return 0;
		}
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto fail;

		vmw_ttm_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	} else {
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	vmw_move_notify(bo, new_mem, bo->resource);
	return ret;
}
struct ttm_device_funcs vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = vmw_move,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
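/*
 * Registration sketch (illustrative only): vmw_bo_driver is handed to
 * TTM at device-init time, roughly as vmwgfx_drv.c does it, after which
 * TTM calls back through the hooks above for tt creation, population,
 * binding and moves:
 *
 *	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
 *			      dev_priv->drm.dev,
 *			      dev_priv->drm.anon_inode->i_mapping,
 *			      dev_priv->drm.vma_offset_manager,
 *			      dev_priv->map_mode == vmw_dma_alloc_coherent,
 *			      false);
 */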
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_bo_create_kernel(dev_priv, bo_size,
				   &vmw_pt_sys_placement,
				   &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;
	return ret;
}
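/*
 * Usage sketch (illustrative only, not driver code): callers such as
 * the MOB/otable setup code use this helper to obtain a kernel buffer
 * object whose pages are already populated and DMA-mapped:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = vmw_bo_create_and_populate(dev_priv, bo_size, &bo);
 *
 *	if (ret == 0) {
 *		const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *		...
 *	}
 */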