// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation- and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @coherent_count: If switching backup buffers, number of new coherent
 * resources that will have this buffer as a backup buffer.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct vmwgfx_hash_item hash;
	unsigned int coherent_count;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
 * to a resource.
 * @new_backup_offset: Offset into the new backup mob for resources that can
 * share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
 * the command stream provides a mob bind operation.
 * @switching_backup: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty_set: Change dirty status of the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 * @private: Optionally additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct vmwgfx_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[];
};

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
			ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
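
/*
 * Usage sketch (illustrative only, not part of the driver): validation
 * nodes are carved out of the context with this bump allocator instead
 * of kmalloc():
 *
 *	struct vmw_validation_bo_node *node =
 *		vmw_validation_mem_alloc(ctx, sizeof(*node));
 *	if (!node)
 *		return -ENOMEM;
 *
 * Returned memory is zeroed (the backing pages are allocated with
 * __GFP_ZERO) and is reclaimed in bulk by vmw_validation_mem_free()
 * rather than freed individually.
 */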

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
	if (ctx->vm && ctx->total_mem) {
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct vmwgfx_hash_item *hash;

		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: Reference counted resource pointer.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct vmwgfx_hash_item *hash;

		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}

out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as mob, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer "
					  "validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}
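
/*
 * Usage sketch (illustrative only): the same buffer object may be added
 * repeatedly; duplicates are merged as long as the usage flags agree:
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 *	if (!ret)
 *		ret = vmw_validation_add_bo(ctx, vbo, false, false);
 *
 * The second call finds the duplicate and returns 0, whereas adding the
 * same buffer again with different as_mob / cpu_blit flags returns
 * -EINVAL.
 */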

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation "
				  "entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
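
/*
 * Usage sketch (illustrative only; struct my_priv is a hypothetical
 * caller-side type): @priv_size reserves caller-private space behind the
 * node, returned through @p_node:
 *
 *	struct my_priv *priv;
 *	void *p;
 *	bool first;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(*priv),
 *					  VMW_RES_DIRTY_NONE, &p, &first);
 *	if (ret)
 *		return ret;
 *	priv = p;
 *
 * The private area lives in validation-context memory and is reclaimed
 * together with the node; it must not be freed separately.
 */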

/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX
 */
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
				  void *val_private, u32 dirty)
{
	struct vmw_validation_res_node *val;

	if (!dirty)
		return;

	val = container_of(val_private, typeof(*val), private);
	val->dirty_set = 1;
	/* Overwriting previous information here is intentional! */
	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}
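
/*
 * Usage sketch (illustrative only): the new backup buffer must already
 * have been registered with the context before recording the switch:
 *
 *	ret = vmw_validation_add_bo(ctx, new_vbo, true, false);
 *	if (!ret)
 *		vmw_validation_res_switch_backup(ctx, val_private,
 *						 new_vbo, 0);
 *
 * The switch only takes effect on the commit path, i.e. when
 * vmw_validation_res_unreserve() runs with backoff == false.
 */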

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				goto out_unreserve;
		}

		if (val->switching_backup && val->new_backup &&
		    res->coherent) {
			struct vmw_validation_bo_node *bo_node =
				vmw_validation_find_bo_dup(ctx,
							   val->new_backup);

			if (WARN_ON(!bo_node)) {
				ret = -EINVAL;
				goto out_unreserve;
			}
			bo_node->coherent_count++;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_backup,
						       val->new_backup,
						       val->new_backup_offset);
		}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	if (vbo->base.pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	return ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		struct vmw_buffer_object *vbo =
			container_of(entry->base.bo, typeof(*vbo), base);

		if (entry->cpu_blit) {
			struct ttm_operation_ctx ttm_ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement, &ttm_ctx);
		} else {
			ret = vmw_validation_bo_validate_single
				(entry->base.bo, intr, entry->as_mob);
		}
		if (ret)
			return ret;

		/*
		 * Rather than having the resource code allocate the bo
		 * dirty tracker in resource_unreserve() where we can't fail,
		 * do it here when validating the buffer object.
		 */
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;

			while (coherent_count) {
				ret = vmw_bo_dirty_add(vbo);
				if (ret)
					return ret;

				coherent_count--;
			}
			entry->coherent_count -= coherent_count;
		}

		if (vbo->dirty)
			vmw_bo_dirty_scan(vbo);
	}
	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr, val->dirty_set &&
					    val->dirty);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource- and buffer object registering,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources- and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * object and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		ttm_bo_put(entry->base.bo);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
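
/*
 * Lifecycle sketch (illustrative only; DECLARE_VAL_CONTEXT() comes from
 * vmwgfx_validation.h, while res_mutex and the submission step are
 * assumed placeholders, not this file's API):
 *
 *	DECLARE_VAL_CONTEXT(val_ctx, ht, 1);
 *
 *	ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
 *	if (ret)
 *		return ret;
 *	ret = vmw_validation_prepare(&val_ctx, res_mutex, true);
 *	if (ret)
 *		goto out_unref;
 *	... submit the command stream and obtain a fence ...
 *	vmw_validation_done(&val_ctx, fence);
 *	return 0;
 *
 * out_unref:
 *	vmw_validation_unref_lists(&val_ctx);
 *
 * Had the submission failed after a successful prepare,
 * vmw_validation_revert() would be called instead of vmw_validation_done().
 */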

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
	       vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
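
/*
 * Note on the preload helpers (descriptive, not new API): both preload
 * functions allocate the worst-case node size and then hand the bytes
 * back by bumping ctx->mem_size_left. The page backing the next
 * vmw_validation_add_bo() / vmw_validation_add_resource() call is thus
 * guaranteed to already exist, so that call cannot sleep in alloc_page().
 */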

/**
 * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
 * validation context.
 * @ctx: The validation context
 *
 * This function unreserves the buffer objects previously reserved using
 * vmw_validation_bo_reserve. It's typically used as part of an error path.
 */
void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;

	/*
	 * Switching coherent resource backup buffers failed.
	 * Release corresponding buffer object dirty trackers.
	 */
	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;
			struct vmw_buffer_object *vbo =
				container_of(entry->base.bo, typeof(*vbo),
					     base);

			while (coherent_count--)
				vmw_bo_dirty_release(vbo);
		}
	}

	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
}