// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/dmapool.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
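
/*
 * Worked example (illustrative only, assuming sizeof(SVGACBHeader) is at
 * most 64 bytes; the exact size comes from the SVGA device headers):
 * ALIGN(sizeof(SVGACBHeader), 64) rounds up to 64, so
 * VMW_CMDBUF_INLINE_SIZE becomes 1024 - 64 = 960 bytes, and a
 * struct vmw_cmdbuf_dheader (header plus aligned inline space, see below)
 * totals exactly 1024 bytes. Four such allocations then pack evenly into a
 * 4 KiB page, which is what the comment above asks of the DMA pool
 * allocation size.
 */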
/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Identifies a block command submission.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
	bool block_submission;
};
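
/*
 * Note on buffer flow through these queues: a buffer is first placed on
 * @submitted, moves to @hw_submitted once handed to the device, and on
 * completion is either freed, moved to the manager's error list (command
 * error) or moved to @preempted for later resubmission. See
 * vmw_cmdbuf_ctx_submit() and vmw_cmdbuf_ctx_process() below.
 */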
/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct mutex error_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
	u32 num_contexts;
};
/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};
/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};
/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};
/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
	     ++(_i), _ctx = &(_man)->ctx[_i])

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptible when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}
/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}
/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}
/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock(&man->lock);
}
/**
 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}
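
/*
 * Note: submission is a two-register doorbell. The upper 32 bits of the
 * header's DMA address go to SVGA_REG_COMMAND_HIGH, then the lower 32 bits,
 * with the context id OR'ed into the low bits, go to SVGA_REG_COMMAND_LOW.
 * The write to SVGA_REG_COMMAND_LOW is what makes the device pick up the
 * buffer, which is why it must come last.
 */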
/**
 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}
/**
 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted) &&
	       !ctx->block_submission) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_move_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}
/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Pass back count of non-empty command submitted lists.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically freeing them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
			WARN_ONCE(true, "Command buffer error.\n");
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &ctx->preempted);
			break;
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			WARN_ONCE(true, "Command buffer header error.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}
/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}
/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}
/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}
/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool send_fence = false;
	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
	int i;
	struct vmw_cmdbuf_context *ctx;
	bool global_block = false;

	for_each_cmdbuf_ctx(man, i, ctx)
		INIT_LIST_HEAD(&restart_head[i]);

	mutex_lock(&man->error_mutex);
	spin_lock(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		SVGACBHeader *cb_hdr = entry->cb_header;
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
			(entry->cmd + cb_hdr->errorOffset);
		u32 error_cmd_size, new_start_offset;
		const char *cmd_name;

		list_del_init(&entry->list);
		global_block = true;

		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
			VMW_DEBUG_USER("Unknown command causing device error.\n");
			VMW_DEBUG_USER("Command buffer offset is %lu\n",
				       (unsigned long) cb_hdr->errorOffset);
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
			       cmd_name);
		VMW_DEBUG_USER("Command buffer offset is %lu\n",
			       (unsigned long) cb_hdr->errorOffset);
		VMW_DEBUG_USER("Command size is %lu\n",
			       (unsigned long) error_cmd_size);

		new_start_offset = cb_hdr->errorOffset + error_cmd_size;

		if (new_start_offset >= cb_hdr->length) {
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		if (man->using_mob)
			cb_hdr->ptr.mob.mobOffset += new_start_offset;
		else
			cb_hdr->ptr.pa += (u64) new_start_offset;

		entry->cmd += new_start_offset;
		cb_hdr->length -= new_start_offset;
		cb_hdr->errorOffset = 0;
		cb_hdr->status = SVGA_CB_STATUS_NONE;
		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		man->ctx[i].block_submission = true;

	spin_unlock(&man->lock);

	/* Preempt all contexts */
	if (global_block && vmw_cmdbuf_preempt(man, 0))
		DRM_ERROR("Failed preempting command buffer contexts\n");

	spin_lock(&man->lock);
	for_each_cmdbuf_ctx(man, i, ctx) {
		/* Move preempted command buffers to the preempted queue. */
		vmw_cmdbuf_ctx_process(man, ctx, &dummy);

		/*
		 * Add the preempted queue after the command buffer
		 * that caused an error.
		 */
		list_splice_init(&ctx->preempted, restart_head[i].prev);

		/*
		 * Finally add all command buffers first in the submitted
		 * queue, to rerun them.
		 */

		ctx->block_submission = false;
		list_splice_init(&restart_head[i], &ctx->submitted);
	}

	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);

	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
		DRM_ERROR("Failed restarting command buffer contexts\n");

	/* Send a new fence in case one was removed */
	if (send_fence) {
		vmw_cmd_send_fence(man->dev_priv, &dummy);
		wake_up_all(&man->idle_queue);
	}

	mutex_unlock(&man->error_mutex);
}
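
/*
 * Recovery sequence implemented above, in short: buffers whose status was
 * SVGA_CB_STATUS_COMMAND_ERROR were parked on the manager's error list by
 * vmw_cmdbuf_ctx_process(). This work function skips past the offending
 * command (or drops the buffer entirely if nothing follows it), blocks and
 * preempts all contexts, splices any preempted buffers in after the
 * restarted ones, and finally re-enables submission and restarts the
 * device contexts.
 */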
/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock(&man->lock);

	return idle;
}
/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	if (!cur)
		return;

	spin_lock(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}
/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting for the lock.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}
/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}
/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if the
 * allocation succeeded, and false otherwise.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock(&man->lock);
	info->done = !ret;

	return info->done;
}
/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
 * become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PFN_UP(size);
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}
/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock(&man->lock);

	return ret;
}
/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}
/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
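
/*
 * Example usage (illustrative sketch only, with a made-up command size;
 * real callers size the buffer for the SVGA commands they encode, and
 * bytes_written stands for however much was actually encoded):
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, 256, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd = vmw_cmdbuf_reserve(man, 256, SVGA3D_INVALID_ID, true, header);
 *	... encode at most 256 bytes of commands at cmd ...
 *	vmw_cmdbuf_commit(man, bytes_written, header, true);
 */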
/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}
/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * is used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;

	return header->cmd;
}
/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * is used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
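
/*
 * Example of the "current buffer" path (illustrative sketch only): passing
 * header == NULL to the reserve/commit pair batches small submissions into
 * the manager's internal @cur buffer, which is flushed when full or on
 * explicit request:
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, 64, SVGA3D_INVALID_ID, true, NULL);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	... encode up to 64 bytes of commands at cmd ...
 *	vmw_cmdbuf_commit(man, 64, NULL, false);
 */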
/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}
/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to pass command through.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
	struct {
		uint32 id;
		SVGADCCmdPreempt body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_PREEMPT;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
	cmd.body.ignoreIDZero = 0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start/stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 *
 * Set the size and allocate the main command buffer space pool.
 * If successful, this enables large command submissions.
 * Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
		    !dev_priv->has_mob)
			return -ENOMEM;

		ret = vmw_bo_create_kernel(dev_priv, size,
					   &vmw_mob_placement,
					   &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	drm_info(&dev_priv->drm,
		 "Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob) {
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	}

	return ret;
}
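
/*
 * Typical bring-up sequence (illustrative, based on the kernel-doc of this
 * function and of vmw_cmdbuf_man_create() below; the actual call sites live
 * elsewhere in the driver):
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);	   (inline submissions only)
 *	...
 *	ret = vmw_cmdbuf_set_pool_size(man, size); (enables large submissions)
 */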
/**
 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or error pointer
 * on failure. The command buffer manager will be enabled for submissions of
 * size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	unsigned int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
		2 : 1;
	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       dev_priv->drm.dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					dev_priv->drm.dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	mutex_init(&man->error_mutex);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, 0, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer contexts\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}
/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	} else {
		dma_free_coherent(man->dev_priv->drm.dev,
				  man->size, man->map, man->handle);
	}
}
/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);

	if (vmw_cmdbuf_startstop(man, 0, false))
		DRM_ERROR("Failed stopping command buffer contexts.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	mutex_destroy(&man->error_mutex);
	kfree(man);
}