/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
        (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))

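/*
 * Illustrative arithmetic (not from the original source): ALIGN() rounds
 * sizeof(SVGACBHeader) up to the next multiple of 64. If the header were,
 * say, 32 bytes, VMW_CMDBUF_INLINE_SIZE would be 1024 - 64 = 960 bytes,
 * so each pool entry occupies exactly 1024 bytes and four entries fit in
 * a 4096-byte page, satisfying the comment above.
 */
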
/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether command submission on this context is currently
 * blocked. Set by the error handler while a context is being recovered.
 */
struct vmw_cmdbuf_context {
        struct list_head submitted;
        struct list_head hw_submitted;
        struct list_head preempted;
        unsigned num_hw_submitted;
        bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 */
struct vmw_cmdbuf_man {
        struct mutex cur_mutex;
        struct mutex space_mutex;
        struct mutex error_mutex;
        struct work_struct work;
        struct vmw_private *dev_priv;
        struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
        struct list_head error;
        struct drm_mm mm;
        struct ttm_buffer_object *cmd_space;
        struct ttm_bo_kmap_obj map_obj;
        u8 *map;
        struct vmw_cmdbuf_header *cur;
        size_t cur_pos;
        size_t default_size;
        unsigned max_hw_submitted;
        spinlock_t lock;
        struct dma_pool *headers;
        struct dma_pool *dheaders;
        wait_queue_head_t alloc_queue;
        wait_queue_head_t idle_queue;
        bool irq_on;
        bool using_mob;
        bool has_pool;
        dma_addr_t handle;
        size_t size;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
        struct vmw_cmdbuf_man *man;
        SVGACBHeader *cb_header;
        SVGACBContext cb_context;
        struct list_head list;
        struct drm_mm_node node;
        dma_addr_t handle;
        u8 *cmd;
        size_t size;
        size_t reserved;
        bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
        SVGACBHeader cb_header;
        u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
        size_t page_size;
        struct drm_mm_node *node;
        bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx)                             \
        for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
             ++(_i), ++(_ctx))

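/*
 * Usage sketch for the iterator above (illustrative; mirrors how
 * vmw_cmdbuf_man_create() below initializes its contexts):
 *
 *      struct vmw_cmdbuf_context *ctx;
 *      int i;
 *
 *      for_each_cmdbuf_ctx(man, i, ctx)
 *              vmw_cmdbuf_ctx_init(ctx);
 */
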
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptibly when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
        if (interruptible) {
                if (mutex_lock_interruptible(&man->cur_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->cur_mutex);
        }

        return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
        mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_dheader *dheader;

        if (WARN_ON_ONCE(!header->inline_space))
                return;

        dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
                               cb_header);
        dma_pool_free(header->man->dheaders, dheader, header->handle);
        kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        lockdep_assert_held_once(&man->lock);

        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }

        drm_mm_remove_node(&header->node);
        wake_up_all(&man->alloc_queue);
        if (header->cb_header)
                dma_pool_free(man->headers, header->cb_header,
                              header->handle);
        kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        /* Avoid locking if inline_space */
        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }
        spin_lock(&man->lock);
        __vmw_cmdbuf_header_free(header);
        spin_unlock(&man->lock);
}


/**
 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;
        u32 val;

        val = upper_32_bits(header->handle);
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

        val = lower_32_bits(header->handle);
        val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

        return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
        INIT_LIST_HEAD(&ctx->hw_submitted);
        INIT_LIST_HEAD(&ctx->submitted);
        INIT_LIST_HEAD(&ctx->preempted);
        ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
                                  struct vmw_cmdbuf_context *ctx)
{
        while (ctx->num_hw_submitted < man->max_hw_submitted &&
               !list_empty(&ctx->submitted) &&
               !ctx->block_submission) {
                struct vmw_cmdbuf_header *entry;
                SVGACBStatus status;

                entry = list_first_entry(&ctx->submitted,
                                         struct vmw_cmdbuf_header,
                                         list);

                status = vmw_cmdbuf_header_submit(entry);

                /* This should never happen */
                if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        break;
                }

                list_del(&entry->list);
                list_add_tail(&entry->list, &ctx->hw_submitted);
                ctx->num_hw_submitted++;
        }
}

/**
 * vmw_cmdbuf_ctx_process: Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Pass-back counter, incremented if the context still has
 * command buffers queued for submission when this function returns.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically free them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_context *ctx,
                                   int *notempty)
{
        struct vmw_cmdbuf_header *entry, *next;

        vmw_cmdbuf_ctx_submit(man, ctx);

        list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
                SVGACBStatus status = entry->cb_header->status;

                if (status == SVGA_CB_STATUS_NONE)
                        break;

                list_del(&entry->list);
                wake_up_all(&man->idle_queue);
                ctx->num_hw_submitted--;
                switch (status) {
                case SVGA_CB_STATUS_COMPLETED:
                        __vmw_cmdbuf_header_free(entry);
                        break;
                case SVGA_CB_STATUS_COMMAND_ERROR:
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &man->error);
                        schedule_work(&man->work);
                        break;
                case SVGA_CB_STATUS_PREEMPTED:
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &ctx->preempted);
                        break;
                case SVGA_CB_STATUS_CB_HEADER_ERROR:
                        WARN_ONCE(true, "Command buffer header error.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                default:
                        WARN_ONCE(true, "Undefined command buffer status.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                }
        }

        vmw_cmdbuf_ctx_submit(man, ctx);
        if (!list_empty(&ctx->submitted))
                (*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
        int notempty;
        struct vmw_cmdbuf_context *ctx;
        int i;

retry:
        notempty = 0;
        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_process(man, ctx, &notempty);

        if (man->irq_on && !notempty) {
                vmw_generic_waiter_remove(man->dev_priv,
                                          SVGA_IRQFLAG_COMMAND_BUFFER,
                                          &man->dev_priv->cmdbuf_waiters);
                man->irq_on = false;
        } else if (!man->irq_on && notempty) {
                vmw_generic_waiter_add(man->dev_priv,
                                       SVGA_IRQFLAG_COMMAND_BUFFER,
                                       &man->dev_priv->cmdbuf_waiters);
                man->irq_on = true;

                /* Rerun in case we just missed an irq. */
                goto retry;
        }
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
                               struct vmw_cmdbuf_header *header,
                               SVGACBContext cb_context)
{
        if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
                header->cb_header->dxContext = 0;
        header->cb_context = cb_context;
        list_add_tail(&header->list, &man->ctx[cb_context].submitted);

        vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
        struct vmw_cmdbuf_man *man =
                container_of(work, struct vmw_cmdbuf_man, work);
        struct vmw_cmdbuf_header *entry, *next;
        uint32_t dummy;
        bool restart[SVGA_CB_CONTEXT_MAX];
        bool send_fence = false;
        struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
        int i;
        struct vmw_cmdbuf_context *ctx;

        for_each_cmdbuf_ctx(man, i, ctx) {
                INIT_LIST_HEAD(&restart_head[i]);
                restart[i] = false;
        }

        mutex_lock(&man->error_mutex);
        spin_lock(&man->lock);
        list_for_each_entry_safe(entry, next, &man->error, list) {
                SVGACBHeader *cb_hdr = entry->cb_header;
                SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
                        (entry->cmd + cb_hdr->errorOffset);
                u32 error_cmd_size, new_start_offset;
                const char *cmd_name;

                list_del_init(&entry->list);
                restart[entry->cb_context] = true;

                if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
                        DRM_ERROR("Unknown command causing device error.\n");
                        DRM_ERROR("Command buffer offset is %lu\n",
                                  (unsigned long) cb_hdr->errorOffset);
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
                DRM_ERROR("Command buffer offset is %lu\n",
                          (unsigned long) cb_hdr->errorOffset);
                DRM_ERROR("Command size is %lu\n",
                          (unsigned long) error_cmd_size);

                new_start_offset = cb_hdr->errorOffset + error_cmd_size;

                if (new_start_offset >= cb_hdr->length) {
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                if (man->using_mob)
                        cb_hdr->ptr.mob.mobOffset += new_start_offset;
                else
                        cb_hdr->ptr.pa += (u64) new_start_offset;

                entry->cmd += new_start_offset;
                cb_hdr->length -= new_start_offset;
                cb_hdr->errorOffset = 0;
                cb_hdr->offset = 0;
                list_add_tail(&entry->list, &restart_head[entry->cb_context]);
                man->ctx[entry->cb_context].block_submission = true;
        }
        spin_unlock(&man->lock);

        /* Preempt all contexts with errors */
        for_each_cmdbuf_ctx(man, i, ctx) {
                if (ctx->block_submission && vmw_cmdbuf_preempt(man, i))
                        DRM_ERROR("Failed preempting command buffer "
                                  "context %u.\n", i);
        }

        spin_lock(&man->lock);
        for_each_cmdbuf_ctx(man, i, ctx) {
                if (!ctx->block_submission)
                        continue;

                /* Move preempted command buffers to the preempted queue. */
                vmw_cmdbuf_ctx_process(man, ctx, &dummy);

                /*
                 * Add the preempted queue after the command buffer
                 * that caused an error.
                 */
                list_splice_init(&ctx->preempted, restart_head[i].prev);

                /*
                 * Finally add all command buffers first in the submitted
                 * queue, to rerun them.
                 */
                list_splice_init(&restart_head[i], &ctx->submitted);

                ctx->block_submission = false;
        }

        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);

        for_each_cmdbuf_ctx(man, i, ctx) {
                if (restart[i] && vmw_cmdbuf_startstop(man, i, true))
                        DRM_ERROR("Failed restarting command buffer "
                                  "context %u.\n", i);
        }

        /* Send a new fence in case one was removed */
        if (send_fence) {
                vmw_fifo_send_fence(man->dev_priv, &dummy);
                wake_up_all(&man->idle_queue);
        }

        mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
                                bool check_preempted)
{
        struct vmw_cmdbuf_context *ctx;
        bool idle = false;
        int i;

        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        for_each_cmdbuf_ctx(man, i, ctx) {
                if (!list_empty(&ctx->submitted) ||
                    !list_empty(&ctx->hw_submitted) ||
                    (check_preempted && !list_empty(&ctx->preempted)))
                        goto out_unlock;
        }

        idle = list_empty(&man->error);

out_unlock:
        spin_unlock(&man->lock);

        return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        WARN_ON(!mutex_is_locked(&man->cur_mutex));

        if (!cur)
                return;

        spin_lock(&man->lock);
        if (man->cur_pos == 0) {
                __vmw_cmdbuf_header_free(cur);
                goto out_unlock;
        }

        man->cur->cb_header->length = man->cur_pos;
        vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
        spin_unlock(&man->lock);
        man->cur = NULL;
        man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting for the lock.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
                         bool interruptible)
{
        int ret = vmw_cmdbuf_cur_lock(man, interruptible);

        if (ret)
                return ret;

        __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);

        return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
                    unsigned long timeout)
{
        int ret;

        ret = vmw_cmdbuf_cur_flush(man, interruptible);
        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                ret = wait_event_interruptible_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        } else {
                ret = wait_event_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);
        if (ret == 0) {
                if (!vmw_cmdbuf_man_idle(man, true))
                        ret = -EBUSY;
                else
                        ret = 0;
        }
        if (ret > 0)
                ret = 0;

        return ret;
}

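/*
 * Usage sketch (illustrative, not from the original source): waiting
 * uninterruptibly for up to ten seconds for the manager to drain, as the
 * teardown paths at the end of this file do:
 *
 *      if (vmw_cmdbuf_idle(man, false, 10*HZ) == -EBUSY)
 *              DRM_ERROR("Command buffer manager failed to idle.\n");
 */
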
/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if the
 * allocation succeeded, and records the outcome in @info->done.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_alloc_info *info)
{
        int ret;

        if (info->done)
                return true;

        memset(info->node, 0, sizeof(*info->node));
        spin_lock(&man->lock);
        ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        if (ret) {
                vmw_cmdbuf_man_process(man);
                ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        }

        spin_unlock(&man->lock);
        info->done = !ret;

        return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available at the moment, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
                                  struct drm_mm_node *node,
                                  size_t size,
                                  bool interruptible)
{
        struct vmw_cmdbuf_alloc_info info;

        info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
        info.node = node;
        info.done = false;

        /*
         * To prevent starvation of large requests, only one allocating call
         * at a time waiting for space.
         */
        if (interruptible) {
                if (mutex_lock_interruptible(&man->space_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->space_mutex);
        }

        /* Try to allocate space without waiting. */
        if (vmw_cmdbuf_try_alloc(man, &info))
                goto out_unlock;

        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                int ret;

                ret = wait_event_interruptible
                        (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
                if (ret) {
                        vmw_generic_waiter_remove
                                (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
                                 &man->dev_priv->cmdbuf_waiters);
                        mutex_unlock(&man->space_mutex);
                        return ret;
                }
        } else {
                wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);

out_unlock:
        mutex_unlock(&man->space_mutex);

        return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_header *header,
                                 size_t size,
                                 bool interruptible)
{
        SVGACBHeader *cb_hdr;
        size_t offset;
        int ret;

        if (!man->has_pool)
                return -ENOMEM;

        ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

        if (ret)
                return ret;

        header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
                                            &header->handle);
        if (!header->cb_header) {
                ret = -ENOMEM;
                goto out_no_cb_header;
        }

        header->size = header->node.size << PAGE_SHIFT;
        cb_hdr = header->cb_header;
        offset = header->node.start << PAGE_SHIFT;
        header->cmd = man->map + offset;
        if (man->using_mob) {
                cb_hdr->flags = SVGA_CB_FLAG_MOB;
                cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
                cb_hdr->ptr.mob.mobOffset = offset;
        } else {
                cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
        }

        return 0;

out_no_cb_header:
        spin_lock(&man->lock);
        drm_mm_remove_node(&header->node);
        spin_unlock(&man->lock);

        return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_header *header,
                                   int size)
{
        struct vmw_cmdbuf_dheader *dheader;
        SVGACBHeader *cb_hdr;

        if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
                return -ENOMEM;

        dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
                                  &header->handle);
        if (!dheader)
                return -ENOMEM;

        header->inline_space = true;
        header->size = VMW_CMDBUF_INLINE_SIZE;
        cb_hdr = &dheader->cb_header;
        header->cb_header = cb_hdr;
        header->cmd = dheader->cmd;
        cb_hdr->status = SVGA_CB_STATUS_NONE;
        cb_hdr->flags = SVGA_CB_FLAG_NONE;
        cb_hdr->ptr.pa = (u64)header->handle +
                (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

        return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
                       size_t size, bool interruptible,
                       struct vmw_cmdbuf_header **p_header)
{
        struct vmw_cmdbuf_header *header;
        int ret = 0;

        *p_header = NULL;

        header = kzalloc(sizeof(*header), GFP_KERNEL);
        if (!header)
                return ERR_PTR(-ENOMEM);

        if (size <= VMW_CMDBUF_INLINE_SIZE)
                ret = vmw_cmdbuf_space_inline(man, header, size);
        else
                ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

        if (ret) {
                kfree(header);
                return ERR_PTR(ret);
        }

        header->man = man;
        INIT_LIST_HEAD(&header->list);
        header->cb_header->status = SVGA_CB_STATUS_NONE;
        *p_header = header;

        return header->cmd;
}

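/*
 * Usage sketch (illustrative; @my_cmds and @cmd_size are hypothetical):
 * allocating a dedicated header, reserving its space, writing the commands
 * and committing with an immediate flush:
 *
 *      struct vmw_cmdbuf_header *header;
 *      void *buf, *cmd;
 *
 *      buf = vmw_cmdbuf_alloc(man, cmd_size, true, &header);
 *      if (IS_ERR(buf))
 *              return PTR_ERR(buf);
 *      cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID, true,
 *                               header);
 *      memcpy(cmd, my_cmds, cmd_size);
 *      vmw_cmdbuf_commit(man, cmd_size, header, true);
 */
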
/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
                                    size_t size,
                                    int ctx_id,
                                    bool interruptible)
{
        struct vmw_cmdbuf_header *cur;
        void *ret;

        if (vmw_cmdbuf_cur_lock(man, interruptible))
                return ERR_PTR(-ERESTARTSYS);

        cur = man->cur;
        if (cur && (size + man->cur_pos > cur->size ||
                    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
                     ctx_id != cur->cb_header->dxContext)))
                __vmw_cmdbuf_cur_flush(man);

        if (!man->cur) {
                ret = vmw_cmdbuf_alloc(man,
                                       max_t(size_t, size, man->default_size),
                                       interruptible, &man->cur);
                if (IS_ERR(ret)) {
                        vmw_cmdbuf_cur_unlock(man);
                        return ret;
                }

                cur = man->cur;
        }

        if (ctx_id != SVGA3D_INVALID_ID) {
                cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                cur->cb_header->dxContext = ctx_id;
        }

        cur->reserved = size;

        return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
                                  size_t size, bool flush)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        WARN_ON(!mutex_is_locked(&man->cur_mutex));

        WARN_ON(size > cur->reserved);
        man->cur_pos += size;
        if (!size)
                cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
                         int ctx_id, bool interruptible,
                         struct vmw_cmdbuf_header *header)
{
        if (!header)
                return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

        if (size > header->size)
                return ERR_PTR(-EINVAL);

        if (ctx_id != SVGA3D_INVALID_ID) {
                header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                header->cb_header->dxContext = ctx_id;
        }

        header->reserved = size;
        return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
                       struct vmw_cmdbuf_header *header, bool flush)
{
        if (!header) {
                vmw_cmdbuf_commit_cur(man, size, flush);
                return;
        }

        (void) vmw_cmdbuf_cur_lock(man, false);
        __vmw_cmdbuf_cur_flush(man);
        WARN_ON(size > header->reserved);
        man->cur = header;
        man->cur_pos = size;
        if (!size)
                header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}

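/*
 * Usage sketch (illustrative; @size is hypothetical): reserving space in
 * the manager's current command buffer by passing a NULL header, writing
 * the commands and committing them without an immediate flush:
 *
 *      void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
 *                                     true, NULL);
 *      if (IS_ERR(cmd))
 *              return PTR_ERR(cmd);
 *      ... write @size bytes of commands to @cmd ...
 *      vmw_cmdbuf_commit(man, size, NULL, false);
 */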

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
                                          const void *command,
                                          size_t size)
{
        struct vmw_cmdbuf_header *header;
        int status;
        void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        memcpy(cmd, command, size);
        header->cb_header->length = size;
        header->cb_context = SVGA_CB_CONTEXT_DEVICE;
        spin_lock(&man->lock);
        status = vmw_cmdbuf_header_submit(header);
        spin_unlock(&man->lock);
        vmw_cmdbuf_header_free(header);

        if (status != SVGA_CB_STATUS_COMPLETED) {
                DRM_ERROR("Device context command failed with status %d\n",
                          status);
                return -EINVAL;
        }

        return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to preempt.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
        struct {
                uint32 id;
                SVGADCCmdPreempt body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_PREEMPT;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;
        cmd.body.ignoreIDZero = 0;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}


/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start or stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable)
{
        struct {
                uint32 id;
                SVGADCCmdStartStop body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
        cmd.body.enable = (enable) ? 1 : 0;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
                             size_t size, size_t default_size)
{
        struct vmw_private *dev_priv = man->dev_priv;
        bool dummy;
        int ret;

        if (man->has_pool)
                return -EINVAL;

        /* First, try to allocate a huge chunk of DMA memory */
        size = PAGE_ALIGN(size);
        man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
                                      &man->handle, GFP_KERNEL);
        if (man->map) {
                man->using_mob = false;
        } else {
                /*
                 * DMA memory failed. If we can have command buffers in a
                 * MOB, try to use that instead. Note that this will
                 * actually call into the already enabled manager, when
                 * binding the MOB.
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_DX))
                        return -ENOMEM;

                ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
                                    &vmw_mob_ne_placement, 0, false, NULL,
                                    &man->cmd_space);
                if (ret)
                        return ret;

                man->using_mob = true;
                ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
                                  &man->map_obj);
                if (ret)
                        goto out_no_map;

                man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
        }

        man->size = size;
        drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

        man->has_pool = true;

        /*
         * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
         * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
         * needs to wait for space and we block on further command
         * submissions to be able to free up space.
         */
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        DRM_INFO("Using command buffers with %s pool.\n",
                 (man->using_mob) ? "MOB" : "DMA");

        return 0;

out_no_map:
        if (man->using_mob)
                ttm_bo_unref(&man->cmd_space);

        return ret;
}

/**
 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or error pointer
 * on failure. The command buffer manager will be enabled for submissions of
 * size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
        struct vmw_cmdbuf_man *man;
        struct vmw_cmdbuf_context *ctx;
        unsigned int i;
        int ret;

        if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
                return ERR_PTR(-ENOSYS);

        man = kzalloc(sizeof(*man), GFP_KERNEL);
        if (!man)
                return ERR_PTR(-ENOMEM);

        man->headers = dma_pool_create("vmwgfx cmdbuf",
                                       &dev_priv->dev->pdev->dev,
                                       sizeof(SVGACBHeader),
                                       64, PAGE_SIZE);
        if (!man->headers) {
                ret = -ENOMEM;
                goto out_no_pool;
        }

        man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
                                        &dev_priv->dev->pdev->dev,
                                        sizeof(struct vmw_cmdbuf_dheader),
                                        64, PAGE_SIZE);
        if (!man->dheaders) {
                ret = -ENOMEM;
                goto out_no_dpool;
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_init(ctx);

        INIT_LIST_HEAD(&man->error);
        spin_lock_init(&man->lock);
        mutex_init(&man->cur_mutex);
        mutex_init(&man->space_mutex);
        mutex_init(&man->error_mutex);
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        init_waitqueue_head(&man->alloc_queue);
        init_waitqueue_head(&man->idle_queue);
        man->dev_priv = dev_priv;
        man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
        INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
                               &dev_priv->error_waiters);
        for_each_cmdbuf_ctx(man, i, ctx) {
                ret = vmw_cmdbuf_startstop(man, i, true);
                if (ret) {
                        DRM_ERROR("Failed starting command buffer "
                                  "context %u.\n", i);
                        vmw_cmdbuf_man_destroy(man);
                        return ERR_PTR(ret);
                }
        }

        return man;

out_no_dpool:
        dma_pool_destroy(man->headers);
out_no_pool:
        kfree(man);

        return ERR_PTR(ret);
}

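/*
 * Typical manager lifecycle (illustrative sketch; the calling code lives
 * outside this file): the manager is created early for inline submissions
 * only, the large pool is added once the MOB memory manager is up, and
 * teardown happens in reverse order:
 *
 *      man = vmw_cmdbuf_man_create(dev_priv);
 *      ...
 *      ret = vmw_cmdbuf_set_pool_size(man, size, default_size);
 *      ...
 *      vmw_cmdbuf_remove_pool(man);
 *      vmw_cmdbuf_man_destroy(man);
 */
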
/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
        if (!man->has_pool)
                return;

        man->has_pool = false;
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);
        if (man->using_mob) {
                (void) ttm_bo_kunmap(&man->map_obj);
                ttm_bo_unref(&man->cmd_space);
        } else {
                dma_free_coherent(&man->dev_priv->dev->pdev->dev,
                                  man->size, man->map, man->handle);
        }
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
        struct vmw_cmdbuf_context *ctx;
        unsigned int i;

        WARN_ON_ONCE(man->has_pool);
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);

        for_each_cmdbuf_ctx(man, i, ctx)
                if (vmw_cmdbuf_startstop(man, i, false))
                        DRM_ERROR("Failed stopping command buffer "
                                  "context %u.\n", i);

        vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
                                  &man->dev_priv->error_waiters);
        (void) cancel_work_sync(&man->work);
        dma_pool_destroy(man->dheaders);
        dma_pool_destroy(man->headers);
        mutex_destroy(&man->cur_mutex);
        mutex_destroy(&man->space_mutex);
        mutex_destroy(&man->error_mutex);
        kfree(man);
}