GNU Linux-libre 4.9.318-gnu1
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 */
enum vmw_resource_relocation_type {
        vmw_res_rel_normal,
        vmw_res_rel_nop,
        vmw_res_rel_cond_nop,
        vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the
 * id that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        u32 offset:29;
        enum vmw_resource_relocation_type rel_type:3;
};

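/*
 * Note that @offset and @rel_type pack into a single 32-bit word
 * (29 + 3 bits). vmw_resource_relocations_apply() below contains
 * BUILD_BUG_ON() checks verifying that SVGA_CB_MAX_SIZE and
 * vmw_res_rel_max actually fit within those widths.
 */
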
/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        u32 first_usage : 1;
        u32 switching_backup : 1;
        u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled only if guest-backed objects are available.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable)}

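/*
 * Example use of VMW_CMD_DEF (a sketch; the actual verifier table appears
 * further down in this file):
 *
 *      VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *                  true, false, false),
 *
 * This places the checker function at the table index derived from the
 * command id, together with the user-allow and guest-backed
 * enable/disable flags.
 */
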
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
        return (unsigned long) b - (unsigned long) a;
}

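/*
 * vmw_ptr_diff() is typically used to record where in the command buffer a
 * resource id lives, relative to the start of the batch, e.g. (sketch):
 *
 *      offset = vmw_ptr_diff(sw_context->buf_start, id_loc);
 *
 * That byte offset is what gets stored in the 29-bit
 * vmw_resource_relocation::offset field.
 */
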
/**
 * vmw_resources_unreserve - Unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: Pointer to the software context.
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
                                    bool backoff)
{
        struct vmw_resource_val_node *val;
        struct list_head *list = &sw_context->resource_list;

        if (sw_context->dx_query_mob && !backoff)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                bool switch_backup =
                        (backoff) ? false : val->switching_backup;

                /*
                 * Transfer staged context bindings to the
                 * persistent context binding tracker.
                 */
                if (unlikely(val->staged_bindings)) {
                        if (!backoff) {
                                vmw_binding_state_commit
                                        (vmw_context_binding_state(val->res),
                                         val->staged_bindings);
                        }

                        if (val->staged_bindings != sw_context->staged_bindings)
                                vmw_binding_state_free(val->staged_bindings);
                        else
                                sw_context->staged_bindings_inuse = false;
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                       val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource_val_node *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings =
                        vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged_bindings)) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = PTR_ERR(node->staged_bindings);
                        node->staged_bindings = NULL;
                        goto out_err;
                }
        } else {
                node->staged_bindings = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        return 0;
out_err:
        return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(node == NULL)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        node->res = vmw_resource_reference(res);
        node->first_usage = true;
        if (unlikely(p_node != NULL))
                *p_node = node;

        if (!dev_priv->has_mob) {
                list_add_tail(&node->head, &sw_context->resource_list);
                return 0;
        }

        switch (vmw_res_type(res)) {
        case vmw_res_context:
        case vmw_res_dx_context:
                list_add(&node->head, &sw_context->ctx_resource_list);
                ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
                break;
        case vmw_res_cotable:
                list_add_tail(&node->head, &sw_context->ctx_resource_list);
                break;
        default:
                list_add_tail(&node->head, &sw_context->resource_list);
                break;
        }

        return ret;
}

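/*
 * A typical caller pattern (sketch, mirroring the use in
 * vmw_resource_context_res_add() below): add the resource to the
 * validation list and drop the local reference, since the val_node
 * holds its own:
 *
 *      ret = vmw_resource_val_add(sw_context, res, NULL);
 *      vmw_resource_unreference(&res);
 *      if (unlikely(ret != 0))
 *              return ret;
 */
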
/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise
         * it may be swapped out when the view is validated.
         */
        ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
        if (ret)
                return ret;

        return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                               enum vmw_view_type view_type, u32 id)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return PTR_ERR(view);

        ret = vmw_view_res_val_add(sw_context, view);
        vmw_resource_unreference(&view);

        return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;

        /* Add all cotables to the validation list. */
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR(res))
                                continue;

                        ret = vmw_resource_val_add(sw_context, res, NULL);
                        vmw_resource_unreference(&res);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                /* entry->res is not refcounted */
                res = vmw_resource_reference_unless_doomed(entry->res);
                if (unlikely(res == NULL))
                        continue;

                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_resource_val_add(sw_context, entry->res,
                                                   NULL);
                vmw_resource_unreference(&res);
                if (unlikely(ret != 0))
                        break;
        }

        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_dma_buffer *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      dx_query_mob,
                                                      true, NULL);
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset,
                                       enum vmw_resource_relocation_type
                                       rel_type)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(rel == NULL)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        rel->rel_type = rel_type;
        list_add_tail(&rel->head, list);

        return 0;
}

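/*
 * Relocations are also used to conditionally patch out whole commands. As
 * a sketch, a checker can register the command header itself with
 * vmw_res_rel_cond_nop so the command is turned into SVGA_3D_CMD_NOP if
 * the resource ends up without a valid id after validation:
 *
 *      ret = vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *                                        vmw_ptr_diff(sw_context->buf_start,
 *                                                     &header->id),
 *                                        vmw_res_rel_cond_nop);
 */
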
/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        /* Validate the struct vmw_resource_relocation member size */
        BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
        BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

        list_for_each_entry(rel, list, head) {
                u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
                switch (rel->rel_type) {
                case vmw_res_rel_normal:
                        *addr = rel->res->id;
                        break;
                case vmw_res_rel_nop:
                        *addr = SVGA_3D_CMD_NOP;
                        break;
                default:
                        if (rel->res->id == -1)
                                *addr = SVGA_3D_CMD_NOP;
                        break;
                }
        }
}

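/*
 * As a worked sketch: for a vmw_res_rel_normal relocation recorded at,
 * say, byte offset 24, the u32 at cb + 24 (the user-supplied handle) is
 * overwritten with rel->res->id after validation, while the NOP-type
 * relocations overwrite the command id itself with SVGA_3D_CMD_NOP.
 */
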
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct vmw_dma_buffer *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) vbo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(&vbo->base);
                val_buf->shared = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

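/*
 * Typical use (sketch, mirroring vmw_translate_mob_ptr() below): after
 * looking up a buffer object, put it on the validation list and remember
 * its node index in a relocation so the translated address can be patched
 * in later:
 *
 *      ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true,
 *                                    &reloc->index);
 */
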
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at a time will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret = 0;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        if (sw_context->dx_query_mob) {
                struct vmw_dma_buffer *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *backup = res->backup;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }

                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
                        struct vmw_dma_buffer *vbo = res->backup;

                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
                                 vmw_resource_needs_backup(res), NULL);
                        if (ret) {
                                ttm_bo_unreserve(&vbo->base);
                                return ret;
                        }
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 uint32_t *id_loc,
                                 struct vmw_resource *res,
                                 struct vmw_resource_val_node **p_val)
{
        int ret;
        struct vmw_resource_val_node *node;

        *p_val = NULL;
        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          vmw_ptr_diff(sw_context->buf_start,
                                                       id_loc),
                                          vmw_res_rel_normal);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                return ret;

        if (p_val)
                *p_val = node;

        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and, if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id_loc == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         vmw_ptr_diff(sw_context->buf_start, id_loc),
                         vmw_res_rel_normal);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->fp->tfile,
                                              *id_loc,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id_loc);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id_loc;

        ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
                                    res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX queries associated with the context
 *
 * @ctx_res: context the queries belong to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_dma_buffer *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
        } *cmd;

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

        if (cmd == NULL) {
                DRM_ERROR("Failed to rebind queries.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->base.mem.start;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

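/*
 * The reserve/fill/commit sequence above is the standard pattern this
 * driver uses to emit device commands; as a sketch:
 *
 *      cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
 *      if (cmd == NULL)
 *              return -ENOMEM;
 *      cmd->header.id = ...;
 *      cmd->header.size = sizeof(cmd->body);
 *      ... fill cmd->body ...
 *      vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */
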
/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                if (unlikely(!val->staged_bindings))
                        break;

                ret = vmw_binding_rebind_all
                        (vmw_context_binding_state(val->res));
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->res);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
        struct vmw_cmdbuf_res_manager *man;
        u32 i;
        int ret;

        if (!ctx_node) {
                DRM_ERROR("DX Context not set.\n");
                return -EINVAL;
        }

        man = sw_context->man;
        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_lookup(man, view_type, view_ids[i]);
                        if (IS_ERR(view)) {
                                DRM_ERROR("View not found.\n");
                                return PTR_ERR(view);
                        }

                        ret = vmw_view_res_val_add(sw_context, view);
                        if (ret) {
                                DRM_ERROR("Could not add view to "
                                          "validation list.\n");
                                vmw_resource_unreference(&view);
                                return ret;
                        }
                }
                binding.bi.ctx = ctx_node->res;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
                                shader_slot, binding.slot);
                if (view)
                        vmw_resource_unreference(&view);
        }

        return 0;
}

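/*
 * A caller sketch (modeled on the DX set-shader-resources checker): the
 * view id array embedded after the command body is handed to this
 * function, e.g.
 *
 *      ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *                                  vmw_ctx_binding_sr,
 *                                  cmd->body.type - SVGA3D_SHADERTYPE_MIN,
 *                                  (void *) &cmd[1], num_sr_view,
 *                                  cmd->body.startView);
 */
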
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                uint32_t cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        struct vmw_resource_val_node *res_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                DRM_ERROR("Illegal render target type %u.\n",
                          (unsigned) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, &res_node);
        if (unlikely(ret != 0))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;

                binding.bi.ctx = ctx_node->res;
                binding.bi.res = res_node ? res_node->res : NULL;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(ctx_node->staged_bindings,
                                &binding.bi, 0, binding.slot);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBufferCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXPredCopyRegion body;
        } *cmd;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_dma_buffer *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->base.num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_dmabuf_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We also pin the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_dmabuf_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

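/*
 * Usage sketch: a checker translates the user-space MOB id embedded in a
 * command and drops its local buffer reference once done, as
 * vmw_cmd_dx_bind_query() below does:
 *
 *      ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
 *                                  &vmw_bo);
 *      if (ret != 0)
 *              return ret;
 *      ...
 *      vmw_dmabuf_unreference(&vmw_bo);
 */
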
/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
                                     NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

1384 /**
1385  * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1386  *
1387  * @dev_priv: Pointer to a device private struct.
1388  * @sw_context: The software context used for this command submission.
1389  * @header: Pointer to the command header in the command stream.
1390  *
1391  * This function adds the new query into the query COTABLE
1392  * This function adds the new query into the query COTABLE.
1393 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1394                                    struct vmw_sw_context *sw_context,
1395                                    SVGA3dCmdHeader *header)
1396 {
1397         struct vmw_dx_define_query_cmd {
1398                 SVGA3dCmdHeader header;
1399                 SVGA3dCmdDXDefineQuery q;
1400         } *cmd;
1401
1402         int    ret;
1403         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1404         struct vmw_resource *cotable_res;
1405
1406
1407         if (ctx_node == NULL) {
1408                 DRM_ERROR("DX Context not set for query.\n");
1409                 return -EINVAL;
1410         }
1411
1412         cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1413
1414         if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
1415             cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1416                 return -EINVAL;
1417
1418         cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1419         ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1420         vmw_resource_unreference(&cotable_res);
1421
1422         return ret;
1423 }
1424
1425
1426
1427 /**
1428  * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
1429  *
1430  * @dev_priv: Pointer to a device private struct.
1431  * @sw_context: The software context used for this command submission.
1432  * @header: Pointer to the command header in the command stream.
1433  *
1434  * The query bind operation will eventually associate the query ID
1435  * with its backing MOB.  In this function, we take the user mode
1436  * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1437  * kernel mode equivalent.
1438  */
1439 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1440                                  struct vmw_sw_context *sw_context,
1441                                  SVGA3dCmdHeader *header)
1442 {
1443         struct vmw_dx_bind_query_cmd {
1444                 SVGA3dCmdHeader header;
1445                 SVGA3dCmdDXBindQuery q;
1446         } *cmd;
1447
1448         struct vmw_dma_buffer *vmw_bo;
1449         int    ret;
1450
1451
1452         cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1453
1454         /*
1455          * Look up the buffer pointed to by q.mobid, put it on the relocation
1456          * list so its kernel mode MOB ID can be filled in later.
1457          */
1458         ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1459                                     &vmw_bo);
1460
1461         if (ret != 0)
1462                 return ret;
1463
1464         sw_context->dx_query_mob = vmw_bo;
1465         sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1466
1467         vmw_dmabuf_unreference(&vmw_bo);
1468
1469         return ret;
1470 }
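
/*
 * Editorial note: dropping the local reference above is safe because
 * vmw_translate_mob_ptr() has already placed the buffer on the
 * sw_context validation list, which holds its own reference for the
 * lifetime of the submission, so sw_context->dx_query_mob remains
 * valid until the batch is cleaned up.
 */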
1471
1472
1473
1474 /**
1475  * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
1476  *
1477  * @dev_priv: Pointer to a device private struct.
1478  * @sw_context: The software context used for this command submission.
1479  * @header: Pointer to the command header in the command stream.
1480  */
1481 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1482                                   struct vmw_sw_context *sw_context,
1483                                   SVGA3dCmdHeader *header)
1484 {
1485         struct vmw_begin_gb_query_cmd {
1486                 SVGA3dCmdHeader header;
1487                 SVGA3dCmdBeginGBQuery q;
1488         } *cmd;
1489
1490         cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1491                            header);
1492
1493         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1494                                  user_context_converter, &cmd->q.cid,
1495                                  NULL);
1496 }
1497
1498 /**
1499  * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
1500  *
1501  * @dev_priv: Pointer to a device private struct.
1502  * @sw_context: The software context used for this command submission.
1503  * @header: Pointer to the command header in the command stream.
1504  */
1505 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1506                                struct vmw_sw_context *sw_context,
1507                                SVGA3dCmdHeader *header)
1508 {
1509         struct vmw_begin_query_cmd {
1510                 SVGA3dCmdHeader header;
1511                 SVGA3dCmdBeginQuery q;
1512         } *cmd;
1513
1514         cmd = container_of(header, struct vmw_begin_query_cmd,
1515                            header);
1516
1517         if (dev_priv->has_mob) {
1518                 struct {
1519                         SVGA3dCmdHeader header;
1520                         SVGA3dCmdBeginGBQuery q;
1521                 } gb_cmd;
1522
1523                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1524
1525                 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1526                 gb_cmd.header.size = cmd->header.size;
1527                 gb_cmd.q.cid = cmd->q.cid;
1528                 gb_cmd.q.type = cmd->q.type;
1529
1530                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1531                 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1532         }
1533
1534         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1535                                  user_context_converter, &cmd->q.cid,
1536                                  NULL);
1537 }
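
/*
 * Editorial sketch: a user-space view of the legacy command accepted by
 * the path above (type names from the SVGA3D headers; fifo plumbing
 * omitted, and the occlusion query type is only an example):
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdBeginQuery q;
 *	} cmd = {
 *		.header = { .id   = SVGA_3D_CMD_BEGIN_QUERY,
 *			    .size = sizeof(cmd.q) },
 *		.q = { .cid = cid, .type = SVGA3D_QUERYTYPE_OCCLUSION },
 *	};
 *
 * On a MOB-capable device the validator rewrites this in place to the
 * SVGA_3D_CMD_BEGIN_GB_QUERY form; the BUG_ON() above guarantees the
 * rewritten command occupies exactly the same bytes in the stream.
 */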
1538
1539 /**
1540  * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
1541  *
1542  * @dev_priv: Pointer to a device private struct.
1543  * @sw_context: The software context used for this command submission.
1544  * @header: Pointer to the command header in the command stream.
1545  */
1546 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1547                                 struct vmw_sw_context *sw_context,
1548                                 SVGA3dCmdHeader *header)
1549 {
1550         struct vmw_dma_buffer *vmw_bo;
1551         struct vmw_query_cmd {
1552                 SVGA3dCmdHeader header;
1553                 SVGA3dCmdEndGBQuery q;
1554         } *cmd;
1555         int ret;
1556
1557         cmd = container_of(header, struct vmw_query_cmd, header);
1558         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1559         if (unlikely(ret != 0))
1560                 return ret;
1561
1562         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1563                                     &cmd->q.mobid,
1564                                     &vmw_bo);
1565         if (unlikely(ret != 0))
1566                 return ret;
1567
1568         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1569
1570         vmw_dmabuf_unreference(&vmw_bo);
1571         return ret;
1572 }
1573
1574 /**
1575  * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
1576  *
1577  * @dev_priv: Pointer to a device private struct.
1578  * @sw_context: The software context used for this command submission.
1579  * @header: Pointer to the command header in the command stream.
1580  */
1581 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1582                              struct vmw_sw_context *sw_context,
1583                              SVGA3dCmdHeader *header)
1584 {
1585         struct vmw_dma_buffer *vmw_bo;
1586         struct vmw_query_cmd {
1587                 SVGA3dCmdHeader header;
1588                 SVGA3dCmdEndQuery q;
1589         } *cmd;
1590         int ret;
1591
1592         cmd = container_of(header, struct vmw_query_cmd, header);
1593         if (dev_priv->has_mob) {
1594                 struct {
1595                         SVGA3dCmdHeader header;
1596                         SVGA3dCmdEndGBQuery q;
1597                 } gb_cmd;
1598
1599                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1600
1601                 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1602                 gb_cmd.header.size = cmd->header.size;
1603                 gb_cmd.q.cid = cmd->q.cid;
1604                 gb_cmd.q.type = cmd->q.type;
1605                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1606                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1607
1608                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1609                 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1610         }
1611
1612         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1613         if (unlikely(ret != 0))
1614                 return ret;
1615
1616         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1617                                       &cmd->q.guestResult,
1618                                       &vmw_bo);
1619         if (unlikely(ret != 0))
1620                 return ret;
1621
1622         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1623
1624         vmw_dmabuf_unreference(&vmw_bo);
1625         return ret;
1626 }
1627
1628 /**
1629  * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
1630  *
1631  * @dev_priv: Pointer to a device private struct.
1632  * @sw_context: The software context used for this command submission.
1633  * @header: Pointer to the command header in the command stream.
1634  */
1635 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1636                                  struct vmw_sw_context *sw_context,
1637                                  SVGA3dCmdHeader *header)
1638 {
1639         struct vmw_dma_buffer *vmw_bo;
1640         struct vmw_query_cmd {
1641                 SVGA3dCmdHeader header;
1642                 SVGA3dCmdWaitForGBQuery q;
1643         } *cmd;
1644         int ret;
1645
1646         cmd = container_of(header, struct vmw_query_cmd, header);
1647         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1648         if (unlikely(ret != 0))
1649                 return ret;
1650
1651         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1652                                     &cmd->q.mobid,
1653                                     &vmw_bo);
1654         if (unlikely(ret != 0))
1655                 return ret;
1656
1657         vmw_dmabuf_unreference(&vmw_bo);
1658         return 0;
1659 }
1660
1661 /**
1662  * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
1663  *
1664  * @dev_priv: Pointer to a device private struct.
1665  * @sw_context: The software context used for this command submission.
1666  * @header: Pointer to the command header in the command stream.
1667  */
1668 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1669                               struct vmw_sw_context *sw_context,
1670                               SVGA3dCmdHeader *header)
1671 {
1672         struct vmw_dma_buffer *vmw_bo;
1673         struct vmw_query_cmd {
1674                 SVGA3dCmdHeader header;
1675                 SVGA3dCmdWaitForQuery q;
1676         } *cmd;
1677         int ret;
1678
1679         cmd = container_of(header, struct vmw_query_cmd, header);
1680         if (dev_priv->has_mob) {
1681                 struct {
1682                         SVGA3dCmdHeader header;
1683                         SVGA3dCmdWaitForGBQuery q;
1684                 } gb_cmd;
1685
1686                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1687
1688                 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1689                 gb_cmd.header.size = cmd->header.size;
1690                 gb_cmd.q.cid = cmd->q.cid;
1691                 gb_cmd.q.type = cmd->q.type;
1692                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1693                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1694
1695                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1696                 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1697         }
1698
1699         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1700         if (unlikely(ret != 0))
1701                 return ret;
1702
1703         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1704                                       &cmd->q.guestResult,
1705                                       &vmw_bo);
1706         if (unlikely(ret != 0))
1707                 return ret;
1708
1709         vmw_dmabuf_unreference(&vmw_bo);
1710         return 0;
1711 }
1712
1713 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1714                        struct vmw_sw_context *sw_context,
1715                        SVGA3dCmdHeader *header)
1716 {
1717         struct vmw_dma_buffer *vmw_bo = NULL;
1718         struct vmw_surface *srf = NULL;
1719         struct vmw_dma_cmd {
1720                 SVGA3dCmdHeader header;
1721                 SVGA3dCmdSurfaceDMA dma;
1722         } *cmd;
1723         int ret;
1724         SVGA3dCmdSurfaceDMASuffix *suffix;
1725         uint32_t bo_size;
1726
1727         cmd = container_of(header, struct vmw_dma_cmd, header);
1728         suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1729                                                header->size - sizeof(*suffix));
1730
1731         /* Make sure the device and the verifier stay in sync. */
1732         if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1733                 DRM_ERROR("Invalid DMA suffix size.\n");
1734                 return -EINVAL;
1735         }
1736
1737         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1738                                       &cmd->dma.guest.ptr,
1739                                       &vmw_bo);
1740         if (unlikely(ret != 0))
1741                 return ret;
1742
1743         /* Make sure DMA doesn't cross BO boundaries. */
1744         bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1745         if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1746                 DRM_ERROR("Invalid DMA offset.\n");
1747                 ret = -EINVAL;
                     goto out_no_surface;
1748         }
1749
1750         bo_size -= cmd->dma.guest.ptr.offset;
1751         if (unlikely(suffix->maximumOffset > bo_size))
1752                 suffix->maximumOffset = bo_size;
1753
1754         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1755                                 user_surface_converter, &cmd->dma.host.sid,
1756                                 NULL);
1757         if (unlikely(ret != 0)) {
1758                 if (unlikely(ret != -ERESTARTSYS))
1759                         DRM_ERROR("Could not find surface for DMA.\n");
1760                 goto out_no_surface;
1761         }
1762
1763         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1764
1765         vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1766                              header);
1767
1768 out_no_surface:
1769         vmw_dmabuf_unreference(&vmw_bo);
1770         return ret;
1771 }
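
/*
 * Editorial sketch: the stream layout assumed by the suffix lookup in
 * vmw_cmd_dma() above (variable-length middle section, per the SVGA3D
 * headers):
 *
 *	SVGA3dCmdHeader           header;    size covers everything below
 *	SVGA3dCmdSurfaceDMA       dma;       guest pointer and host surface
 *	SVGA3dCopyBox             boxes[n];  variable-length copy boxes
 *	SVGA3dCmdSurfaceDMASuffix suffix;    suffixSize, maximumOffset, ...
 *
 * Hence the suffix sits at &dma + header->size - sizeof(suffix), and
 * clamping suffix->maximumOffset to the validated buffer size keeps the
 * device from accessing memory past the end of the guest BO.
 */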
1772
1773 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1774                         struct vmw_sw_context *sw_context,
1775                         SVGA3dCmdHeader *header)
1776 {
1777         struct vmw_draw_cmd {
1778                 SVGA3dCmdHeader header;
1779                 SVGA3dCmdDrawPrimitives body;
1780         } *cmd;
1781         SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1782                 (unsigned long)header + sizeof(*cmd));
1783         SVGA3dPrimitiveRange *range;
1784         uint32_t i;
1785         uint32_t maxnum;
1786         int ret;
1787
1788         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1789         if (unlikely(ret != 0))
1790                 return ret;
1791
1792         cmd = container_of(header, struct vmw_draw_cmd, header);
1793         maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1794
1795         if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1796                 DRM_ERROR("Illegal number of vertex declarations.\n");
1797                 return -EINVAL;
1798         }
1799
1800         for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1801                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1802                                         user_surface_converter,
1803                                         &decl->array.surfaceId, NULL);
1804                 if (unlikely(ret != 0))
1805                         return ret;
1806         }
1807
1808         maxnum = (header->size - sizeof(cmd->body) -
1809                   cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1810         if (unlikely(cmd->body.numRanges > maxnum)) {
1811                 DRM_ERROR("Illegal number of index ranges.\n");
1812                 return -EINVAL;
1813         }
1814
1815         range = (SVGA3dPrimitiveRange *) decl;
1816         for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1817                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1818                                         user_surface_converter,
1819                                         &range->indexArray.surfaceId, NULL);
1820                 if (unlikely(ret != 0))
1821                         return ret;
1822         }
1823         return 0;
1824 }
1825
1826
1827 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1828                              struct vmw_sw_context *sw_context,
1829                              SVGA3dCmdHeader *header)
1830 {
1831         struct vmw_tex_state_cmd {
1832                 SVGA3dCmdHeader header;
1833                 SVGA3dCmdSetTextureState state;
1834         } *cmd;
1835
1836         SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1837           ((unsigned long) header + header->size + sizeof(*header));
1838         SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1839                 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1840         struct vmw_resource_val_node *ctx_node;
1841         struct vmw_resource_val_node *res_node;
1842         int ret;
1843
1844         cmd = container_of(header, struct vmw_tex_state_cmd,
1845                            header);
1846
1847         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1848                                 user_context_converter, &cmd->state.cid,
1849                                 &ctx_node);
1850         if (unlikely(ret != 0))
1851                 return ret;
1852
1853         for (; cur_state < last_state; ++cur_state) {
1854                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1855                         continue;
1856
1857                 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1858                         DRM_ERROR("Illegal texture/sampler unit %u.\n",
1859                                   (unsigned) cur_state->stage);
1860                         return -EINVAL;
1861                 }
1862
1863                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1864                                         user_surface_converter,
1865                                         &cur_state->value, &res_node);
1866                 if (unlikely(ret != 0))
1867                         return ret;
1868
1869                 if (dev_priv->has_mob) {
1870                         struct vmw_ctx_bindinfo_tex binding;
1871
1872                         binding.bi.ctx = ctx_node->res;
1873                         binding.bi.res = res_node ? res_node->res : NULL;
1874                         binding.bi.bt = vmw_ctx_binding_tex;
1875                         binding.texture_stage = cur_state->stage;
1876                         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1877                                         0, binding.texture_stage);
1878                 }
1879         }
1880
1881         return 0;
1882 }
1883
1884 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1885                                       struct vmw_sw_context *sw_context,
1886                                       void *buf)
1887 {
1888         struct vmw_dma_buffer *vmw_bo;
1889         int ret;
1890
1891         struct {
1892                 uint32_t header;
1893                 SVGAFifoCmdDefineGMRFB body;
1894         } *cmd = buf;
1895
1896         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1897                                       &cmd->body.ptr,
1898                                       &vmw_bo);
1899         if (unlikely(ret != 0))
1900                 return ret;
1901
1902         vmw_dmabuf_unreference(&vmw_bo);
1903
1904         return ret;
1905 }
1906
1907
1908 /**
1909  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1910  * switching
1911  *
1912  * @dev_priv: Pointer to a device private struct.
1913  * @sw_context: The software context being used for this batch.
1914  * @val_node: The validation node representing the resource.
1915  * @buf_id: Pointer to the user-space backup buffer handle in the command
1916  * stream.
1917  * @backup_offset: Offset of backup into MOB.
1918  *
1919  * This function prepares for registering a switch of backup buffers
1920  * in the resource metadata just prior to unreserving. The new backup
1921  * buffer and offset are staged on @val_node and applied at unreserve time.
1922  */
1923 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1924                                      struct vmw_sw_context *sw_context,
1925                                      struct vmw_resource_val_node *val_node,
1926                                      uint32_t *buf_id,
1927                                      unsigned long backup_offset)
1928 {
1929         struct vmw_dma_buffer *dma_buf;
1930         int ret;
1931
1932         ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1933         if (ret)
1934                 return ret;
1935
1936         val_node->switching_backup = true;
1937         if (val_node->first_usage)
1938                 val_node->no_buffer_needed = true;
1939
1940         vmw_dmabuf_unreference(&val_node->new_backup);
1941         val_node->new_backup = dma_buf;
1942         val_node->new_backup_offset = backup_offset;
1943
1944         return 0;
1945 }
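
/*
 * Editorial sketch (assumed shape): the values staged above are applied
 * by the resource unreserve path once the whole submission has
 * validated, along the lines of
 *
 *	if (val_node->switching_backup)
 *		vmw_resource_unreserve(res, true, val_node->new_backup,
 *				       val_node->new_backup_offset);
 *
 * so a resource is never pointed at a new backing MOB unless the batch
 * that requested the switch succeeds.
 */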
1946
1947
1948 /**
1949  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1950  *
1951  * @dev_priv: Pointer to a device private struct.
1952  * @sw_context: The software context being used for this batch.
1953  * @res_type: The resource type.
1954  * @converter: Information about user-space binding for this resource type.
1955  * @res_id: Pointer to the user-space resource handle in the command stream.
1956  * @buf_id: Pointer to the user-space backup buffer handle in the command
1957  * stream.
1958  * @backup_offset: Offset of backup into MOB.
1959  *
1960  * This function prepares for registering a switch of backup buffers
1961  * in the resource metadata just prior to unreserving. It's basically a wrapper
1962  * around vmw_cmd_res_switch_backup with a different interface.
1963  */
1964 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1965                                  struct vmw_sw_context *sw_context,
1966                                  enum vmw_res_type res_type,
1967                                  const struct vmw_user_resource_conv
1968                                  *converter,
1969                                  uint32_t *res_id,
1970                                  uint32_t *buf_id,
1971                                  unsigned long backup_offset)
1972 {
1973         struct vmw_resource_val_node *val_node;
1974         int ret;
1975
1976         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1977                                 converter, res_id, &val_node);
1978         if (ret)
1979                 return ret;
1980
1981         return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1982                                          buf_id, backup_offset);
1983 }
1984
1985 /**
1986  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1987  * command
1988  *
1989  * @dev_priv: Pointer to a device private struct.
1990  * @sw_context: The software context being used for this batch.
1991  * @header: Pointer to the command header in the command stream.
1992  */
1993 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1994                                    struct vmw_sw_context *sw_context,
1995                                    SVGA3dCmdHeader *header)
1996 {
1997         struct vmw_bind_gb_surface_cmd {
1998                 SVGA3dCmdHeader header;
1999                 SVGA3dCmdBindGBSurface body;
2000         } *cmd;
2001
2002         cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
2003
2004         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
2005                                      user_surface_converter,
2006                                      &cmd->body.sid, &cmd->body.mobid,
2007                                      0);
2008 }
2009
2010 /**
2011  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
2012  * command
2013  *
2014  * @dev_priv: Pointer to a device private struct.
2015  * @sw_context: The software context being used for this batch.
2016  * @header: Pointer to the command header in the command stream.
2017  */
2018 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
2019                                    struct vmw_sw_context *sw_context,
2020                                    SVGA3dCmdHeader *header)
2021 {
2022         struct vmw_gb_surface_cmd {
2023                 SVGA3dCmdHeader header;
2024                 SVGA3dCmdUpdateGBImage body;
2025         } *cmd;
2026
2027         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2028
2029         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2030                                  user_surface_converter,
2031                                  &cmd->body.image.sid, NULL);
2032 }
2033
2034 /**
2035  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
2036  * command
2037  *
2038  * @dev_priv: Pointer to a device private struct.
2039  * @sw_context: The software context being used for this batch.
2040  * @header: Pointer to the command header in the command stream.
2041  */
2042 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
2043                                      struct vmw_sw_context *sw_context,
2044                                      SVGA3dCmdHeader *header)
2045 {
2046         struct vmw_gb_surface_cmd {
2047                 SVGA3dCmdHeader header;
2048                 SVGA3dCmdUpdateGBSurface body;
2049         } *cmd;
2050
2051         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2052
2053         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2054                                  user_surface_converter,
2055                                  &cmd->body.sid, NULL);
2056 }
2057
2058 /**
2059  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2060  * command
2061  *
2062  * @dev_priv: Pointer to a device private struct.
2063  * @sw_context: The software context being used for this batch.
2064  * @header: Pointer to the command header in the command stream.
2065  */
2066 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2067                                      struct vmw_sw_context *sw_context,
2068                                      SVGA3dCmdHeader *header)
2069 {
2070         struct vmw_gb_surface_cmd {
2071                 SVGA3dCmdHeader header;
2072                 SVGA3dCmdReadbackGBImage body;
2073         } *cmd;
2074
2075         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2076
2077         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2078                                  user_surface_converter,
2079                                  &cmd->body.image.sid, NULL);
2080 }
2081
2082 /**
2083  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2084  * command
2085  *
2086  * @dev_priv: Pointer to a device private struct.
2087  * @sw_context: The software context being used for this batch.
2088  * @header: Pointer to the command header in the command stream.
2089  */
2090 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2091                                        struct vmw_sw_context *sw_context,
2092                                        SVGA3dCmdHeader *header)
2093 {
2094         struct vmw_gb_surface_cmd {
2095                 SVGA3dCmdHeader header;
2096                 SVGA3dCmdReadbackGBSurface body;
2097         } *cmd;
2098
2099         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2100
2101         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2102                                  user_surface_converter,
2103                                  &cmd->body.sid, NULL);
2104 }
2105
2106 /**
2107  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2108  * command
2109  *
2110  * @dev_priv: Pointer to a device private struct.
2111  * @sw_context: The software context being used for this batch.
2112  * @header: Pointer to the command header in the command stream.
2113  */
2114 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2115                                        struct vmw_sw_context *sw_context,
2116                                        SVGA3dCmdHeader *header)
2117 {
2118         struct vmw_gb_surface_cmd {
2119                 SVGA3dCmdHeader header;
2120                 SVGA3dCmdInvalidateGBImage body;
2121         } *cmd;
2122
2123         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2124
2125         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2126                                  user_surface_converter,
2127                                  &cmd->body.image.sid, NULL);
2128 }
2129
2130 /**
2131  * vmw_cmd_invalidate_gb_surface - Validate an
2132  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2133  *
2134  * @dev_priv: Pointer to a device private struct.
2135  * @sw_context: The software context being used for this batch.
2136  * @header: Pointer to the command header in the command stream.
2137  */
2138 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2139                                          struct vmw_sw_context *sw_context,
2140                                          SVGA3dCmdHeader *header)
2141 {
2142         struct vmw_gb_surface_cmd {
2143                 SVGA3dCmdHeader header;
2144                 SVGA3dCmdInvalidateGBSurface body;
2145         } *cmd;
2146
2147         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2148
2149         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2150                                  user_surface_converter,
2151                                  &cmd->body.sid, NULL);
2152 }
2153
2154
2155 /**
2156  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2157  * command
2158  *
2159  * @dev_priv: Pointer to a device private struct.
2160  * @sw_context: The software context being used for this batch.
2161  * @header: Pointer to the command header in the command stream.
2162  */
2163 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2164                                  struct vmw_sw_context *sw_context,
2165                                  SVGA3dCmdHeader *header)
2166 {
2167         struct vmw_shader_define_cmd {
2168                 SVGA3dCmdHeader header;
2169                 SVGA3dCmdDefineShader body;
2170         } *cmd;
2171         int ret;
2172         size_t size;
2173         struct vmw_resource_val_node *val;
2174
2175         cmd = container_of(header, struct vmw_shader_define_cmd,
2176                            header);
2177
2178         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2179                                 user_context_converter, &cmd->body.cid,
2180                                 &val);
2181         if (unlikely(ret != 0))
2182                 return ret;
2183
2184         if (unlikely(!dev_priv->has_mob))
2185                 return 0;
2186
2187         size = cmd->header.size - sizeof(cmd->body);
2188         ret = vmw_compat_shader_add(dev_priv,
2189                                     vmw_context_res_man(val->res),
2190                                     cmd->body.shid, cmd + 1,
2191                                     cmd->body.type, size,
2192                                     &sw_context->staged_cmd_res);
2193         if (unlikely(ret != 0))
2194                 return ret;
2195
2196         return vmw_resource_relocation_add(&sw_context->res_relocations,
2197                                            NULL,
2198                                            vmw_ptr_diff(sw_context->buf_start,
2199                                                         &cmd->header.id),
2200                                            vmw_res_rel_nop);
2201 }
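
/*
 * Editorial sketch: the vmw_res_rel_nop relocation added above is
 * resolved after validation by patching the command id in the stream,
 * roughly (fixup loop shape assumed):
 *
 *	u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
 *	if (rel->rel_type == vmw_res_rel_nop)
 *		*addr = SVGA_3D_CMD_NOP;
 *
 * which turns the guest-visible SHADER_DEFINE into a NOP once the
 * shader body has been absorbed by the compat shader manager.
 */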
2202
2203 /**
2204  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2205  * command
2206  *
2207  * @dev_priv: Pointer to a device private struct.
2208  * @sw_context: The software context being used for this batch.
2209  * @header: Pointer to the command header in the command stream.
2210  */
2211 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2212                                   struct vmw_sw_context *sw_context,
2213                                   SVGA3dCmdHeader *header)
2214 {
2215         struct vmw_shader_destroy_cmd {
2216                 SVGA3dCmdHeader header;
2217                 SVGA3dCmdDestroyShader body;
2218         } *cmd;
2219         int ret;
2220         struct vmw_resource_val_node *val;
2221
2222         cmd = container_of(header, struct vmw_shader_destroy_cmd,
2223                            header);
2224
2225         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2226                                 user_context_converter, &cmd->body.cid,
2227                                 &val);
2228         if (unlikely(ret != 0))
2229                 return ret;
2230
2231         if (unlikely(!dev_priv->has_mob))
2232                 return 0;
2233
2234         ret = vmw_shader_remove(vmw_context_res_man(val->res),
2235                                 cmd->body.shid,
2236                                 cmd->body.type,
2237                                 &sw_context->staged_cmd_res);
2238         if (unlikely(ret != 0))
2239                 return ret;
2240
2241         return vmw_resource_relocation_add(&sw_context->res_relocations,
2242                                            NULL,
2243                                            vmw_ptr_diff(sw_context->buf_start,
2244                                                         &cmd->header.id),
2245                                            vmw_res_rel_nop);
2246 }
2247
2248 /**
2249  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2250  * command
2251  *
2252  * @dev_priv: Pointer to a device private struct.
2253  * @sw_context: The software context being used for this batch.
2254  * @header: Pointer to the command header in the command stream.
2255  */
2256 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2257                               struct vmw_sw_context *sw_context,
2258                               SVGA3dCmdHeader *header)
2259 {
2260         struct vmw_set_shader_cmd {
2261                 SVGA3dCmdHeader header;
2262                 SVGA3dCmdSetShader body;
2263         } *cmd;
2264         struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2265         struct vmw_ctx_bindinfo_shader binding;
2266         struct vmw_resource *res = NULL;
2267         int ret;
2268
2269         cmd = container_of(header, struct vmw_set_shader_cmd,
2270                            header);
2271
2272         if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2273                 DRM_ERROR("Illegal shader type %u.\n",
2274                           (unsigned) cmd->body.type);
2275                 return -EINVAL;
2276         }
2277
2278         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2279                                 user_context_converter, &cmd->body.cid,
2280                                 &ctx_node);
2281         if (unlikely(ret != 0))
2282                 return ret;
2283
2284         if (!dev_priv->has_mob)
2285                 return 0;
2286
2287         if (cmd->body.shid != SVGA3D_INVALID_ID) {
2288                 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2289                                         cmd->body.shid,
2290                                         cmd->body.type);
2291
2292                 if (!IS_ERR(res)) {
2293                         ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2294                                                     &cmd->body.shid, res,
2295                                                     &res_node);
2296                         vmw_resource_unreference(&res);
2297                         if (unlikely(ret != 0))
2298                                 return ret;
2299                 }
2300         }
2301
2302         if (!res_node) {
2303                 ret = vmw_cmd_res_check(dev_priv, sw_context,
2304                                         vmw_res_shader,
2305                                         user_shader_converter,
2306                                         &cmd->body.shid, &res_node);
2307                 if (unlikely(ret != 0))
2308                         return ret;
2309         }
2310
2311         binding.bi.ctx = ctx_node->res;
2312         binding.bi.res = res_node ? res_node->res : NULL;
2313         binding.bi.bt = vmw_ctx_binding_shader;
2314         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2315         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2316                         binding.shader_slot, 0);
2317         return 0;
2318 }
2319
2320 /**
2321  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2322  * command
2323  *
2324  * @dev_priv: Pointer to a device private struct.
2325  * @sw_context: The software context being used for this batch.
2326  * @header: Pointer to the command header in the command stream.
2327  */
2328 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2329                                     struct vmw_sw_context *sw_context,
2330                                     SVGA3dCmdHeader *header)
2331 {
2332         struct vmw_set_shader_const_cmd {
2333                 SVGA3dCmdHeader header;
2334                 SVGA3dCmdSetShaderConst body;
2335         } *cmd;
2336         int ret;
2337
2338         cmd = container_of(header, struct vmw_set_shader_const_cmd,
2339                            header);
2340
2341         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2342                                 user_context_converter, &cmd->body.cid,
2343                                 NULL);
2344         if (unlikely(ret != 0))
2345                 return ret;
2346
2347         if (dev_priv->has_mob)
2348                 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2349
2350         return 0;
2351 }
2352
2353 /**
2354  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2355  * command
2356  *
2357  * @dev_priv: Pointer to a device private struct.
2358  * @sw_context: The software context being used for this batch.
2359  * @header: Pointer to the command header in the command stream.
2360  */
2361 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2362                                   struct vmw_sw_context *sw_context,
2363                                   SVGA3dCmdHeader *header)
2364 {
2365         struct vmw_bind_gb_shader_cmd {
2366                 SVGA3dCmdHeader header;
2367                 SVGA3dCmdBindGBShader body;
2368         } *cmd;
2369
2370         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2371                            header);
2372
2373         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2374                                      user_shader_converter,
2375                                      &cmd->body.shid, &cmd->body.mobid,
2376                                      cmd->body.offsetInBytes);
2377 }
2378
2379 /**
2380  * vmw_cmd_dx_set_single_constant_buffer - Validate an
2381  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2382  *
2383  * @dev_priv: Pointer to a device private struct.
2384  * @sw_context: The software context being used for this batch.
2385  * @header: Pointer to the command header in the command stream.
2386  */
2387 static int
2388 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2389                                       struct vmw_sw_context *sw_context,
2390                                       SVGA3dCmdHeader *header)
2391 {
2392         struct {
2393                 SVGA3dCmdHeader header;
2394                 SVGA3dCmdDXSetSingleConstantBuffer body;
2395         } *cmd;
2396         struct vmw_resource_val_node *res_node = NULL;
2397         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2398         struct vmw_ctx_bindinfo_cb binding;
2399         int ret;
2400
2401         if (unlikely(ctx_node == NULL)) {
2402                 DRM_ERROR("DX Context not set.\n");
2403                 return -EINVAL;
2404         }
2405
2406         cmd = container_of(header, typeof(*cmd), header);
2407         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2408                                 user_surface_converter,
2409                                 &cmd->body.sid, &res_node);
2410         if (unlikely(ret != 0))
2411                 return ret;
2412
2413         binding.bi.ctx = ctx_node->res;
2414         binding.bi.res = res_node ? res_node->res : NULL;
2415         binding.bi.bt = vmw_ctx_binding_cb;
2416         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2417         binding.offset = cmd->body.offsetInBytes;
2418         binding.size = cmd->body.sizeInBytes;
2419         binding.slot = cmd->body.slot;
2420
2421         if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2422             binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2423                 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2424                           (unsigned) cmd->body.type,
2425                           (unsigned) binding.slot);
2426                 return -EINVAL;
2427         }
2428
2429         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2430                         binding.shader_slot, binding.slot);
2431
2432         return 0;
2433 }
2434
2435 /**
2436  * vmw_cmd_dx_set_shader_res - Validate an
2437  * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2438  *
2439  * @dev_priv: Pointer to a device private struct.
2440  * @sw_context: The software context being used for this batch.
2441  * @header: Pointer to the command header in the command stream.
2442  */
2443 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2444                                      struct vmw_sw_context *sw_context,
2445                                      SVGA3dCmdHeader *header)
2446 {
2447         struct {
2448                 SVGA3dCmdHeader header;
2449                 SVGA3dCmdDXSetShaderResources body;
2450         } *cmd = container_of(header, typeof(*cmd), header);
2451         u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2452                 sizeof(SVGA3dShaderResourceViewId);
2453
2454         if ((u64) cmd->body.startView + (u64) num_sr_view >
2455             (u64) SVGA3D_DX_MAX_SRVIEWS ||
2456             cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2457                 DRM_ERROR("Invalid shader binding.\n");
2458                 return -EINVAL;
2459         }
2460
2461         return vmw_view_bindings_add(sw_context, vmw_view_sr,
2462                                      vmw_ctx_binding_sr,
2463                                      cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2464                                      (void *) &cmd[1], num_sr_view,
2465                                      cmd->body.startView);
2466 }
2467
2468 /**
2469  * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2470  * command
2471  *
2472  * @dev_priv: Pointer to a device private struct.
2473  * @sw_context: The software context being used for this batch.
2474  * @header: Pointer to the command header in the command stream.
2475  */
2476 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2477                                  struct vmw_sw_context *sw_context,
2478                                  SVGA3dCmdHeader *header)
2479 {
2480         struct {
2481                 SVGA3dCmdHeader header;
2482                 SVGA3dCmdDXSetShader body;
2483         } *cmd;
2484         struct vmw_resource *res = NULL;
2485         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2486         struct vmw_ctx_bindinfo_shader binding;
2487         int ret = 0;
2488
2489         if (unlikely(ctx_node == NULL)) {
2490                 DRM_ERROR("DX Context not set.\n");
2491                 return -EINVAL;
2492         }
2493
2494         cmd = container_of(header, typeof(*cmd), header);
2495
2496         if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
2497             cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2498                 DRM_ERROR("Illegal shader type %u.\n",
2499                           (unsigned) cmd->body.type);
2500                 return -EINVAL;
2501         }
2502
2503         if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2504                 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2505                 if (IS_ERR(res)) {
2506                         DRM_ERROR("Could not find shader for binding.\n");
2507                         return PTR_ERR(res);
2508                 }
2509
2510                 ret = vmw_resource_val_add(sw_context, res, NULL);
2511                 if (ret)
2512                         goto out_unref;
2513         }
2514
2515         binding.bi.ctx = ctx_node->res;
2516         binding.bi.res = res;
2517         binding.bi.bt = vmw_ctx_binding_dx_shader;
2518         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2519
2520         vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2521                         binding.shader_slot, 0);
2522 out_unref:
2523         if (res)
2524                 vmw_resource_unreference(&res);
2525
2526         return ret;
2527 }
2528
2529 /**
2530  * vmw_cmd_dx_set_vertex_buffers - Validates an
2531  * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2532  *
2533  * @dev_priv: Pointer to a device private struct.
2534  * @sw_context: The software context being used for this batch.
2535  * @header: Pointer to the command header in the command stream.
2536  */
2537 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2538                                          struct vmw_sw_context *sw_context,
2539                                          SVGA3dCmdHeader *header)
2540 {
2541         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2542         struct vmw_ctx_bindinfo_vb binding;
2543         struct vmw_resource_val_node *res_node;
2544         struct {
2545                 SVGA3dCmdHeader header;
2546                 SVGA3dCmdDXSetVertexBuffers body;
2547                 SVGA3dVertexBuffer buf[];
2548         } *cmd;
2549         int i, ret, num;
2550
2551         if (unlikely(ctx_node == NULL)) {
2552                 DRM_ERROR("DX Context not set.\n");
2553                 return -EINVAL;
2554         }
2555
2556         cmd = container_of(header, typeof(*cmd), header);
2557         num = (cmd->header.size - sizeof(cmd->body)) /
2558                 sizeof(SVGA3dVertexBuffer);
2559         if ((u64)num + (u64)cmd->body.startBuffer >
2560             (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2561                 DRM_ERROR("Invalid number of vertex buffers.\n");
2562                 return -EINVAL;
2563         }
2564
2565         for (i = 0; i < num; i++) {
2566                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2567                                         user_surface_converter,
2568                                         &cmd->buf[i].sid, &res_node);
2569                 if (unlikely(ret != 0))
2570                         return ret;
2571
2572                 binding.bi.ctx = ctx_node->res;
2573                 binding.bi.bt = vmw_ctx_binding_vb;
2574                 binding.bi.res = ((res_node) ? res_node->res : NULL);
2575                 binding.offset = cmd->buf[i].offset;
2576                 binding.stride = cmd->buf[i].stride;
2577                 binding.slot = i + cmd->body.startBuffer;
2578
2579                 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2580                                 0, binding.slot);
2581         }
2582
2583         return 0;
2584 }
2585
2586 /**
2587  * vmw_cmd_dx_set_index_buffer - Validate an
2588  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2589  *
2590  * @dev_priv: Pointer to a device private struct.
2591  * @sw_context: The software context being used for this batch.
2592  * @header: Pointer to the command header in the command stream.
2593  */
2594 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2595                                        struct vmw_sw_context *sw_context,
2596                                        SVGA3dCmdHeader *header)
2597 {
2598         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2599         struct vmw_ctx_bindinfo_ib binding;
2600         struct vmw_resource_val_node *res_node;
2601         struct {
2602                 SVGA3dCmdHeader header;
2603                 SVGA3dCmdDXSetIndexBuffer body;
2604         } *cmd;
2605         int ret;
2606
2607         if (unlikely(ctx_node == NULL)) {
2608                 DRM_ERROR("DX Context not set.\n");
2609                 return -EINVAL;
2610         }
2611
2612         cmd = container_of(header, typeof(*cmd), header);
2613         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2614                                 user_surface_converter,
2615                                 &cmd->body.sid, &res_node);
2616         if (unlikely(ret != 0))
2617                 return ret;
2618
2619         binding.bi.ctx = ctx_node->res;
2620         binding.bi.res = ((res_node) ? res_node->res : NULL);
2621         binding.bi.bt = vmw_ctx_binding_ib;
2622         binding.offset = cmd->body.offset;
2623         binding.format = cmd->body.format;
2624
2625         vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2626
2627         return 0;
2628 }
2629
2630 /**
2631  * vmw_cmd_dx_set_rendertargets - Validate an
2632  * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2633  *
2634  * @dev_priv: Pointer to a device private struct.
2635  * @sw_context: The software context being used for this batch.
2636  * @header: Pointer to the command header in the command stream.
2637  */
2638 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2639                                         struct vmw_sw_context *sw_context,
2640                                         SVGA3dCmdHeader *header)
2641 {
2642         struct {
2643                 SVGA3dCmdHeader header;
2644                 SVGA3dCmdDXSetRenderTargets body;
2645         } *cmd = container_of(header, typeof(*cmd), header);
2646         int ret;
2647         u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2648                 sizeof(SVGA3dRenderTargetViewId);
2649
2650         if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2651                 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2652                 return -EINVAL;
2653         }
2654
2655         ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2656                                     vmw_ctx_binding_ds, 0,
2657                                     &cmd->body.depthStencilViewId, 1, 0);
2658         if (ret)
2659                 return ret;
2660
2661         return vmw_view_bindings_add(sw_context, vmw_view_rt,
2662                                      vmw_ctx_binding_dx_rt, 0,
2663                                      (void *)&cmd[1], num_rt_view, 0);
2664 }
2665
2666 /**
2667  * vmw_cmd_dx_clear_rendertarget_view - Validate an
2668  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2669  *
2670  * @dev_priv: Pointer to a device private struct.
2671  * @sw_context: The software context being used for this batch.
2672  * @header: Pointer to the command header in the command stream.
2673  */
2674 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2675                                               struct vmw_sw_context *sw_context,
2676                                               SVGA3dCmdHeader *header)
2677 {
2678         struct {
2679                 SVGA3dCmdHeader header;
2680                 SVGA3dCmdDXClearRenderTargetView body;
2681         } *cmd = container_of(header, typeof(*cmd), header);
2682
2683         return vmw_view_id_val_add(sw_context, vmw_view_rt,
2684                                    cmd->body.renderTargetViewId);
2685 }
2686
2687 /**
2688  * vmw_cmd_dx_clear_depthstencil_view - Validate an
2689  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2690  *
2691  * @dev_priv: Pointer to a device private struct.
2692  * @sw_context: The software context being used for this batch.
2693  * @header: Pointer to the command header in the command stream.
2694  */
2695 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2696                                               struct vmw_sw_context *sw_context,
2697                                               SVGA3dCmdHeader *header)
2698 {
2699         struct {
2700                 SVGA3dCmdHeader header;
2701                 SVGA3dCmdDXClearDepthStencilView body;
2702         } *cmd = container_of(header, typeof(*cmd), header);
2703
2704         return vmw_view_id_val_add(sw_context, vmw_view_ds,
2705                                    cmd->body.depthStencilViewId);
2706 }
2707
2708 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2709                                   struct vmw_sw_context *sw_context,
2710                                   SVGA3dCmdHeader *header)
2711 {
2712         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2713         struct vmw_resource_val_node *srf_node;
2714         struct vmw_resource *res;
2715         enum vmw_view_type view_type;
2716         int ret;
2717         /*
2718          * This is based on the fact that all affected define commands have
2719          * the same initial command body layout.
2720          */
2721         struct {
2722                 SVGA3dCmdHeader header;
2723                 uint32 defined_id;
2724                 uint32 sid;
2725         } *cmd;
2726
2727         if (unlikely(ctx_node == NULL)) {
2728                 DRM_ERROR("DX Context not set.\n");
2729                 return -EINVAL;
2730         }
2731
2732         view_type = vmw_view_cmd_to_type(header->id);
2733         if (view_type == vmw_view_max)
2734                 return -EINVAL;
2735         cmd = container_of(header, typeof(*cmd), header);
2736         if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2737                 DRM_ERROR("Invalid surface id.\n");
2738                 return -EINVAL;
2739         }
2740         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2741                                 user_surface_converter,
2742                                 &cmd->sid, &srf_node);
2743         if (unlikely(ret != 0))
2744                 return ret;
2745
2746         res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2747         ret = vmw_cotable_notify(res, cmd->defined_id);
2748         vmw_resource_unreference(&res);
2749         if (unlikely(ret != 0))
2750                 return ret;
2751
2752         return vmw_view_add(sw_context->man,
2753                             ctx_node->res,
2754                             srf_node->res,
2755                             view_type,
2756                             cmd->defined_id,
2757                             header,
2758                             header->size + sizeof(*header),
2759                             &sw_context->staged_cmd_res);
2760 }
2761
2762 /**
2763  * vmw_cmd_dx_set_so_targets - Validate an
2764  * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2765  *
2766  * @dev_priv: Pointer to a device private struct.
2767  * @sw_context: The software context being used for this batch.
2768  * @header: Pointer to the command header in the command stream.
2769  */
2770 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2771                                      struct vmw_sw_context *sw_context,
2772                                      SVGA3dCmdHeader *header)
2773 {
2774         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2775         struct vmw_ctx_bindinfo_so binding;
2776         struct vmw_resource_val_node *res_node;
2777         struct {
2778                 SVGA3dCmdHeader header;
2779                 SVGA3dCmdDXSetSOTargets body;
2780                 SVGA3dSoTarget targets[];
2781         } *cmd;
2782         int i, ret, num;
2783
2784         if (unlikely(ctx_node == NULL)) {
2785                 DRM_ERROR("DX Context not set.\n");
2786                 return -EINVAL;
2787         }
2788
2789         cmd = container_of(header, typeof(*cmd), header);
2790         num = (cmd->header.size - sizeof(cmd->body)) /
2791                 sizeof(SVGA3dSoTarget);
2792
2793         if (num > SVGA3D_DX_MAX_SOTARGETS) {
2794                 DRM_ERROR("Invalid DX SO binding.\n");
2795                 return -EINVAL;
2796         }
2797
2798         for (i = 0; i < num; i++) {
2799                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2800                                         user_surface_converter,
2801                                         &cmd->targets[i].sid, &res_node);
2802                 if (unlikely(ret != 0))
2803                         return ret;
2804
2805                 binding.bi.ctx = ctx_node->res;
2806                 binding.bi.res = ((res_node) ? res_node->res : NULL);
2807                 binding.bi.bt = vmw_ctx_binding_so;
2808                 binding.offset = cmd->targets[i].offset;
2809                 binding.size = cmd->targets[i].sizeInBytes;
2810                 binding.slot = i;
2811
2812                 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2813                                 0, binding.slot);
2814         }
2815
2816         return 0;
2817 }
2818
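/**
 * vmw_cmd_dx_so_define - Validate a DX state-object define command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Shared by the element-layout-, blend-, depthstencil-, rasterizer- and
 * sampler-state define commands, which have the same initial body layout.
 * Notifies the corresponding context cotable of the newly defined id.
 */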
2819 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2820                                 struct vmw_sw_context *sw_context,
2821                                 SVGA3dCmdHeader *header)
2822 {
2823         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2824         struct vmw_resource *res;
2825         /*
2826          * This is based on the fact that all affected define commands have
2827          * the same initial command body layout.
2828          */
2829         struct {
2830                 SVGA3dCmdHeader header;
2831                 uint32 defined_id;
2832         } *cmd;
2833         enum vmw_so_type so_type;
2834         int ret;
2835
2836         if (unlikely(ctx_node == NULL)) {
2837                 DRM_ERROR("DX Context not set.\n");
2838                 return -EINVAL;
2839         }
2840
2841         so_type = vmw_so_cmd_to_type(header->id);
2842         res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2843         cmd = container_of(header, typeof(*cmd), header);
2844         ret = vmw_cotable_notify(res, cmd->defined_id);
2845         vmw_resource_unreference(&res);
2846
2847         return ret;
2848 }
2849
2850 /**
2851  * vmw_cmd_dx_check_subresource - Validate an
2852  * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2853  *
2854  * @dev_priv: Pointer to a device private struct.
2855  * @sw_context: The software context being used for this batch.
2856  * @header: Pointer to the command header in the command stream.
2857  */
2858 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2859                                         struct vmw_sw_context *sw_context,
2860                                         SVGA3dCmdHeader *header)
2861 {
2862         struct {
2863                 SVGA3dCmdHeader header;
2864                 union {
2865                         SVGA3dCmdDXReadbackSubResource r_body;
2866                         SVGA3dCmdDXInvalidateSubResource i_body;
2867                         SVGA3dCmdDXUpdateSubResource u_body;
2868                         SVGA3dSurfaceId sid;
2869                 };
2870         } *cmd;
2871
2872         BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2873                      offsetof(typeof(*cmd), sid));
2874         BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2875                      offsetof(typeof(*cmd), sid));
2876         BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2877                      offsetof(typeof(*cmd), sid));
2878
2879         cmd = container_of(header, typeof(*cmd), header);
2880
2881         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2882                                  user_surface_converter,
2883                                  &cmd->sid, NULL);
2884 }
2885
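/**
 * vmw_cmd_dx_cid_check - Validate a command that needs only the current
 * DX context
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */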
2886 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2887                                 struct vmw_sw_context *sw_context,
2888                                 SVGA3dCmdHeader *header)
2889 {
2890         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2891
2892         if (unlikely(ctx_node == NULL)) {
2893                 DRM_ERROR("DX Context not set.\n");
2894                 return -EINVAL;
2895         }
2896
2897         return 0;
2898 }
2899
2900 /**
2901  * vmw_cmd_dx_view_remove - Validate a view remove command and
2902  * schedule the view resource for removal.
2903  *
2904  * @dev_priv: Pointer to a device private struct.
2905  * @sw_context: The software context being used for this batch.
2906  * @header: Pointer to the command header in the command stream.
2907  *
2908  * Check that the view exists, and if it was not created using this
2909  * command batch, conditionally make this command a NOP.
2910  */
2911 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2912                                   struct vmw_sw_context *sw_context,
2913                                   SVGA3dCmdHeader *header)
2914 {
2915         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2916         struct {
2917                 SVGA3dCmdHeader header;
2918                 union vmw_view_destroy body;
2919         } *cmd = container_of(header, typeof(*cmd), header);
2920         enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2921         struct vmw_resource *view;
2922         int ret;
2923
2924         if (!ctx_node) {
2925                 DRM_ERROR("DX Context not set.\n");
2926                 return -EINVAL;
2927         }
2928
2929         ret = vmw_view_remove(sw_context->man,
2930                               cmd->body.view_id, view_type,
2931                               &sw_context->staged_cmd_res,
2932                               &view);
2933         if (ret || !view)
2934                 return ret;
2935
2936         /*
2937          * If the view wasn't created during this command batch, it might
2938          * have been removed due to a context swapout, so add a
2939          * relocation to conditionally make this command a NOP to avoid
2940          * device errors.
2941          */
2942         return vmw_resource_relocation_add(&sw_context->res_relocations,
2943                                            view,
2944                                            vmw_ptr_diff(sw_context->buf_start,
2945                                                         &cmd->header.id),
2946                                            vmw_res_rel_cond_nop);
2947 }
2948
2949 /**
2950  * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2951  * command
2952  *
2953  * @dev_priv: Pointer to a device private struct.
2954  * @sw_context: The software context being used for this batch.
2955  * @header: Pointer to the command header in the command stream.
2956  */
2957 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2958                                     struct vmw_sw_context *sw_context,
2959                                     SVGA3dCmdHeader *header)
2960 {
2961         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2962         struct vmw_resource *res;
2963         struct {
2964                 SVGA3dCmdHeader header;
2965                 SVGA3dCmdDXDefineShader body;
2966         } *cmd = container_of(header, typeof(*cmd), header);
2967         int ret;
2968
2969         if (!ctx_node) {
2970                 DRM_ERROR("DX Context not set.\n");
2971                 return -EINVAL;
2972         }
2973
2974         res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2975         ret = vmw_cotable_notify(res, cmd->body.shaderId);
2976         vmw_resource_unreference(&res);
2977         if (ret)
2978                 return ret;
2979
2980         return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2981                                  cmd->body.shaderId, cmd->body.type,
2982                                  &sw_context->staged_cmd_res);
2983 }
2984
2985 /**
2986  * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2987  * command
2988  *
2989  * @dev_priv: Pointer to a device private struct.
2990  * @sw_context: The software context being used for this batch.
2991  * @header: Pointer to the command header in the command stream.
2992  */
2993 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2994                                      struct vmw_sw_context *sw_context,
2995                                      SVGA3dCmdHeader *header)
2996 {
2997         struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2998         struct {
2999                 SVGA3dCmdHeader header;
3000                 SVGA3dCmdDXDestroyShader body;
3001         } *cmd = container_of(header, typeof(*cmd), header);
3002         int ret;
3003
3004         if (!ctx_node) {
3005                 DRM_ERROR("DX Context not set.\n");
3006                 return -EINVAL;
3007         }
3008
3009         ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
3010                                 &sw_context->staged_cmd_res);
3011         if (ret)
3012                 DRM_ERROR("Could not find shader to remove.\n");
3013
3014         return ret;
3015 }
3016
3017 /**
3018  * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
3019  * command
3020  *
3021  * @dev_priv: Pointer to a device private struct.
3022  * @sw_context: The software context being used for this batch.
3023  * @header: Pointer to the command header in the command stream.
3024  */
3025 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
3026                                   struct vmw_sw_context *sw_context,
3027                                   SVGA3dCmdHeader *header)
3028 {
3029         struct vmw_resource_val_node *ctx_node;
3030         struct vmw_resource_val_node *res_node;
3031         struct vmw_resource *res;
3032         struct {
3033                 SVGA3dCmdHeader header;
3034                 SVGA3dCmdDXBindShader body;
3035         } *cmd = container_of(header, typeof(*cmd), header);
3036         int ret;
3037
3038         if (cmd->body.cid != SVGA3D_INVALID_ID) {
3039                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
3040                                         user_context_converter,
3041                                         &cmd->body.cid, &ctx_node);
3042                 if (ret)
3043                         return ret;
3044         } else {
3045                 ctx_node = sw_context->dx_ctx_node;
3046                 if (!ctx_node) {
3047                         DRM_ERROR("DX Context not set.\n");
3048                         return -EINVAL;
3049                 }
3050         }
3051
3052         res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
3053                                 cmd->body.shid, 0);
3054         if (IS_ERR(res)) {
3055                 DRM_ERROR("Could not find shader to bind.\n");
3056                 return PTR_ERR(res);
3057         }
3058
3059         ret = vmw_resource_val_add(sw_context, res, &res_node);
3060         if (ret) {
3061                 DRM_ERROR("Error creating resource validation node.\n");
3062                 goto out_unref;
3063         }
3064
3065
3066         ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3067                                         &cmd->body.mobid,
3068                                         cmd->body.offsetInBytes);
3069 out_unref:
3070         vmw_resource_unreference(&res);
3071
3072         return ret;
3073 }
3074
3075 /**
3076  * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3077  *
3078  * @dev_priv: Pointer to a device private struct.
3079  * @sw_context: The software context being used for this batch.
3080  * @header: Pointer to the command header in the command stream.
3081  */
3082 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3083                               struct vmw_sw_context *sw_context,
3084                               SVGA3dCmdHeader *header)
3085 {
3086         struct {
3087                 SVGA3dCmdHeader header;
3088                 SVGA3dCmdDXGenMips body;
3089         } *cmd = container_of(header, typeof(*cmd), header);
3090
3091         return vmw_view_id_val_add(sw_context, vmw_view_sr,
3092                                    cmd->body.shaderResourceViewId);
3093 }
3094
3095 /**
3096  * vmw_cmd_dx_transfer_from_buffer -
3097  * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
3098  *
3099  * @dev_priv: Pointer to a device private struct.
3100  * @sw_context: The software context being used for this batch.
3101  * @header: Pointer to the command header in the command stream.
3102  */
3103 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
3104                                            struct vmw_sw_context *sw_context,
3105                                            SVGA3dCmdHeader *header)
3106 {
3107         struct {
3108                 SVGA3dCmdHeader header;
3109                 SVGA3dCmdDXTransferFromBuffer body;
3110         } *cmd = container_of(header, typeof(*cmd), header);
3111         int ret;
3112
3113         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3114                                 user_surface_converter,
3115                                 &cmd->body.srcSid, NULL);
3116         if (ret != 0)
3117                 return ret;
3118
3119         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3120                                  user_surface_converter,
3121                                  &cmd->body.destSid, NULL);
3122 }
3123
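/**
 * vmw_cmd_check_not_3d - Validate a non-3D (2D) SVGA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes remaining in the batch. On
 * successful output, the size of this command.
 *
 * Only a small set of 2D commands is recognized, and only when submitted
 * by the kernel itself; user-space batches may not contain them.
 * SVGA_CMD_DEFINE_GMRFB additionally has its framebuffer argument checked.
 */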
3124 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3125                                 struct vmw_sw_context *sw_context,
3126                                 void *buf, uint32_t *size)
3127 {
3128         uint32_t size_remaining = *size;
3129         uint32_t cmd_id;
3130
3131         cmd_id = ((uint32_t *)buf)[0];
3132         switch (cmd_id) {
3133         case SVGA_CMD_UPDATE:
3134                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3135                 break;
3136         case SVGA_CMD_DEFINE_GMRFB:
3137                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3138                 break;
3139         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3140                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3141                 break;
3142         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3143                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3144                 break;
3145         default:
3146                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3147                 return -EINVAL;
3148         }
3149
3150         if (*size > size_remaining) {
3151                 DRM_ERROR("Invalid SVGA command (size mismatch):"
3152                           " %u.\n", cmd_id);
3153                 return -EINVAL;
3154         }
3155
3156         if (unlikely(!sw_context->kernel)) {
3157                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3158                 return -EPERM;
3159         }
3160
3161         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3162                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3163
3164         return 0;
3165 }
3166
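/*
 * Dispatch table for SVGA3D commands. Each VMW_CMD_DEF() entry pairs a
 * command id with its validation function and the three flags consulted
 * by vmw_cmd_check(): whether user-space may submit the command, whether
 * the command is disallowed when guest-backed objects are in use, and
 * whether it requires guest-backed objects. For example, the entry
 *
 * VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
 *             true, false, true)
 *
 * allows user-space submission, but only on guest-backed hardware.
 */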
3167 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3168         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3169                     false, false, false),
3170         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3171                     false, false, false),
3172         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3173                     true, false, false),
3174         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3175                     true, false, false),
3176         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3177                     true, false, false),
3178         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3179                     false, false, false),
3180         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3181                     false, false, false),
3182         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3183                     true, false, false),
3184         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3185                     true, false, false),
3186         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3187                     true, false, false),
3188         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3189                     &vmw_cmd_set_render_target_check, true, false, false),
3190         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3191                     true, false, false),
3192         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3193                     true, false, false),
3194         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3195                     true, false, false),
3196         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3197                     true, false, false),
3198         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3199                     true, false, false),
3200         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3201                     true, false, false),
3202         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3203                     true, false, false),
3204         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3205                     false, false, false),
3206         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3207                     true, false, false),
3208         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3209                     true, false, false),
3210         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3211                     true, false, false),
3212         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3213                     true, false, false),
3214         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3215                     true, false, false),
3216         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3217                     true, false, false),
3218         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3219                     true, false, false),
3220         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3221                     true, false, false),
3222         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3223                     true, false, false),
3224         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3225                     true, false, false),
3226         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3227                     &vmw_cmd_blt_surf_screen_check, false, false, false),
3228         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3229                     false, false, false),
3230         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3231                     false, false, false),
3232         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3233                     false, false, false),
3234         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3235                     false, false, false),
3236         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3237                     false, false, false),
3238         VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3239                     false, false, false),
3240         VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3241                     false, false, false),
3242         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3243                     false, false, false),
3244         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3245                     false, false, false),
3246         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3247                     false, false, false),
3248         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3249                     false, false, false),
3250         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3251                     false, false, false),
3252         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3253                     false, false, false),
3254         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3255                     false, false, true),
3256         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3257                     false, false, true),
3258         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3259                     false, false, true),
3260         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3261                     false, false, true),
3262         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3263                     false, false, true),
3264         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3265                     false, false, true),
3266         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3267                     false, false, true),
3268         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3269                     false, false, true),
3270         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3271                     true, false, true),
3272         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3273                     false, false, true),
3274         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3275                     true, false, true),
3276         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3277                     &vmw_cmd_update_gb_surface, true, false, true),
3278         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3279                     &vmw_cmd_readback_gb_image, true, false, true),
3280         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3281                     &vmw_cmd_readback_gb_surface, true, false, true),
3282         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3283                     &vmw_cmd_invalidate_gb_image, true, false, true),
3284         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3285                     &vmw_cmd_invalidate_gb_surface, true, false, true),
3286         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3287                     false, false, true),
3288         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3289                     false, false, true),
3290         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3291                     false, false, true),
3292         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3293                     false, false, true),
3294         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3295                     false, false, true),
3296         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3297                     false, false, true),
3298         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3299                     true, false, true),
3300         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3301                     false, false, true),
3302         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3303                     false, false, false),
3304         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3305                     true, false, true),
3306         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3307                     true, false, true),
3308         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3309                     true, false, true),
3310         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3311                     true, false, true),
3312         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3313                     false, false, true),
3314         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3315                     false, false, true),
3316         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3317                     false, false, true),
3318         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3319                     false, false, true),
3320         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3321                     false, false, true),
3322         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3323                     false, false, true),
3324         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3325                     false, false, true),
3326         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3327                     false, false, true),
3328         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3329                     false, false, true),
3330         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3331                     false, false, true),
3332         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3333                     true, false, true),
3334         VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3335                     false, false, true),
3336         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3337                     false, false, true),
3338         VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3339                     false, false, true),
3340         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3341                     false, false, true),
3342
3343         /*
3344          * DX commands
3345          */
3346         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3347                     false, false, true),
3348         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3349                     false, false, true),
3350         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3351                     false, false, true),
3352         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3353                     false, false, true),
3354         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3355                     false, false, true),
3356         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3357                     &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3358         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3359                     &vmw_cmd_dx_set_shader_res, true, false, true),
3360         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3361                     true, false, true),
3362         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3363                     true, false, true),
3364         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3365                     true, false, true),
3366         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3367                     true, false, true),
3368         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3369                     true, false, true),
3370         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3371                     &vmw_cmd_dx_cid_check, true, false, true),
3372         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3373                     true, false, true),
3374         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3375                     &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3376         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3377                     &vmw_cmd_dx_set_index_buffer, true, false, true),
3378         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3379                     &vmw_cmd_dx_set_rendertargets, true, false, true),
3380         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3381                     true, false, true),
3382         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3383                     &vmw_cmd_dx_cid_check, true, false, true),
3384         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3385                     &vmw_cmd_dx_cid_check, true, false, true),
3386         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3387                     true, false, true),
3388         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3389                     true, false, true),
3390         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3391                     true, false, true),
3392         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3393                     &vmw_cmd_dx_cid_check, true, false, true),
3394         VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3395                     true, false, true),
3396         VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3397                     true, false, true),
3398         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3399                     true, false, true),
3400         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3401                     true, false, true),
3402         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3403                     true, false, true),
3404         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3405                     true, false, true),
3406         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3407                     &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3408         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3409                     &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3410         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3411                     true, false, true),
3412         VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3413                     true, false, true),
3414         VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3415                     &vmw_cmd_dx_check_subresource, true, false, true),
3416         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3417                     &vmw_cmd_dx_check_subresource, true, false, true),
3418         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3419                     &vmw_cmd_dx_check_subresource, true, false, true),
3420         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3421                     &vmw_cmd_dx_view_define, true, false, true),
3422         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3423                     &vmw_cmd_dx_view_remove, true, false, true),
3424         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3425                     &vmw_cmd_dx_view_define, true, false, true),
3426         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3427                     &vmw_cmd_dx_view_remove, true, false, true),
3428         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3429                     &vmw_cmd_dx_view_define, true, false, true),
3430         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3431                     &vmw_cmd_dx_view_remove, true, false, true),
3432         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3433                     &vmw_cmd_dx_so_define, true, false, true),
3434         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3435                     &vmw_cmd_dx_cid_check, true, false, true),
3436         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3437                     &vmw_cmd_dx_so_define, true, false, true),
3438         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3439                     &vmw_cmd_dx_cid_check, true, false, true),
3440         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3441                     &vmw_cmd_dx_so_define, true, false, true),
3442         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3443                     &vmw_cmd_dx_cid_check, true, false, true),
3444         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3445                     &vmw_cmd_dx_so_define, true, false, true),
3446         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3447                     &vmw_cmd_dx_cid_check, true, false, true),
3448         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3449                     &vmw_cmd_dx_so_define, true, false, true),
3450         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3451                     &vmw_cmd_dx_cid_check, true, false, true),
3452         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3453                     &vmw_cmd_dx_define_shader, true, false, true),
3454         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3455                     &vmw_cmd_dx_destroy_shader, true, false, true),
3456         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3457                     &vmw_cmd_dx_bind_shader, true, false, true),
3458         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3459                     &vmw_cmd_dx_so_define, true, false, true),
3460         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3461                     &vmw_cmd_dx_cid_check, true, false, true),
3462         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3463                     true, false, true),
3464         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3465                     &vmw_cmd_dx_set_so_targets, true, false, true),
3466         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3467                     &vmw_cmd_dx_cid_check, true, false, true),
3468         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3469                     &vmw_cmd_dx_cid_check, true, false, true),
3470         VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3471                     &vmw_cmd_buffer_copy_check, true, false, true),
3472         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3473                     &vmw_cmd_pred_copy_check, true, false, true),
3474         VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3475                     &vmw_cmd_dx_transfer_from_buffer,
3476                     true, false, true),
3477 };
3478
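/**
 * vmw_cmd_check - Validate a single command in a command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes remaining in the batch. On
 * successful output, the size of this command including its header.
 *
 * Non-3D commands are handed off to vmw_cmd_check_not_3d(). 3D commands
 * are looked up in vmw_cmd_entries[] and rejected if they are unknown,
 * larger than the remaining batch, privileged, or unsuitable for the
 * device's guest-backed-object capability; otherwise the entry's
 * validation function is called.
 */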
3479 static int vmw_cmd_check(struct vmw_private *dev_priv,
3480                          struct vmw_sw_context *sw_context,
3481                          void *buf, uint32_t *size)
3482 {
3483         uint32_t cmd_id;
3484         uint32_t size_remaining = *size;
3485         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3486         int ret;
3487         const struct vmw_cmd_entry *entry;
3488         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3489
3490         cmd_id = ((uint32_t *)buf)[0];
3491         /* Handle any non-3D commands */
3492         if (unlikely(cmd_id < SVGA_CMD_MAX))
3493                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3494
3496         cmd_id = header->id;
3497         *size = header->size + sizeof(SVGA3dCmdHeader);
3498
3499         cmd_id -= SVGA_3D_CMD_BASE;
3500         if (unlikely(*size > size_remaining))
3501                 goto out_invalid;
3502
3503         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3504                 goto out_invalid;
3505
3506         entry = &vmw_cmd_entries[cmd_id];
3507         if (unlikely(!entry->func))
3508                 goto out_invalid;
3509
3510         if (unlikely(!entry->user_allow && !sw_context->kernel))
3511                 goto out_privileged;
3512
3513         if (unlikely(entry->gb_disable && gb))
3514                 goto out_old;
3515
3516         if (unlikely(entry->gb_enable && !gb))
3517                 goto out_new;
3518
3519         ret = entry->func(dev_priv, sw_context, header);
3520         if (unlikely(ret != 0))
3521                 goto out_invalid;
3522
3523         return 0;
3524 out_invalid:
3525         DRM_ERROR("Invalid SVGA3D command: %d\n",
3526                   cmd_id + SVGA_3D_CMD_BASE);
3527         return -EINVAL;
3528 out_privileged:
3529         DRM_ERROR("Privileged SVGA3D command: %d\n",
3530                   cmd_id + SVGA_3D_CMD_BASE);
3531         return -EPERM;
3532 out_old:
3533         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3534                   cmd_id + SVGA_3D_CMD_BASE);
3535         return -EINVAL;
3536 out_new:
3537         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3538                   cmd_id + SVGA_3D_CMD_BASE);
3539         return -EINVAL;
3540 }
3541
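/**
 * vmw_cmd_check_all - Validate all commands in a command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 *
 * Walks the batch command by command, letting vmw_cmd_check() validate
 * each one and report its size, and fails if the command sizes don't
 * add up exactly to the batch size.
 */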
3542 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3543                              struct vmw_sw_context *sw_context,
3544                              void *buf,
3545                              uint32_t size)
3546 {
3547         int32_t cur_size = size;
3548         int ret;
3549
3550         sw_context->buf_start = buf;
3551
3552         while (cur_size > 0) {
3553                 size = cur_size;
3554                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3555                 if (unlikely(ret != 0))
3556                         return ret;
3557                 buf = (void *)((unsigned long) buf + size);
3558                 cur_size -= size;
3559         }
3560
3561         if (unlikely(cur_size != 0)) {
3562                 DRM_ERROR("Command verifier out of sync.\n");
3563                 return -EINVAL;
3564         }
3565
3566         return 0;
3567 }
3568
3569 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3570 {
3571         sw_context->cur_reloc = 0;
3572 }
3573
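/**
 * vmw_apply_relocations - Patch buffer object addresses into the batch
 *
 * @sw_context: The software context holding the relocation list.
 *
 * For each recorded relocation, patch the command stream with the final
 * placement of the validated buffer object: an offset within the
 * framebuffer GMR for VRAM placements, the GMR id for GMR placements,
 * or the MOB id for MOB placements.
 */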
3574 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3575 {
3576         uint32_t i;
3577         struct vmw_relocation *reloc;
3578         struct ttm_validate_buffer *validate;
3579         struct ttm_buffer_object *bo;
3580
3581         for (i = 0; i < sw_context->cur_reloc; ++i) {
3582                 reloc = &sw_context->relocs[i];
3583                 validate = &sw_context->val_bufs[reloc->index].base;
3584                 bo = validate->bo;
3585                 switch (bo->mem.mem_type) {
3586                 case TTM_PL_VRAM:
3587                         reloc->location->offset += bo->offset;
3588                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3589                         break;
3590                 case VMW_PL_GMR:
3591                         reloc->location->gmrId = bo->mem.start;
3592                         break;
3593                 case VMW_PL_MOB:
3594                         *reloc->mob_loc = bo->mem.start;
3595                         break;
3596                 default:
3597                         BUG();
3598                 }
3599         }
3600         vmw_free_relocations(sw_context);
3601 }
3602
3603 /**
3604  * vmw_resource_list_unreference - Free up a resource list and unreference
3605  * all resources referenced by it.
3606  *
3607  * @list: The resource list.
3608  */
3609 static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3610                                           struct list_head *list)
3611 {
3612         struct vmw_resource_val_node *val, *val_next;
3613
3614         /*
3615          * Drop references to resources held during command submission.
3616          */
3617
3618         list_for_each_entry_safe(val, val_next, list, head) {
3619                 list_del_init(&val->head);
3620                 vmw_resource_unreference(&val->res);
3621
3622                 if (val->staged_bindings) {
3623                         if (val->staged_bindings != sw_context->staged_bindings)
3624                                 vmw_binding_state_free(val->staged_bindings);
3625                         else
3626                                 sw_context->staged_bindings_inuse = false;
3627                         val->staged_bindings = NULL;
3628                 }
3629
3630                 kfree(val);
3631         }
3632 }
3633
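/**
 * vmw_clear_validations - Tear down the buffer object validation list
 *
 * @sw_context: The software context being used for this batch.
 *
 * Drops the buffer object references picked up during command submission
 * and removes the buffer and resource entries from the software
 * context's lookup hash table.
 */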
3634 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3635 {
3636         struct vmw_validate_buffer *entry, *next;
3637         struct vmw_resource_val_node *val;
3638
3639         /*
3640          * Drop references to DMA buffers held during command submission.
3641          */
3642         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3643                                  base.head) {
3644                 list_del(&entry->base.head);
3645                 ttm_bo_unref(&entry->base.bo);
3646                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3647                 sw_context->cur_val_buf--;
3648         }
3649         BUG_ON(sw_context->cur_val_buf != 0);
3650
3651         list_for_each_entry(val, &sw_context->resource_list, head)
3652                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3653 }
3654
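/**
 * vmw_validate_single_buffer - Place a buffer object for command submission
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to place.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 *
 * Pinned buffers are left where they are; others are placed using the
 * MOB placement if @validate_as_mob is set, and the VRAM / GMR
 * strategies described in the function body otherwise.
 */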
3655 int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3656                                struct ttm_buffer_object *bo,
3657                                bool interruptible,
3658                                bool validate_as_mob)
3659 {
3660         struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3661                                                   base);
3662         int ret;
3663
3664         if (vbo->pin_count > 0)
3665                 return 0;
3666
3667         if (validate_as_mob)
3668                 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3669                                        false);
3670
3671         /*
3672          * Put BO in VRAM if there is space, otherwise as a GMR.
3673          * If there is no space in VRAM and GMR ids are all used up,
3674          * start evicting GMRs to make room. If the DMA buffer can't be
3675          * used as a GMR, this will return -ENOMEM.
3676          */
3677
3678         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3679                               false);
3680         if (likely(ret == 0 || ret == -ERESTARTSYS))
3681                 return ret;
3682
3683         /*
3684          * If that failed, try VRAM again, this time evicting
3685          * previous contents.
3686          */
3687
3688         ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3689         return ret;
3690 }
3691
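/**
 * vmw_validate_buffers - Validate every buffer object on the validation list
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context holding the list of buffers to validate.
 */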
3692 static int vmw_validate_buffers(struct vmw_private *dev_priv,
3693                                 struct vmw_sw_context *sw_context)
3694 {
3695         struct vmw_validate_buffer *entry;
3696         int ret;
3697
3698         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3699                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3700                                                  true,
3701                                                  entry->validate_as_mob);
3702                 if (unlikely(ret != 0))
3703                         return ret;
3704         }
3705         return 0;
3706 }
3707
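/**
 * vmw_resize_cmd_bounce - Make sure the command bounce buffer is large enough
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: The requested minimum size in bytes.
 *
 * The buffer grows by roughly 50% per step, page-aligned, until @size
 * fits. For example, assuming a 32 KiB initial size, a 100 KiB batch
 * would grow the buffer 32 KiB -> 48 KiB -> 72 KiB -> 108 KiB. Old
 * contents are discarded; callers copy the command batch in afterwards.
 */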
3708 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3709                                  uint32_t size)
3710 {
3711         if (likely(sw_context->cmd_bounce_size >= size))
3712                 return 0;
3713
3714         if (sw_context->cmd_bounce_size == 0)
3715                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3716
3717         while (sw_context->cmd_bounce_size < size) {
3718                 sw_context->cmd_bounce_size =
3719                         PAGE_ALIGN(sw_context->cmd_bounce_size +
3720                                    (sw_context->cmd_bounce_size >> 1));
3721         }
3722
3723         vfree(sw_context->cmd_bounce);
3724         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3725
3726         if (sw_context->cmd_bounce == NULL) {
3727                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
3728                 sw_context->cmd_bounce_size = 0;
3729                 return -ENOMEM;
3730         }
3731
3732         return 0;
3733 }
3734
3735 /**
3736  * vmw_execbuf_fence_commands - create and submit a command stream fence
3737  *
3738  * Creates a fence object and submits a command stream marker.
3739  * If this fails for some reason, we sync the fifo and return a NULL fence
3740  * pointer in @p_fence. It is then safe to fence buffers with a NULL pointer.
3741  *
3742  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3743  * user-space handle is created for the fence; otherwise no handle is created.
3744  */
3745
3746 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3747                                struct vmw_private *dev_priv,
3748                                struct vmw_fence_obj **p_fence,
3749                                uint32_t *p_handle)
3750 {
3751         uint32_t sequence;
3752         int ret;
3753         bool synced = false;
3754
3755         /* p_handle implies file_priv. */
3756         BUG_ON(p_handle != NULL && file_priv == NULL);
3757
3758         ret = vmw_fifo_send_fence(dev_priv, &sequence);
3759         if (unlikely(ret != 0)) {
3760                 DRM_ERROR("Fence submission error. Syncing.\n");
3761                 synced = true;
3762         }
3763
3764         if (p_handle != NULL)
3765                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3766                                             sequence, p_fence, p_handle);
3767         else
3768                 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3769
3770         if (unlikely(ret != 0 && !synced)) {
3771                 (void) vmw_fallback_wait(dev_priv, false, false,
3772                                          sequence, false,
3773                                          VMW_FENCE_WAIT_TIMEOUT);
3774                 *p_fence = NULL;
3775         }
3776
3777         return ret;
3778 }
3779
3780 /**
3781  * vmw_execbuf_copy_fence_user - copy fence object information to
3782  * user-space.
3783  *
3784  * @dev_priv: Pointer to a vmw_private struct.
3785  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3786  * @ret: Return value from fence object creation.
3787  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3788  * which the information should be copied.
3789  * @fence: Pointer to the fence object.
3790  * @fence_handle: User-space fence handle.
3791  *
3792  * This function copies fence information to user-space. If copying fails,
3793  * the user-space struct drm_vmw_fence_rep::error member is hopefully
3794  * left untouched, and if it's preloaded with -EFAULT by user-space,
3795  * the error will hopefully be detected.
3796  * Also if copying fails, user-space will be unable to signal the fence
3797  * object so we wait for it immediately, and then unreference the
3798  * user-space reference.
3799  */
3800 void
3801 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3802                             struct vmw_fpriv *vmw_fp,
3803                             int ret,
3804                             struct drm_vmw_fence_rep __user *user_fence_rep,
3805                             struct vmw_fence_obj *fence,
3806                             uint32_t fence_handle)
3807 {
3808         struct drm_vmw_fence_rep fence_rep;
3809
3810         if (user_fence_rep == NULL)
3811                 return;
3812
3813         memset(&fence_rep, 0, sizeof(fence_rep));
3814
3815         fence_rep.error = ret;
3816         if (ret == 0) {
3817                 BUG_ON(fence == NULL);
3818
3819                 fence_rep.handle = fence_handle;
3820                 fence_rep.seqno = fence->base.seqno;
3821                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3822                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3823         }
3824
3825         /*
3826          * copy_to_user errors will be detected by user space not
3827          * seeing fence_rep::error filled in. Typically
3828          * user-space would have pre-set that member to -EFAULT.
3829          */
3830         ret = copy_to_user(user_fence_rep, &fence_rep,
3831                            sizeof(fence_rep));
3832
3833         /*
3834          * User-space lost the fence object. We need to sync
3835          * and unreference the handle.
3836          */
3837         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3838                 ttm_ref_object_base_unref(vmw_fp->tfile,
3839                                           fence_handle, TTM_REF_USAGE);
3840                 DRM_ERROR("Fence copy error. Syncing.\n");
3841                 (void) vmw_fence_obj_wait(fence, false, false,
3842                                           VMW_FENCE_WAIT_TIMEOUT);
3843         }
3844 }
3845
3846 /**
3847  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3848  * the fifo.
3849  *
3850  * @dev_priv: Pointer to a device private structure.
3851  * @kernel_commands: Pointer to the unpatched command batch.
3852  * @command_size: Size of the unpatched command batch.
3853  * @sw_context: Structure holding the relocation lists.
3854  *
3855  * Side effects: If this function returns 0, then the command batch
3856  * pointed to by @kernel_commands will have been modified.
3857  */
3858 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3859                                    void *kernel_commands,
3860                                    u32 command_size,
3861                                    struct vmw_sw_context *sw_context)
3862 {
3863         void *cmd;
3864
3865         if (sw_context->dx_ctx_node)
3866                 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3867                                           sw_context->dx_ctx_node->res->id);
3868         else
3869                 cmd = vmw_fifo_reserve(dev_priv, command_size);
3870         if (!cmd) {
3871                 DRM_ERROR("Failed reserving fifo space for commands.\n");
3872                 return -ENOMEM;
3873         }
3874
3875         vmw_apply_relocations(sw_context);
3876         memcpy(cmd, kernel_commands, command_size);
3877         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3878         vmw_resource_relocations_free(&sw_context->res_relocations);
3879         vmw_fifo_commit(dev_priv, command_size);
3880
3881         return 0;
3882 }
3883
3884 /**
3885  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3886  * the command buffer manager.
3887  *
3888  * @dev_priv: Pointer to a device private structure.
3889  * @header: Opaque handle to the command buffer allocation.
3890  * @command_size: Size of the unpatched command batch.
3891  * @sw_context: Structure holding the relocation lists.
3892  *
3893  * Side effects: If this function returns 0, then the command buffer
3894  * represented by @header will have been modified.
3895  */
3896 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3897                                      struct vmw_cmdbuf_header *header,
3898                                      u32 command_size,
3899                                      struct vmw_sw_context *sw_context)
3900 {
3901         u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3902                   SVGA3D_INVALID_ID);
3903         void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3904                                        id, false, header);
3905
3906         vmw_apply_relocations(sw_context);
3907         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3908         vmw_resource_relocations_free(&sw_context->res_relocations);
3909         vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3910
3911         return 0;
3912 }
3913
3914 /**
3915  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3916  * submission using a command buffer.
3917  *
3918  * @dev_priv: Pointer to a device private structure.
3919  * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-side copy of the command batch, or NULL.
3920  * @command_size: Size of the unpatched command batch.
3921  * @header: Out parameter returning the opaque pointer to the command buffer.
3922  *
3923  * This function checks whether we can use the command buffer manager for
3924  * submission and if so, creates a command buffer of suitable size and
3925  * copies the user data into that buffer.
3926  *
3927  * On successful return, the function returns a pointer to the data in the
3928  * command buffer and *@header is set to non-NULL.
3929  * If command buffers could not be used, the function will return the value
3930  * of @kernel_commands on function call. That value may be NULL. In that case,
3931  * the value of *@header will be set to NULL.
3932  * If an error is encountered, the function will return a pointer error value.
3933  * If the function is interrupted by a signal while sleeping, it will return
3934  * -ERESTARTSYS cast to a pointer error value.
3935  */
3936 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3937                                 void __user *user_commands,
3938                                 void *kernel_commands,
3939                                 u32 command_size,
3940                                 struct vmw_cmdbuf_header **header)
3941 {
3942         size_t cmdbuf_size;
3943         int ret;
3944
3945         *header = NULL;
3946         if (command_size > SVGA_CB_MAX_SIZE) {
3947                 DRM_ERROR("Command buffer is too large.\n");
3948                 return ERR_PTR(-EINVAL);
3949         }
3950
3951         if (!dev_priv->cman || kernel_commands)
3952                 return kernel_commands;
3953
3954         /* If possible, add a little space for fencing. */
3955         cmdbuf_size = command_size + 512;
3956         cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3957         kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3958                                            true, header);
3959         if (IS_ERR(kernel_commands))
3960                 return kernel_commands;
3961
3962         ret = copy_from_user(kernel_commands, user_commands,
3963                              command_size);
3964         if (ret) {
3965                 DRM_ERROR("Failed copying commands.\n");
3966                 vmw_cmdbuf_header_free(*header);
3967                 *header = NULL;
3968                 return ERR_PTR(-EFAULT);
3969         }
3970
3971         return kernel_commands;
3972 }
3973
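/**
 * vmw_execbuf_tie_context - Set up the DX context node for a command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if
 * the batch does not use a DX context.
 *
 * Looks up the context, adds it to the resource validation list and
 * makes it the software context's DX context node and the source of its
 * resource manager.
 */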
3974 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3975                                    struct vmw_sw_context *sw_context,
3976                                    uint32_t handle)
3977 {
3978         struct vmw_resource_val_node *ctx_node;
3979         struct vmw_resource *res;
3980         int ret;
3981
3982         if (handle == SVGA3D_INVALID_ID)
3983                 return 0;
3984
3985         ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
3986                                               handle, user_context_converter,
3987                                               &res);
3988         if (unlikely(ret != 0)) {
3989                 DRM_ERROR("Could not find or user DX context 0x%08x.\n",
3990                           (unsigned) handle);
3991                 return ret;
3992         }
3993
3994         ret = vmw_resource_val_add(sw_context, res, &ctx_node);
3995         if (unlikely(ret != 0))
3996                 goto out_err;
3997
3998         sw_context->dx_ctx_node = ctx_node;
3999         sw_context->man = vmw_context_res_man(res);
4000 out_err:
4001         vmw_resource_unreference(&res);
4002         return ret;
4003 }
4004
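/**
 * vmw_execbuf_process - Validate and submit a user- or kernel-space
 * command batch.
 *
 * @file_priv: Pointer to the struct drm_file identifying the caller.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the command batch, or NULL if
 * @kernel_commands is used instead.
 * @kernel_commands: Kernel pointer to the command batch, or NULL if the
 * batch should be taken from @user_commands.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: If non-zero, throttle the submission against previously
 * emitted fifo markers (see vmw_wait_lag()).
 * @dx_context_handle: User-space handle of the DX context to submit
 * against, or SVGA3D_INVALID_ID for none.
 * @user_fence_rep: User-space pointer receiving fence information, or
 * NULL.
 * @out_fence: If non-NULL, *@out_fence is set to the fence created for
 * this batch, and the caller becomes responsible for unreferencing it.
 *
 * Returns 0 on success, negative error code on failure.
 */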
4005 int vmw_execbuf_process(struct drm_file *file_priv,
4006                         struct vmw_private *dev_priv,
4007                         void __user *user_commands,
4008                         void *kernel_commands,
4009                         uint32_t command_size,
4010                         uint64_t throttle_us,
4011                         uint32_t dx_context_handle,
4012                         struct drm_vmw_fence_rep __user *user_fence_rep,
4013                         struct vmw_fence_obj **out_fence)
4014 {
4015         struct vmw_sw_context *sw_context = &dev_priv->ctx;
4016         struct vmw_fence_obj *fence = NULL;
4017         struct vmw_resource *error_resource;
4018         struct list_head resource_list;
4019         struct vmw_cmdbuf_header *header;
4020         struct ww_acquire_ctx ticket;
4021         uint32_t handle;
4022         int ret;
4023
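        /* Optionally throttle this submission against outstanding fifo markers. */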
4024         if (throttle_us) {
4025                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
4026                                    throttle_us);
4027
4028                 if (ret)
4029                         return ret;
4030         }
4031
4032         kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4033                                              kernel_commands, command_size,
4034                                              &header);
4035         if (IS_ERR(kernel_commands))
4036                 return PTR_ERR(kernel_commands);
4037
4038         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4039         if (ret) {
4040                 ret = -ERESTARTSYS;
4041                 goto out_free_header;
4042         }
4043
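        /*
         * If the batch wasn't placed in a command buffer, copy user-space
         * commands into the kernel bounce buffer. A non-NULL kernel_commands
         * without a header means a trusted in-kernel caller.
         */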
4044         sw_context->kernel = false;
4045         if (kernel_commands == NULL) {
4046                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
4047                 if (unlikely(ret != 0))
4048                         goto out_unlock;
4049
4051                 ret = copy_from_user(sw_context->cmd_bounce,
4052                                      user_commands, command_size);
4053
4054                 if (unlikely(ret != 0)) {
4055                         ret = -EFAULT;
4056                         DRM_ERROR("Failed copying commands.\n");
4057                         goto out_unlock;
4058                 }
4059                 kernel_commands = sw_context->cmd_bounce;
4060         } else if (!header)
4061                 sw_context->kernel = true;
4062
4063         sw_context->fp = vmw_fpriv(file_priv);
4064         sw_context->cur_reloc = 0;
4065         sw_context->cur_val_buf = 0;
4066         INIT_LIST_HEAD(&sw_context->resource_list);
4067         INIT_LIST_HEAD(&sw_context->ctx_resource_list);
4068         sw_context->cur_query_bo = dev_priv->pinned_bo;
4069         sw_context->last_query_ctx = NULL;
4070         sw_context->needs_post_query_barrier = false;
4071         sw_context->dx_ctx_node = NULL;
4072         sw_context->dx_query_mob = NULL;
4073         sw_context->dx_query_ctx = NULL;
4074         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4075         INIT_LIST_HEAD(&sw_context->validate_nodes);
4076         INIT_LIST_HEAD(&sw_context->res_relocations);
4077         if (sw_context->staged_bindings)
4078                 vmw_binding_state_reset(sw_context->staged_bindings);
4079
4080         if (!sw_context->res_ht_initialized) {
4081                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4082                 if (unlikely(ret != 0))
4083                         goto out_unlock;
4084                 sw_context->res_ht_initialized = true;
4085         }
4086         INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4087         INIT_LIST_HEAD(&resource_list);
4088         ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4089         if (unlikely(ret != 0)) {
4090                 list_splice_init(&sw_context->ctx_resource_list,
4091                                  &sw_context->resource_list);
4092                 goto out_err_nores;
4093         }
4094
4095         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4096                                 command_size);
4097         /*
4098          * Merge the resource lists before checking the return status
4099          * from vmw_cmd_check_all so that all the open hashtabs will
4100          * be handled properly even if vmw_cmd_check_all fails.
4101          */
4102         list_splice_init(&sw_context->ctx_resource_list,
4103                          &sw_context->resource_list);
4104
4105         if (unlikely(ret != 0))
4106                 goto out_err_nores;
4107
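        /*
         * Reserve and validate all resources and buffer objects referenced
         * by the now-verified command batch.
         */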
4108         ret = vmw_resources_reserve(sw_context);
4109         if (unlikely(ret != 0))
4110                 goto out_err_nores;
4111
4112         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4113                                      true, NULL);
4114         if (unlikely(ret != 0))
4115                 goto out_err_nores;
4116
4117         ret = vmw_validate_buffers(dev_priv, sw_context);
4118         if (unlikely(ret != 0))
4119                 goto out_err;
4120
4121         ret = vmw_resources_validate(sw_context);
4122         if (unlikely(ret != 0))
4123                 goto out_err;
4124
4125         ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4126         if (unlikely(ret != 0)) {
4127                 ret = -ERESTARTSYS;
4128                 goto out_err;
4129         }
4130
4131         if (dev_priv->has_mob) {
4132                 ret = vmw_rebind_contexts(sw_context);
4133                 if (unlikely(ret != 0))
4134                         goto out_unlock_binding;
4135         }
4136
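        /*
         * Submit the patched batch, either directly through the fifo or,
         * if one was set up, through the command buffer header.
         */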
4137         if (!header) {
4138                 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4139                                               command_size, sw_context);
4140         } else {
4141                 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4142                                                 sw_context);
4143                 header = NULL;
4144         }
4145         mutex_unlock(&dev_priv->binding_mutex);
4146         if (ret)
4147                 goto out_err;
4148
4149         vmw_query_bo_switch_commit(dev_priv, sw_context);
4150         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4151                                          &fence,
4152                                          (user_fence_rep) ? &handle : NULL);
4153         /*
4154          * This error is harmless, because if fence submission fails,
4155          * vmw_fifo_send_fence will sync. The error will be propagated to
4156          * user-space in @user_fence_rep.
4157          */
4158
4159         if (ret != 0)
4160                 DRM_ERROR("Fence submission error. Syncing.\n");
4161
4162         vmw_resources_unreserve(sw_context, false);
4163
4164         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4165                                     (void *) fence);
4166
4167         if (unlikely(dev_priv->pinned_bo != NULL &&
4168                      !dev_priv->query_cid_valid))
4169                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4170
4171         vmw_clear_validations(sw_context);
4172         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4173                                     user_fence_rep, fence, handle);
4174
4175         /* Don't unreference when handing fence out */
4176         if (unlikely(out_fence != NULL)) {
4177                 *out_fence = fence;
4178                 fence = NULL;
4179         } else if (likely(fence != NULL)) {
4180                 vmw_fence_obj_unreference(&fence);
4181         }
4182
4183         list_splice_init(&sw_context->resource_list, &resource_list);
4184         vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4185         mutex_unlock(&dev_priv->cmdbuf_mutex);
4186
4187         /*
4188          * Unreference resources outside of the cmdbuf_mutex to
4189          * avoid deadlocks in resource destruction paths.
4190          */
4191         vmw_resource_list_unreference(sw_context, &resource_list);
4192
4193         return 0;
4194
4195 out_unlock_binding:
4196         mutex_unlock(&dev_priv->binding_mutex);
4197 out_err:
4198         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4199 out_err_nores:
4200         vmw_resources_unreserve(sw_context, true);
4201         vmw_resource_relocations_free(&sw_context->res_relocations);
4202         vmw_free_relocations(sw_context);
4203         vmw_clear_validations(sw_context);
4204         if (unlikely(dev_priv->pinned_bo != NULL &&
4205                      !dev_priv->query_cid_valid))
4206                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4207 out_unlock:
4208         list_splice_init(&sw_context->resource_list, &resource_list);
4209         error_resource = sw_context->error_resource;
4210         sw_context->error_resource = NULL;
4211         vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4212         mutex_unlock(&dev_priv->cmdbuf_mutex);
4213
4214         /*
4215          * Unreference resources outside of the cmdbuf_mutex to
4216          * avoid deadlocks in resource destruction paths.
4217          */
4218         vmw_resource_list_unreference(sw_context, &resource_list);
4219         if (unlikely(error_resource != NULL))
4220                 vmw_resource_unreference(&error_resource);
4221 out_free_header:
4222         if (header)
4223                 vmw_cmdbuf_header_free(header);
4224
4225         return ret;
4226 }
4227
4228 /**
4229  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4230  *
4231  * @dev_priv: The device private structure.
4232  *
4233  * This function is called to idle the fifo and unpin the query buffer
4234  * if the normal way to do this hits an error, which should typically be
4235  * extremely rare.
4236  */
4237 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4238 {
4239         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4240
4241         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4242         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4243         if (dev_priv->dummy_query_bo_pinned) {
4244                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4245                 dev_priv->dummy_query_bo_pinned = false;
4246         }
4247 }
4248
4249
4250 /**
4251  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4252  * query bo.
4253  *
4254  * @dev_priv: The device private structure.
4255  * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
4256  * _after_ a query barrier that flushes all queries touching the current
4257  * buffer pointed to by @dev_priv->pinned_bo.
4258  *
4259  * This function should be used to unpin the pinned query bo, or
4260  * as a query barrier when we need to make sure that all queries have
4261  * finished before the next fifo command. (For example on hardware
4262  * context destruction, where the hardware may otherwise leak unfinished
4263  * queries.)
4264  *
4265  * This function does not return any failure codes, but makes attempts
4266  * to do safe unpinning in case of errors.
4267  *
4268  * The function will synchronize on the previous query barrier, and will
4269  * thus not finish until that barrier has executed.
4270  *
4271  * The @dev_priv->cmdbuf_mutex must be held by the current thread
4272  * before calling this function.
4273  */
4274 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4275                                      struct vmw_fence_obj *fence)
4276 {
4277         int ret = 0;
4278         struct list_head validate_list;
4279         struct ttm_validate_buffer pinned_val, query_val;
4280         struct vmw_fence_obj *lfence = NULL;
4281         struct ww_acquire_ctx ticket;
4282
4283         if (dev_priv->pinned_bo == NULL)
4284                 goto out_unlock;
4285
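        /*
         * Put both the pinned query bo and the dummy query bo on a
         * validation list so that they can be reserved and fenced together.
         */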
4286         INIT_LIST_HEAD(&validate_list);
4287
4288         pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4289         pinned_val.shared = false;
4290         list_add_tail(&pinned_val.head, &validate_list);
4291
4292         query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4293         query_val.shared = false;
4294         list_add_tail(&query_val.head, &validate_list);
4295
4296         ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4297                                      false, NULL);
4298         if (unlikely(ret != 0)) {
4299                 vmw_execbuf_unpin_panic(dev_priv);
4300                 goto out_no_reserve;
4301         }
4302
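        /*
         * If a query context is still active, emit a dummy query to act
         * as a barrier for all outstanding queries before unpinning.
         */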
4303         if (dev_priv->query_cid_valid) {
4304                 BUG_ON(fence != NULL);
4305                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4306                 if (unlikely(ret != 0)) {
4307                         vmw_execbuf_unpin_panic(dev_priv);
4308                         goto out_no_emit;
4309                 }
4310                 dev_priv->query_cid_valid = false;
4311         }
4312
4313         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4314         if (dev_priv->dummy_query_bo_pinned) {
4315                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4316                 dev_priv->dummy_query_bo_pinned = false;
4317         }
4318         if (fence == NULL) {
4319                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4320                                                   NULL);
4321                 fence = lfence;
4322         }
4323         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4324         if (lfence != NULL)
4325                 vmw_fence_obj_unreference(&lfence);
4326
4327         ttm_bo_unref(&query_val.bo);
4328         ttm_bo_unref(&pinned_val.bo);
4329         vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4330 out_unlock:
4331         return;
4332
4333 out_no_emit:
4334         ttm_eu_backoff_reservation(&ticket, &validate_list);
4335 out_no_reserve:
4336         ttm_bo_unref(&query_val.bo);
4337         ttm_bo_unref(&pinned_val.bo);
4338         vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4339 }
4340
4341 /**
4342  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4343  * query bo.
4344  *
4345  * @dev_priv: The device private structure.
4346  *
4347  * This function should be used to unpin the pinned query bo, or
4348  * as a query barrier when we need to make sure that all queries have
4349  * finished before the next fifo command. (For example on hardware
4350  * context destruction, where the hardware may otherwise leak unfinished
4351  * queries.)
4352  *
4353  * This function does not return any failure codes, but makes attempts
4354  * to do safe unpinning in case of errors.
4355  *
4356  * The function will synchronize on the previous query barrier, and will
4357  * thus not finish until that barrier has executed.
4358  */
4359 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4360 {
4361         mutex_lock(&dev_priv->cmdbuf_mutex);
4362         if (dev_priv->query_cid_valid)
4363                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4364         mutex_unlock(&dev_priv->cmdbuf_mutex);
4365 }
4366
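/**
 * vmw_execbuf_ioctl - Entry point for the execbuf ioctl.
 *
 * @dev: Pointer to the struct drm_device.
 * @data: User-space address of the struct drm_vmw_execbuf_arg argument.
 * @file_priv: Pointer to the struct drm_file identifying the caller.
 * @size: Size of the ioctl argument as provided by the ioctl dispatcher.
 *
 * Copies in the versioned struct drm_vmw_execbuf_arg, maintaining
 * backwards compatibility with older argument layouts, and hands the
 * command batch over to vmw_execbuf_process().
 */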
4367 int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4368                       struct drm_file *file_priv, size_t size)
4369 {
4370         struct vmw_private *dev_priv = vmw_priv(dev);
4371         struct drm_vmw_execbuf_arg arg;
4372         int ret;
4373         static const size_t copy_offset[] = {
4374                 offsetof(struct drm_vmw_execbuf_arg, context_handle),
4375                 sizeof(struct drm_vmw_execbuf_arg)};
4376
4377         if (unlikely(size < copy_offset[0])) {
4378                 DRM_ERROR("Invalid command size, ioctl %d\n",
4379                           DRM_VMW_EXECBUF);
4380                 return -EINVAL;
4381         }
4382
4383         if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4384                 return -EFAULT;
4385
4386         /*
4387          * Extend the ioctl argument while maintaining backwards
4388          * compatibility: we take different code paths depending on the
4389          * value of arg.version.
4391          */
4392
4393         if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4394                      arg.version == 0)) {
4395                 DRM_ERROR("Incorrect execbuf version.\n");
4396                 return -EINVAL;
4397         }
4398
4399         if (arg.version > 1 &&
4400             copy_from_user(&arg.context_handle,
4401                            (void __user *) (data + copy_offset[0]),
4402                            copy_offset[arg.version - 1] -
4403                            copy_offset[0]) != 0)
4404                 return -EFAULT;
4405
4406         switch (arg.version) {
4407         case 1:
4408                 arg.context_handle = (uint32_t) -1;
4409                 break;
4410         case 2:
4411                 if (arg.pad64 != 0) {
4412                         DRM_ERROR("Unused IOCTL data not set to zero.\n");
4413                         return -EINVAL;
4414                 }
4415                 break;
4416         default:
4417                 break;
4418         }
4419
4420         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4421         if (unlikely(ret != 0))
4422                 return ret;
4423
4424         ret = vmw_execbuf_process(file_priv, dev_priv,
4425                                   (void __user *)(unsigned long)arg.commands,
4426                                   NULL, arg.command_size, arg.throttle_us,
4427                                   arg.context_handle,
4428                                   (void __user *)(unsigned long)arg.fence_rep,
4429                                   NULL);
4430         ttm_read_unlock(&dev_priv->reservation_sem);
4431         if (unlikely(ret != 0))
4432                 return ret;
4433
4434         vmw_kms_cursor_post_execbuf(dev_priv);
4435
4436         return 0;
4437 }