/*
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"
#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
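/*
 * Queue a VIRTGPU_EVENT_FENCE_SIGNALED drm_event on @file for @fence, but
 * only if the file asked to poll @ring_idx via its ring_idx_mask (see
 * VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK below); otherwise this is a no-op.
 * The event is delivered to userspace once the fence signals.
 */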
static int virtio_gpu_fence_event_create(struct drm_device *dev,
					 struct drm_file *file,
					 struct virtio_gpu_fence *fence,
					 uint32_t ring_idx)
{
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence_event *e = NULL;
	int ret;

	if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
		return 0;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
	e->event.length = sizeof(e->event);

	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
	if (ret)
		goto free;

	fence->e = e;
	return 0;
free:
	kfree(e);
	return ret;
}
/* Must be called with &virtio_gpu_fpriv.context_lock held. */
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
					     struct virtio_gpu_fpriv *vfpriv)
{
	char dbgname[TASK_COMM_LEN];

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      vfpriv->context_init, strlen(dbgname),
				      dbgname);

	vfpriv->context_created = true;
}
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	virtio_gpu_create_context_locked(vgdev, vfpriv);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}
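/*
 * VIRTGPU_MAP: look up the fake mmap offset of a dumb buffer so that
 * userspace can subsequently mmap() it through the DRM file descriptor.
 */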
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
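/*
 * A minimal userspace sketch (not part of the driver; assumes libdrm's
 * drmIoctl() and an already-built, capset-specific command buffer) of how
 * this ioctl is typically driven with an out-fence:
 *
 *	struct drm_virtgpu_execbuffer exbuf = {
 *		.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *		.size = cmd_size,
 *		.command = (uint64_t)(uintptr_t)cmd,
 *		.bo_handles = (uint64_t)(uintptr_t)handles,
 *		.num_bo_handles = num_handles,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exbuf) == 0)
 *		wait_and_close(exbuf.fence_fd);	// hypothetical helper
 */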
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;
	uint64_t fence_ctx;
	uint32_t ring_idx = 0;

	fence_ctx = vgdev->fence_drv.context;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) {
		if (exbuf->ring_idx >= vfpriv->num_rings)
			return -EINVAL;

		if (!vfpriv->base_fence_ctx)
			return -EINVAL;

		fence_ctx = vfpriv->base_fence_ctx;
		ring_idx = exbuf->ring_idx;
	}

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
	if (ret)
		goto out_unresv;

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
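/*
 * VIRTGPU_GETPARAM: report a single device capability (3D support, blob
 * resources, context init, ...) to userspace through a user pointer.
 */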
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CONTEXT_INIT:
		value = vgdev->has_context_init ? 1 : 0;
		break;
	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
		value = vgdev->capset_id_mask;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}
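/*
 * VIRTGPU_RESOURCE_CREATE: create a classic (non-blob) host resource backed
 * by a guest GEM object; with virgl 3D the texture parameters are passed
 * through to the host renderer, otherwise only simple 2D resources are
 * accepted.
 */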
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}
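/*
 * VIRTGPU_RESOURCE_INFO: translate a GEM handle into its size and host
 * resource id (plus blob_mem for blob resources).
 */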
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}
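/*
 * VIRTGPU_TRANSFER_FROM_HOST: copy data from the host-side resource back
 * into the guest backing store; only meaningful with virgl 3D.
 */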
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
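/*
 * VIRTGPU_TRANSFER_TO_HOST: flush data from the guest backing store to the
 * host resource; a simple 2D transfer without virgl, a fenced 3D transfer
 * otherwise.
 */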
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
					       0);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
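/*
 * VIRTGPU_WAIT: wait (or just poll, with VIRTGPU_WAIT_NOWAIT) for all
 * pending work on a GEM object's reservation object to signal.
 */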
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
	} else {
		ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
					    true, timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}
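/*
 * VIRTGPU_GET_CAPS: return a cached copy of the host capability set
 * matching the requested id/version, fetching it from the host on a cache
 * miss.
 */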
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* is_valid check must precede copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}
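/*
 * Validate a RESOURCE_CREATE_BLOB request against the device's feature set
 * and translate it into virtio_gpu_object_params, classifying the blob as
 * guest backed, host3d backed, or both.
 */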
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}
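/*
 * VIRTGPU_RESOURCE_CREATE_BLOB: create a blob resource, optionally
 * submitting an initialisation command stream first for host3d blobs.
 */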
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	return 0;
}
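/*
 * VIRTGPU_CONTEXT_INIT: one-shot per-file configuration (capset id, number
 * of timeline rings, poll mask) that must happen before the host context is
 * created; it fails with -EEXIST once the context exists.
 *
 * A minimal userspace sketch (not part of the driver; assumes libdrm, and
 * the parameter values are purely illustrative):
 *
 *	struct drm_virtgpu_context_set_param cparams[] = {
 *		{ VIRTGPU_CONTEXT_PARAM_CAPSET_ID, capset_id },
 *		{ VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 2 },
 *	};
 *	struct drm_virtgpu_context_init init = {
 *		.num_params = 2,
 *		.ctx_set_params = (uint64_t)(uintptr_t)cparams,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
 */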
static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
					 void *data, struct drm_file *file)
{
	int ret = 0;
	uint32_t num_params, i, param, value;
	uint64_t valid_ring_mask;
	size_t len;
	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_context_init *args = data;

	num_params = args->num_params;
	len = num_params * sizeof(struct drm_virtgpu_context_set_param);

	if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
		return -EINVAL;

	/* Number of unique parameters supported at this time. */
	if (num_params > 3)
		return -EINVAL;

	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
				     len);

	if (IS_ERR(ctx_set_params))
		return PTR_ERR(ctx_set_params);

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created) {
		ret = -EEXIST;
		goto out_unlock;
	}

	for (i = 0; i < num_params; i++) {
		param = ctx_set_params[i].param;
		value = ctx_set_params[i].value;

		switch (param) {
		case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
			if (value > MAX_CAPSET_ID) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
				ret = -EINVAL;
				goto out_unlock;
			}

			/* Context capset ID already set */
			if (vfpriv->context_init &
			    VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->context_init |= value;
			break;
		case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
			if (vfpriv->base_fence_ctx) {
				ret = -EINVAL;
				goto out_unlock;
			}

			if (value > MAX_RINGS) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
			vfpriv->num_rings = value;
			break;
		case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
			if (vfpriv->ring_idx_mask) {
				ret = -EINVAL;
				goto out_unlock;
			}

			vfpriv->ring_idx_mask = value;
			break;
		default:
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (vfpriv->ring_idx_mask) {
		valid_ring_mask = 0;
		for (i = 0; i < vfpriv->num_rings; i++)
			valid_ring_mask |= 1ULL << i;

		if (~valid_ring_mask & vfpriv->ring_idx_mask) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	virtio_gpu_create_context_locked(vgdev, vfpriv);
	virtio_gpu_notify(vgdev);

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
	kfree(ctx_set_params);
	return ret;
}
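/* Ioctl table wiring the DRM_IOCTL_VIRTGPU_* numbers to the handlers above. */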
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
			  DRM_RENDER_ALLOW),
};