/*
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "virtgpu_drv.h"
static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}
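/*
 * VIRTGPU_MAP: hand userspace the fake mmap offset of a BO.  A rough
 * sketch of the userspace side (field names from the virtgpu_drm.h uapi):
 *
 *      struct drm_virtgpu_map req = { .handle = bo_handle };
 *      drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &req);
 *      ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, req.offset);
 */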
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_map *virtio_gpu_map = data;

        return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
                                         virtio_gpu_map->handle,
                                         &virtio_gpu_map->offset);
}
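/*
 * Reserve all buffers on @head, then validate each one against its
 * placement.  On any failure the reservations are backed off again, so the
 * caller either holds all of them or none.
 */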
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
                                           struct list_head *head)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;
        int ret;

        ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
        if (ret != 0)
                return ret;

        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);
                ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
                if (ret) {
                        ttm_eu_backoff_reservation(ticket, head);
                        return ret;
                }
        }
        return 0;
}
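/* Drop the GEM references taken when the objects were added to the list. */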
static void virtio_gpu_unref_list(struct list_head *head)
{
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;

        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);

                drm_gem_object_put_unlocked(&qobj->gem_base);
        }
}
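/*
 * A rough sketch of the userspace side of EXECBUFFER (e.g. a virgl-aware
 * Mesa driver); field names are from the virtgpu_drm.h uapi:
 *
 *      struct drm_virtgpu_execbuffer eb = {
 *              .command        = (uintptr_t)cmd_stream,
 *              .size           = cmd_stream_size,
 *              .bo_handles     = (uintptr_t)bo_handle_array,
 *              .num_bo_handles = nr_bos,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
 */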
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *drm_file)
{
        struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
        struct drm_gem_object *gobj;
        struct virtio_gpu_fence *fence;
        struct virtio_gpu_object *qobj;
        int ret;
        uint32_t *bo_handles = NULL;
        void __user *user_bo_handles = NULL;
        struct list_head validate_list;
        struct ttm_validate_buffer *buflist = NULL;
        int i;
        struct ww_acquire_ctx ticket;
        void *buf;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        INIT_LIST_HEAD(&validate_list);
        if (exbuf->num_bo_handles) {
                bo_handles = kvmalloc_array(exbuf->num_bo_handles,
                                            sizeof(uint32_t), GFP_KERNEL);
                buflist = kvmalloc_array(exbuf->num_bo_handles,
                                         sizeof(struct ttm_validate_buffer),
                                         GFP_KERNEL | __GFP_ZERO);
                if (!bo_handles || !buflist) {
                        kvfree(bo_handles);
                        kvfree(buflist);
                        return -ENOMEM;
                }

                user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
                if (copy_from_user(bo_handles, user_bo_handles,
                                   exbuf->num_bo_handles * sizeof(uint32_t))) {
                        kvfree(bo_handles);
                        kvfree(buflist);
                        return -EFAULT;
                }

                for (i = 0; i < exbuf->num_bo_handles; i++) {
                        gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
                        if (!gobj) {
                                kvfree(bo_handles);
                                kvfree(buflist);
                                return -ENOENT;
                        }

                        qobj = gem_to_virtio_gpu_obj(gobj);
                        buflist[i].bo = &qobj->tbo;

                        list_add(&buflist[i].head, &validate_list);
                }
                kvfree(bo_handles);
        }

        ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
        if (ret)
                goto out_free;

        buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
                          exbuf->size);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto out_unresv;
        }
        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                              vfpriv->ctx_id, &fence);

        ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

        /* fence the command bo */
        virtio_gpu_unref_list(&validate_list);
        kvfree(buflist);
        dma_fence_put(&fence->f);
        return 0;

out_unresv:
        ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
        virtio_gpu_unref_list(&validate_list);
        kvfree(buflist);
        return ret;
}
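/* VIRTGPU_GETPARAM: report driver capabilities as plain integers. */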
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_getparam *param = data;
        int value;

        switch (param->param) {
        case VIRTGPU_PARAM_3D_FEATURES:
                value = vgdev->has_virgl_3d ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
                value = 1;
                break;
        default:
                return -EINVAL;
        }
        if (copy_to_user((void __user *)(unsigned long)param->value,
                         &value, sizeof(int))) {
                return -EFAULT;
        }
        return 0;
}
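/*
 * VIRTGPU_RESOURCE_CREATE: create a host resource plus the guest BO backing
 * it.  Without virgl only plain 2d resources are accepted; with virgl the
 * request is forwarded to the host and the BO is fenced so it cannot be
 * used before the host has actually created the resource.
 */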
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                                            struct drm_file *file_priv)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_resource_create *rc = data;
        int ret;
        uint32_t res_id;
        struct virtio_gpu_object *qobj;
        struct drm_gem_object *obj;
        uint32_t handle = 0;
        uint32_t size;
        struct list_head validate_list;
        struct ttm_validate_buffer mainbuf;
        struct virtio_gpu_fence *fence = NULL;
        struct ww_acquire_ctx ticket;
        struct virtio_gpu_resource_create_3d rc_3d;

        if (!vgdev->has_virgl_3d) {
                if (rc->depth > 1)
                        return -EINVAL;
                if (rc->nr_samples > 1)
                        return -EINVAL;
                if (rc->last_level > 1)
                        return -EINVAL;
                if (rc->target != 2)
                        return -EINVAL;
                if (rc->array_size > 1)
                        return -EINVAL;
        }

        INIT_LIST_HEAD(&validate_list);
        memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

        virtio_gpu_resource_id_get(vgdev, &res_id);

        size = rc->size;

        /* allocate a single page size object */
        if (size == 0)
                size = PAGE_SIZE;

        qobj = virtio_gpu_alloc_object(dev, size, false, false);
        if (IS_ERR(qobj)) {
                ret = PTR_ERR(qobj);
                goto fail_id;
        }
        obj = &qobj->gem_base;

        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
                                               rc->width, rc->height);

                ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
        } else {
                /* use a gem reference since unref list undoes them */
                drm_gem_object_get(&qobj->gem_base);
                mainbuf.bo = &qobj->tbo;
                list_add(&mainbuf.head, &validate_list);

                ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
                if (ret) {
                        DRM_DEBUG("failed to validate\n");
                        goto fail_unref;
                }

                rc_3d.resource_id = cpu_to_le32(res_id);
                rc_3d.target = cpu_to_le32(rc->target);
                rc_3d.format = cpu_to_le32(rc->format);
                rc_3d.bind = cpu_to_le32(rc->bind);
                rc_3d.width = cpu_to_le32(rc->width);
                rc_3d.height = cpu_to_le32(rc->height);
                rc_3d.depth = cpu_to_le32(rc->depth);
                rc_3d.array_size = cpu_to_le32(rc->array_size);
                rc_3d.last_level = cpu_to_le32(rc->last_level);
                rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
                rc_3d.flags = cpu_to_le32(rc->flags);

                virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
                ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
                if (ret) {
                        ttm_eu_backoff_reservation(&ticket, &validate_list);
                        goto fail_unref;
                }
                ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
        }

        qobj->hw_res_handle = res_id;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        if (ret) {
                drm_gem_object_release(obj);
                if (vgdev->has_virgl_3d) {
                        virtio_gpu_unref_list(&validate_list);
                        dma_fence_put(&fence->f);
                }
                goto fail_unref;
        }

        rc->res_handle = res_id; /* similar to a VM address */
        rc->bo_handle = handle;

        if (vgdev->has_virgl_3d) {
                virtio_gpu_unref_list(&validate_list);
                dma_fence_put(&fence->f);
        }

        /*
         * The handle owns the reference now.  But we must drop our
         * remaining reference *after* we no longer need to dereference
         * the obj.  Otherwise userspace could guess the handle and
         * race closing it from another thread.
         */
        drm_gem_object_put_unlocked(obj);

        return 0;

fail_unref:
        if (vgdev->has_virgl_3d) {
                virtio_gpu_unref_list(&validate_list);
                dma_fence_put(&fence->f);
        }
fail_id:
        virtio_gpu_resource_id_put(vgdev, res_id);
        return ret;
}
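/* VIRTGPU_RESOURCE_INFO: report the size and host resource id of a BO. */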
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file_priv)
{
        struct drm_virtgpu_resource_info *ri = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;

        gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ri->size = qobj->gem_base.size;
        ri->res_handle = qobj->hw_res_handle;
        drm_gem_object_put_unlocked(gobj);
        return 0;
}
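/*
 * VIRTGPU_TRANSFER_FROM_HOST: read a region of a (3d) host resource back
 * into the guest BO.  The readback is fenced through the BO's reservation
 * object, so later users of the BO can wait for it to complete.
 */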
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                                               void *data,
                                               struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_from_host *args = data;
        struct ttm_operation_ctx ctx = { true, false };
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;
        struct virtio_gpu_box box;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        gobj = drm_gem_object_lookup(file, args->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ret = virtio_gpu_object_reserve(qobj, false);
        if (ret)
                goto out;

        ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
        if (unlikely(ret))
                goto out_unres;

        convert_to_hw_box(&box, &args->box);
        virtio_gpu_cmd_transfer_from_host_3d
                (vgdev, qobj->hw_res_handle,
                 vfpriv->ctx_id, offset, args->level,
                 &box, &fence);
        reservation_object_add_excl_fence(qobj->tbo.resv,
                                          &fence->f);

        dma_fence_put(&fence->f);
out_unres:
        virtio_gpu_object_unreserve(qobj);
out:
        drm_gem_object_put_unlocked(gobj);
        return ret;
}
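/*
 * VIRTGPU_TRANSFER_TO_HOST: push a region of the guest BO to the host
 * resource.  2d transfers are not fenced; 3d transfers attach a fence to
 * the BO's reservation object, as in the readback path above.
 */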
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                                             struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
        struct ttm_operation_ctx ctx = { true, false };
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
        struct virtio_gpu_box box;
        int ret;
        u32 offset = args->offset;

        gobj = drm_gem_object_lookup(file, args->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ret = virtio_gpu_object_reserve(qobj, false);
        if (ret)
                goto out;

        ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
        if (unlikely(ret))
                goto out_unres;

        convert_to_hw_box(&box, &args->box);
        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, qobj->hw_res_handle, offset,
                         box.w, box.h, box.x, box.y, NULL);
        } else {
                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev, qobj->hw_res_handle,
                         vfpriv ? vfpriv->ctx_id : 0, offset,
                         args->level, &box, &fence);
                reservation_object_add_excl_fence(qobj->tbo.resv,
                                                  &fence->f);
                dma_fence_put(&fence->f);
        }

out_unres:
        virtio_gpu_object_unreserve(qobj);
out:
        drm_gem_object_put_unlocked(gobj);
        return ret;
}
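/* VIRTGPU_WAIT: wait (or poll, with VIRTGPU_WAIT_NOWAIT) until a BO is idle. */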
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct drm_virtgpu_3d_wait *args = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        int ret;
        bool nowait = false;

        gobj = drm_gem_object_lookup(file, args->handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        if (args->flags & VIRTGPU_WAIT_NOWAIT)
                nowait = true;
        ret = virtio_gpu_object_wait(qobj, nowait);

        drm_gem_object_put_unlocked(gobj);
        return ret;
}
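/*
 * VIRTGPU_GET_CAPS: copy a host capability set to userspace.  Served from
 * the capset cache when possible; otherwise the host is queried and we wait
 * (up to 5 seconds) for the response before copying it out.
 */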
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                                     void *data, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_get_caps *args = data;
        unsigned size, host_caps_size;
        int i;
        int found_valid = -1;
        int ret;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *ptr;

        if (vgdev->num_capsets == 0)
                return -ENOSYS;

        /* don't allow userspace to pass 0 */
        if (args->size == 0)
                return -EINVAL;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_capsets; i++) {
                if (vgdev->capsets[i].id == args->cap_set_id) {
                        if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
                                found_valid = i;
                                break;
                        }
                }
        }

        if (found_valid == -1) {
                spin_unlock(&vgdev->display_info_lock);
                return -EINVAL;
        }

        host_caps_size = vgdev->capsets[found_valid].max_size;
        /* only copy to user the minimum of the host caps size or the guest caps size */
        size = min(args->size, host_caps_size);

        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
                    cache_ent->version == args->cap_set_ver) {
                        ptr = cache_ent->caps_cache;
                        spin_unlock(&vgdev->display_info_lock);
                        goto copy_exit;
                }
        }
        spin_unlock(&vgdev->display_info_lock);

        /* not in cache - need to talk to hw */
        virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                                  &cache_ent);

        ret = wait_event_timeout(vgdev->resp_wq,
                                 atomic_read(&cache_ent->is_valid), 5 * HZ);
        if (!ret)
                return -EBUSY;

        /* is_valid check must precede copy of the cache entry. */
        smp_rmb();

        ptr = cache_ent->caps_cache;

copy_exit:
        if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
                return -EFAULT;

        return 0;
}
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
        DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
                          virtio_gpu_resource_create_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        /* make transfer async to the main ring? - not sure, can we
         * thread these in the underlying GL
         */
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
                          virtio_gpu_transfer_from_host_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
                          virtio_gpu_transfer_to_host_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
                          DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
};