/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)
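/*
 * Every vbuffer is carved from the "virtio-gpu-vbufs" slab as one
 * VBUFFER_SIZE chunk: the struct itself, followed by up to
 * MAX_INLINE_CMD_SIZE bytes of command storage and MAX_INLINE_RESP_SIZE
 * bytes of response storage.  Commands with larger responses must pass
 * an external resp_buf; see virtio_gpu_get_vbuf() below.
 */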
static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}
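/*
 * Virtqueue callbacks, invoked from the virtio interrupt path when the
 * host has used buffers.  They only schedule the matching dequeue work;
 * the actual reclaim and response handling (which may sleep) runs later
 * in process context.
 */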
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}
void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}
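/*
 * Allocate a vbuffer and point the command (and, when it fits, the
 * response) at the inline storage that follows the struct.  The slab
 * allocation uses __GFP_NOFAIL, so no NULL check is needed here.
 */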
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

        BUG_ON(size > MAX_INLINE_CMD_SIZE ||
               size < sizeof(struct virtio_gpu_ctrl_hdr));
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}
static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
        /* this assumes a vbuf contains a command that starts with a
         * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
         * virtqueues.
         */
        return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}
static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}
static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer **vbuffer_p,
                                     int size,
                                     virtio_gpu_resp_cb cb)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}
static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kvfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}
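/*
 * Work handler for the control queue.  The disable_cb/enable_cb loop
 * drains the virtqueue without racing against a concurrent interrupt:
 * virtqueue_enable_cb() returns false if more buffers were used in the
 * meantime, in which case we go around again.
 */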
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry(entry, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                        if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
                                struct virtio_gpu_ctrl_hdr *cmd;
                                cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
                                DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
                                                      le32_to_cpu(resp->type),
                                                      le32_to_cpu(cmd->type));
                        } else
                                DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                if (entry->objs)
                        virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
}
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}
/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
        int ret, s, i;
        struct sg_table *sgt;
        struct scatterlist *sg;
        struct page *pg;

        if (WARN_ON(!PAGE_ALIGNED(data)))
                return NULL;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
        ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
        if (ret) {
                kfree(sgt);
                return NULL;
        }

        for_each_sgtable_sg(sgt, sg, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        return NULL;
                }

                s = min_t(int, PAGE_SIZE, size);
                sg_set_page(sg, pg, s, 0);

                size -= s;
                data += s;
        }

        return sgt;
}
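/*
 * Add a prepared sg list to the control virtqueue.  If the queue is
 * full, kick any batched commands and sleep until the host has used
 * enough descriptors.  The fence (when present) is emitted under the
 * queue lock, once the command's position in the queue is fixed.
 */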
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf,
                                     struct virtio_gpu_fence *fence,
                                     int elemcnt,
                                     struct scatterlist **sgs,
                                     int outcnt,
                                     int incnt)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int ret, idx;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                if (fence && vbuf->objs)
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                free_vbuf(vgdev, vbuf);
                return -ENODEV;
        }

        if (vgdev->has_indirect)
                elemcnt = 1;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        if (vq->num_free < elemcnt) {
                spin_unlock(&vgdev->ctrlq.qlock);
                virtio_gpu_notify(vgdev);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
                goto again;
        }

        /* now that the position of the vbuf in the virtqueue is known, we can
         * finally set the fence id
         */
        if (fence) {
                virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
                                      fence);
                if (vbuf->objs) {
                        virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                }
        }

        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        WARN_ON(ret);

        trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

        atomic_inc(&vgdev->pending_commands);

        spin_unlock(&vgdev->ctrlq.qlock);

        drm_dev_exit(idx);
        return 0;
}
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_fence *fence)
{
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        struct sg_table *sgt = NULL;
        int elemcnt = 0, outcnt = 0, incnt = 0, ret;

        /* set up vcmd */
        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        elemcnt++;
        sgs[outcnt] = &vcmd;
        outcnt++;

        /* set up vout */
        if (vbuf->data_size) {
                if (is_vmalloc_addr(vbuf->data_buf)) {
                        int sg_ents;

                        sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
                                             &sg_ents);
                        if (!sgt) {
                                if (fence && vbuf->objs)
                                        virtio_gpu_array_unlock_resv(vbuf->objs);
                                return -ENOMEM;
                        }

                        elemcnt += sg_ents;
                        sgs[outcnt] = sgt->sgl;
                } else {
                        sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                        elemcnt++;
                        sgs[outcnt] = &vout;
                }
                outcnt++;
        }

        /* set up vresp */
        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                elemcnt++;
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

        ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs,
                                        outcnt, incnt);

        if (sgt) {
                sg_free_table(sgt);
                kfree(sgt);
        }
        return ret;
}
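/*
 * Commands queued via virtio_gpu_queue_ctrl_sgs() only bump
 * pending_commands; nothing is sent to the host until somebody calls
 * virtio_gpu_notify().  This lets callers batch several commands per
 * guest-to-host notification.  A typical caller therefore looks like:
 *
 *      virtio_gpu_cmd_transfer_to_host_2d(vgdev, ...);
 *      virtio_gpu_cmd_resource_flush(vgdev, ...);
 *      virtio_gpu_notify(vgdev);
 */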
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
        bool notify;

        if (!atomic_read(&vgdev->pending_commands))
                return;

        spin_lock(&vgdev->ctrlq.qlock);
        atomic_set(&vgdev->pending_commands, 0);
        notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
        spin_unlock(&vgdev->ctrlq.qlock);

        if (notify)
                virtqueue_notify(vgdev->ctrlq.vq);
}
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int idx, ret, outcnt;
        bool notify;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                free_vbuf(vgdev, vbuf);
                return;
        }

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                trace_virtio_gpu_cmd_queue(vq,
                                           virtio_gpu_vbuf_ctrl_hdr(vbuf));

                notify = virtqueue_kick_prepare(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (notify)
                virtqueue_notify(vq);

        drm_dev_exit(idx);
}
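/*
 * Note that, unlike the control queue, the cursor queue is kicked as
 * soon as a command is queued: cursor updates are small and presumably
 * latency-sensitive, so there is little to gain from batching them.
 */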
/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_object_array *objs,
                                    struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
        bo->created = true;
}
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *bo;

        bo = vbuf->resp_cb_data;
        vbuf->resp_cb_data = NULL;

        virtio_gpu_cleanup_object(bo);
}
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_object *bo)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int ret;

        cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
                                        virtio_gpu_cmd_unref_cb);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->resp_cb_data = bo;
        ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        if (ret < 0)
                virtio_gpu_cleanup_object(bo);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
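/*
 * For shmem-backed objects the guest pages may sit behind the DMA API
 * (use_dma_api), so the backing store has to be synced to the device
 * before the host is asked to read from it.
 */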
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        uint32_t width, uint32_t height,
                                        uint32_t x, uint32_t y,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

        if (virtio_gpu_is_shmem(bo) && use_dma_api)
                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            shmem->pages, DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}
static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        if (vgdev->capsets) {
                vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
                vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
                vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        } else {
                DRM_ERROR("invalid capset memory.");
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}
static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        /* Copy must occur before is_valid is signalled. */
                        smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up_all(&vgdev->resp_wq);
}
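/*
 * Block-fetch callback for drm_do_get_edid(): copies one EDID block out
 * of the response buffer that the host already filled in.
 */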
static int virtio_get_edid_block(void *data, u8 *buf,
                                 unsigned int block, size_t len)
{
        struct virtio_gpu_resp_edid *resp = data;
        size_t start = block * EDID_LENGTH;

        if (start + len > le32_to_cpu(resp->size))
                return -EINVAL;
        memcpy(buf, resp->edid + start, len);
        return 0;
}
static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_cmd_get_edid *cmd =
                (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
        struct virtio_gpu_resp_edid *resp =
                (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
        uint32_t scanout = le32_to_cpu(cmd->scanout);
        struct virtio_gpu_output *output;
        struct edid *new_edid, *old_edid;

        if (scanout >= vgdev->num_scanouts)
                return;
        output = vgdev->outputs + scanout;

        new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
        drm_connector_update_edid_property(&output->conn, new_edid);

        spin_lock(&vgdev->display_info_lock);
        old_edid = output->edid;
        output->edid = new_edid;
        spin_unlock(&vgdev->display_info_lock);

        kfree(old_edid);
        wake_up(&vgdev->resp_wq);
}
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        struct virtio_gpu_drv_cap_cache *search_ent;
        void *resp_buf;

        *cache_p = NULL;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        /* Search while under lock in case it was added by another task. */
        list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
                if (search_ent->id == vgdev->capsets[idx].id &&
                    search_ent->version == version) {
                        *cache_p = search_ent;
                        break;
                }
        }
        if (!*cache_p)
                list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        if (*cache_p) {
                /* Entry was found, so free everything that was just created. */
                kfree(resp_buf);
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return 0;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}
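/*
 * Ask the host for the EDID of every scanout.  Responses come back
 * asynchronously and are handled by virtio_gpu_cmd_get_edid_cb().
 */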
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_cmd_get_edid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;
        int scanout;

        if (WARN_ON(!vgdev->has_edid))
                return -EINVAL;

        for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
                resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
                                   GFP_KERNEL);
                if (!resp_buf)
                        return -ENOMEM;

                cmd_p = virtio_gpu_alloc_cmd_resp
                        (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
                         sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
                         resp_buf);
                cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
                cmd_p->scanout = cpu_to_le32(scanout);
                virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        }

        return 0;
}
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_object_params *params,
                                  struct virtio_gpu_object_array *objs,
                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        cmd_p->target = cpu_to_le32(params->target);
        cmd_p->bind = cpu_to_le32(params->bind);
        cmd_p->depth = cpu_to_le32(params->depth);
        cmd_p->array_size = cpu_to_le32(params->array_size);
        cmd_p->last_level = cpu_to_le32(params->last_level);
        cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
        cmd_p->flags = cpu_to_le32(params->flags);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

        bo->created = true;
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

        if (virtio_gpu_is_shmem(bo) && use_dma_api)
                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            shmem->pages, DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id,
                           struct virtio_gpu_object_array *objs,
                           struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj,
                              struct virtio_gpu_mem_entry *ents,
                              unsigned int nents)
{
        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents, NULL);
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}
static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
                                            struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *obj =
                gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
        struct virtio_gpu_resp_resource_uuid *resp =
                (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);

        spin_lock(&vgdev->resource_export_lock);
        WARN_ON(obj->uuid_state != UUID_INITIALIZING);

        if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
            obj->uuid_state == UUID_INITIALIZING) {
                memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
                obj->uuid_state = UUID_INITIALIZED;
        } else {
                obj->uuid_state = UUID_INITIALIZATION_FAILED;
        }
        spin_unlock(&vgdev->resource_export_lock);

        wake_up_all(&vgdev->resp_wq);
}
int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_resource_assign_uuid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_resp_resource_uuid *resp_buf;

        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf) {
                spin_lock(&vgdev->resource_export_lock);
                bo->uuid_state = UUID_INITIALIZATION_FAILED;
                spin_unlock(&vgdev->resource_export_lock);
                virtio_gpu_array_put_free(objs);
                return -ENOMEM;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->objs = objs;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}