/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
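/*
 * Each vbuffer is carved out of a single kmem_cache object: the
 * struct virtio_gpu_vbuffer header, followed by up to
 * MAX_INLINE_CMD_SIZE bytes of command and MAX_INLINE_RESP_SIZE bytes
 * of inline response storage.  Responses larger than the inline space
 * must be supplied by the caller (see virtio_gpu_get_vbuf() below).
 */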
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}
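/*
 * The two ack callbacks below run in virtqueue interrupt context, so
 * they only kick the matching dequeue worker; all actual buffer
 * processing happens in virtio_gpu_dequeue_ctrl_func() and
 * virtio_gpu_dequeue_cursor_func() further down.
 */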
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}
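/*
 * Thin wrappers around virtio_gpu_get_vbuf() for the common cases: a
 * command expecting a plain ctrl_hdr response, a cursor command with no
 * response, and a command with a caller-supplied response buffer and
 * completion callback.
 */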
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}
static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}
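/*
 * Dequeue work: runs whenever the host signals a queue.  Used buffers
 * are collected under the queue lock with callbacks disabled, then
 * processed outside the lock; re-enabling callbacks in the loop
 * condition closes the race with the host returning further buffers in
 * the meantime.
 */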
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}
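/*
 * Queue a command for the host.  A vbuffer maps to at most three
 * scatterlist entries: the command itself (out), an optional data
 * payload (out) and an optional response buffer (in).  On -ENOSPC the
 * queue lock is dropped while waiting for the host to free ring slots,
 * hence the __releases/__acquires annotations below.
 */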
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence **fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}
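/*
 * The virtio_gpu_cmd_* helpers below all follow the same pattern:
 * allocate a vbuf, fill in the command with little-endian fields, and
 * queue it (fenced, where ordering against completion matters).  A
 * typical caller looks roughly like this (sketch; error handling
 * omitted):
 *
 *	uint32_t id;
 *
 *	virtio_gpu_resource_id_get(vgdev, &id);
 *	virtio_gpu_cmd_create_resource(vgdev, id, format, width, height);
 */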
/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
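/*
 * Note: unlike the helpers above, the 2D transfer takes rectangle
 * coordinates that are already little-endian (__le32), so they are
 * copied through without conversion.
 */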
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
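/*
 * Response callbacks.  These run from the control-queue dequeue worker
 * above; results are published under display_info_lock before waiters
 * on resp_wq are woken.
 */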
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}
static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}
static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}
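/*
 * Query helpers.  Each allocates a response buffer, points the vbuf at
 * the matching callback above, and queues the request; callers then
 * wait on resp_wq for the callback to signal completion.
 */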
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *resp_buf;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}
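/*
 * A caller typically pairs this with a wait on resp_wq for the cache
 * entry to become valid, e.g. (sketch; timeout handling omitted):
 *
 *	struct virtio_gpu_drv_cap_cache *cache_ent;
 *
 *	virtio_gpu_cmd_get_capset(vgdev, idx, version, &cache_ent);
 *	wait_event(vgdev->resp_wq, atomic_read(&cache_ent->is_valid));
 */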
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
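/*
 * 3D (virgl) commands.  These mirror the 2D helpers above; the
 * transfer and submit paths carry a rendering context id and take an
 * optional fence so completion can be observed in submission order.
 */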
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_resource_create_3d *rc_3d,
				  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	*cmd_p = *rc_3d;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->hdr.flags = 0;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
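/*
 * Attach backing pages to a resource: the object's sg table is
 * flattened into an array of virtio_gpu_mem_entry (guest physical
 * address + length pairs) that the host walks to build its view of the
 * backing store.  The entry array rides along as the vbuf data payload
 * and is freed by free_vbuf() once the ring has consumed it.
 */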
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, ret;

	if (!obj->pages) {
		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kvmalloc_array(obj->pages->nents,
			      sizeof(struct virtio_gpu_mem_entry),
			      GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}
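/*
 * Cursor updates bypass the control queue: the output's current cursor
 * state is snapshotted into a vbuf and pushed down the dedicated
 * cursor queue, so cursor movement is not stalled by pending control
 * commands.
 */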
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}