/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "virtgpu_drv.h"

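/*
 * The DRM_FORMAT_HOST_* fourccs (see drm_fourcc.h) resolve to the
 * XRGB8888/ARGB8888 variant on little-endian kernels and to
 * BGRX8888/BGRA8888 on big-endian ones, so the in-memory byte layout
 * seen by the host is the same either way.
 */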
static const uint32_t virtio_gpu_formats[] = {
        DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
        DRM_FORMAT_HOST_ARGB8888,
};

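/*
 * Map a DRM fourcc to the matching VIRTIO_GPU_FORMAT_* value.  DRM
 * fourccs name components in little-endian byte order while virtio-gpu
 * formats name them in memory order, which is why e.g. XRGB8888
 * translates to B8G8R8X8.
 */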
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
        uint32_t format;

        switch (drm_fourcc) {
        case DRM_FORMAT_XRGB8888:
                format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
                break;
        case DRM_FORMAT_ARGB8888:
                format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
                break;
        case DRM_FORMAT_BGRX8888:
                format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
                break;
        case DRM_FORMAT_BGRA8888:
                format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
                break;
        default:
                /*
                 * This should not happen, we handle everything listed
                 * in virtio_gpu_formats[].
                 */
                format = 0;
                break;
        }
        WARN_ON(format == 0);
        return format;
}

static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
        drm_plane_cleanup(plane);
        kfree(plane);
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
        .update_plane           = drm_atomic_helper_update_plane,
        .disable_plane          = drm_atomic_helper_disable_plane,
        .destroy                = virtio_gpu_plane_destroy,
        .reset                  = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
};

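/*
 * Validate a proposed plane state: virtio-gpu cannot scale, cursor
 * planes may be positioned partially off screen, and updates while the
 * CRTC is disabled are accepted (they simply won't scan out).
 */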
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
                                         struct drm_atomic_state *state)
{
        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
                                                                                 plane);
        bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
        struct drm_crtc_state *crtc_state;
        int ret;

        if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
                return 0;

        crtc_state = drm_atomic_get_crtc_state(state,
                                               new_plane_state->crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  is_cursor, true);
        return ret;
}

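/*
 * Dumb BOs are backed by guest memory only, so the damaged rectangle
 * has to be copied into the host-side resource with a
 * TRANSFER_TO_HOST_2D command before it can be flushed to the display.
 */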
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
                                      struct drm_plane_state *state,
                                      struct drm_rect *rect)
{
        struct virtio_gpu_object *bo =
                gem_to_virtio_gpu_obj(state->fb->obj[0]);
        struct virtio_gpu_object_array *objs;
        uint32_t w = rect->x2 - rect->x1;
        uint32_t h = rect->y2 - rect->y1;
        uint32_t x = rect->x1;
        uint32_t y = rect->y1;
        uint32_t off = x * state->fb->format->cpp[0] +
                y * state->fb->pitches[0];

        objs = virtio_gpu_array_alloc(1);
        if (!objs)
                return;
        virtio_gpu_array_add_obj(objs, &bo->base.base);

        virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
                                           objs, NULL);
}

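/*
 * Ask the host to redraw a rectangle of the plane's framebuffer.  If
 * prepare_fb attached a fence, flush with that fence and wait up to
 * 50 ms for the host to signal it, which bounds how far the guest can
 * run ahead of the host's actual display updates.
 */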
static void virtio_gpu_resource_flush(struct drm_plane *plane,
                                      uint32_t x, uint32_t y,
                                      uint32_t width, uint32_t height)
{
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_object *bo;

        vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
        bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
        if (vgfb->fence) {
                struct virtio_gpu_object_array *objs;

                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        return;
                virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
                virtio_gpu_array_lock_resv(objs);
                virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
                                              width, height, objs, vgfb->fence);
                virtio_gpu_notify(vgdev);

                dma_fence_wait_timeout(&vgfb->fence->f, true,
                                       msecs_to_jiffies(50));
                dma_fence_put(&vgfb->fence->f);
                vgfb->fence = NULL;
        } else {
                virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
                                              width, height, NULL, NULL);
                virtio_gpu_notify(vgdev);
        }
}

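/*
 * Primary plane update: disable the scanout when there is no
 * framebuffer (or the CRTC is off), otherwise upload dumb-BO damage,
 * (re)program the scanout if the framebuffer or source rectangle
 * changed, and finally flush the merged damage rectangle.
 */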
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
                                            struct drm_atomic_state *state)
{
        struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
                                                                           plane);
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_output *output = NULL;
        struct virtio_gpu_object *bo;
        struct drm_rect rect;

        if (plane->state->crtc)
                output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
        if (old_state->crtc)
                output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
        if (WARN_ON(!output))
                return;

        if (!plane->state->fb || !output->crtc.state->active) {
                DRM_DEBUG("nofb\n");
                virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
                                           plane->state->src_w >> 16,
                                           plane->state->src_h >> 16,
                                           0, 0);
                virtio_gpu_notify(vgdev);
                return;
        }

        if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
                return;

        bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
        if (bo->dumb)
                virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

        if (plane->state->fb != old_state->fb ||
            plane->state->src_w != old_state->src_w ||
            plane->state->src_h != old_state->src_h ||
            plane->state->src_x != old_state->src_x ||
            plane->state->src_y != old_state->src_y ||
            output->needs_modeset) {
                output->needs_modeset = false;
                DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
                          bo->hw_res_handle,
                          plane->state->crtc_w, plane->state->crtc_h,
                          plane->state->crtc_x, plane->state->crtc_y,
                          plane->state->src_w >> 16,
                          plane->state->src_h >> 16,
                          plane->state->src_x >> 16,
                          plane->state->src_y >> 16);

                if (bo->host3d_blob || bo->guest_blob) {
                        virtio_gpu_cmd_set_scanout_blob
                                                (vgdev, output->index, bo,
                                                 plane->state->fb,
                                                 plane->state->src_w >> 16,
                                                 plane->state->src_h >> 16,
                                                 plane->state->src_x >> 16,
                                                 plane->state->src_y >> 16);
                } else {
                        virtio_gpu_cmd_set_scanout(vgdev, output->index,
                                                   bo->hw_res_handle,
                                                   plane->state->src_w >> 16,
                                                   plane->state->src_h >> 16,
                                                   plane->state->src_x >> 16,
                                                   plane->state->src_y >> 16);
                }
        }

        virtio_gpu_resource_flush(plane,
                                  rect.x1,
                                  rect.y1,
                                  rect.x2 - rect.x1,
                                  rect.y2 - rect.y1);
}

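/*
 * Allocate a per-framebuffer fence for dumb BOs when the framebuffer
 * changes.  Ordinary (non-blob) primary planes skip this; only guest
 * blob resources and cursors need the host-side synchronization.
 */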
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
                                       struct drm_plane_state *new_state)
{
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_object *bo;

        if (!new_state->fb)
                return 0;

        vgfb = to_virtio_gpu_framebuffer(new_state->fb);
        bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
        if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
                return 0;

        if (bo->dumb && (plane->state->fb != new_state->fb)) {
                vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
                                                     0);
                if (!vgfb->fence)
                        return -ENOMEM;
        }

        return 0;
}

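/*
 * Drop a fence that prepare_fb allocated but the atomic update did not
 * consume (e.g. because the commit was aborted).
 */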
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
                                        struct drm_plane_state *old_state)
{
        struct virtio_gpu_framebuffer *vgfb;

        if (!old_state->fb)
                return;

        vgfb = to_virtio_gpu_framebuffer(old_state->fb);
        if (vgfb->fence) {
                dma_fence_put(&vgfb->fence->f);
                vgfb->fence = NULL;
        }
}

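/*
 * Cursor plane update: upload a new cursor image and wait for the
 * transfer fence so the host sees complete data, then send either an
 * UPDATE_CURSOR or a cheaper MOVE_CURSOR command depending on whether
 * the image changed.
 */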
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
                                           struct drm_atomic_state *state)
{
        struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
                                                                           plane);
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_output *output = NULL;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_object *bo = NULL;
        uint32_t handle;

        if (plane->state->crtc)
                output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
        if (old_state->crtc)
                output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
        if (WARN_ON(!output))
                return;

        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
                bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
                handle = bo->hw_res_handle;
        } else {
                handle = 0;
        }

        if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
                /* new cursor -- update & wait */
                struct virtio_gpu_object_array *objs;

                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        return;
                virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
                virtio_gpu_array_lock_resv(objs);
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, 0,
                         plane->state->crtc_w,
                         plane->state->crtc_h,
                         0, 0, objs, vgfb->fence);
                virtio_gpu_notify(vgdev);
                dma_fence_wait(&vgfb->fence->f, true);
                dma_fence_put(&vgfb->fence->f);
                vgfb->fence = NULL;
        }

        if (plane->state->fb != old_state->fb) {
                DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
                          plane->state->crtc_x,
                          plane->state->crtc_y,
                          plane->state->fb ? plane->state->fb->hot_x : 0,
                          plane->state->fb ? plane->state->fb->hot_y : 0);
                output->cursor.hdr.type =
                        cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
                output->cursor.resource_id = cpu_to_le32(handle);
                if (plane->state->fb) {
                        output->cursor.hot_x =
                                cpu_to_le32(plane->state->fb->hot_x);
                        output->cursor.hot_y =
                                cpu_to_le32(plane->state->fb->hot_y);
                } else {
                        output->cursor.hot_x = cpu_to_le32(0);
                        output->cursor.hot_y = cpu_to_le32(0);
                }
        } else {
                DRM_DEBUG("move +%d+%d\n",
                          plane->state->crtc_x,
                          plane->state->crtc_y);
                output->cursor.hdr.type =
                        cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
        }
        output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
        output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
        virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
        .prepare_fb             = virtio_gpu_plane_prepare_fb,
        .cleanup_fb             = virtio_gpu_plane_cleanup_fb,
        .atomic_check           = virtio_gpu_plane_atomic_check,
        .atomic_update          = virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
        .prepare_fb             = virtio_gpu_plane_prepare_fb,
        .cleanup_fb             = virtio_gpu_plane_cleanup_fb,
        .atomic_check           = virtio_gpu_plane_atomic_check,
        .atomic_update          = virtio_gpu_cursor_plane_update,
};

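/*
 * Create a primary or cursor plane for one output, selecting the
 * matching format list and helper vtable.  The plane is allocated here
 * and freed again by virtio_gpu_plane_destroy().
 */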
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
                                        enum drm_plane_type type,
                                        int index)
{
        struct drm_device *dev = vgdev->ddev;
        const struct drm_plane_helper_funcs *funcs;
        struct drm_plane *plane;
        const uint32_t *formats;
        int ret, nformats;

        plane = kzalloc(sizeof(*plane), GFP_KERNEL);
        if (!plane)
                return ERR_PTR(-ENOMEM);

        if (type == DRM_PLANE_TYPE_CURSOR) {
                formats = virtio_gpu_cursor_formats;
                nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
                funcs = &virtio_gpu_cursor_helper_funcs;
        } else {
                formats = virtio_gpu_formats;
                nformats = ARRAY_SIZE(virtio_gpu_formats);
                funcs = &virtio_gpu_primary_helper_funcs;
        }
        ret = drm_universal_plane_init(dev, plane, 1 << index,
                                       &virtio_gpu_plane_funcs,
                                       formats, nformats,
                                       NULL, type, NULL);
        if (ret)
                goto err_plane_init;

        drm_plane_helper_add(plane, funcs);
        return plane;

err_plane_init:
        kfree(plane);
        return ERR_PTR(ret);
}