GNU Linux-libre 5.19-rc6-gnu
[releases.git] / drivers / gpu / drm / xen / xen_drm_front_kms.c
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2
3 /*
4  *  Xen para-virtual DRM device
5  *
6  * Copyright (C) 2016-2018 EPAM Systems Inc.
7  *
8  * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9  */
10
11 #include <drm/drm_atomic.h>
12 #include <drm/drm_atomic_helper.h>
13 #include <drm/drm_drv.h>
14 #include <drm/drm_fourcc.h>
15 #include <drm/drm_gem.h>
16 #include <drm/drm_gem_atomic_helper.h>
17 #include <drm/drm_gem_framebuffer_helper.h>
18 #include <drm/drm_probe_helper.h>
19 #include <drm/drm_vblank.h>
20
21 #include "xen_drm_front.h"
22 #include "xen_drm_front_conn.h"
23 #include "xen_drm_front_kms.h"
24
25 /*
26  * Timeout in ms to wait for frame done event from the backend:
27  * must be a bit more than IO time-out
28  */
29 #define FRAME_DONE_TO_MS        (XEN_DRM_FRONT_WAIT_BACK_MS + 100)
30
/* Map a simple-display-pipe back to its enclosing front-end pipeline. */
static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
{
	return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
}
36
/*
 * Framebuffer destructor: detach the FB from the backend first (only if
 * the DRM device is still registered), then free the GEM framebuffer.
 */
static void fb_destroy(struct drm_framebuffer *fb)
{
	struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
	int idx;

	/*
	 * drm_dev_enter() fails once the device has been unplugged, in
	 * which case there is no backend left to notify.
	 */
	if (drm_dev_enter(fb->dev, &idx)) {
		xen_drm_front_fb_detach(drm_info->front_info,
					xen_drm_front_fb_to_cookie(fb));
		drm_dev_exit(idx);
	}
	drm_gem_fb_destroy(fb);
}
49
/* Framebuffer ops: only .destroy is overridden to detach from the backend. */
static const struct drm_framebuffer_funcs fb_funcs = {
	.destroy = fb_destroy,
};
53
54 static struct drm_framebuffer *
55 fb_create(struct drm_device *dev, struct drm_file *filp,
56           const struct drm_mode_fb_cmd2 *mode_cmd)
57 {
58         struct xen_drm_front_drm_info *drm_info = dev->dev_private;
59         struct drm_framebuffer *fb;
60         struct drm_gem_object *gem_obj;
61         int ret;
62
63         fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
64         if (IS_ERR(fb))
65                 return fb;
66
67         gem_obj = fb->obj[0];
68
69         ret = xen_drm_front_fb_attach(drm_info->front_info,
70                                       xen_drm_front_dbuf_to_cookie(gem_obj),
71                                       xen_drm_front_fb_to_cookie(fb),
72                                       fb->width, fb->height,
73                                       fb->format->format);
74         if (ret < 0) {
75                 DRM_ERROR("Back failed to attach FB %p: %d\n", fb, ret);
76                 goto fail;
77         }
78
79         return fb;
80
81 fail:
82         drm_gem_fb_destroy(fb);
83         return ERR_PTR(ret);
84 }
85
/* Mode config ops: custom FB creation, stock atomic check/commit helpers. */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
91
/*
 * Deliver the cached page flip completion event to user-space, if one is
 * pending, and clear it. Safe to call with no event pending.
 */
static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_crtc *crtc = &pipeline->pipe.crtc;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	/* dev->event_lock serializes access to pending_event */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (pipeline->pending_event)
		drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
	pipeline->pending_event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
104
/*
 * Simple-pipe .enable hook: program the backend with the new mode and
 * framebuffer. On failure the connector is marked disconnected so the
 * pipeline can be re-probed/re-enabled later.
 */
static void display_enable(struct drm_simple_display_pipe *pipe,
			   struct drm_crtc_state *crtc_state,
			   struct drm_plane_state *plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret, idx;

	/* Nothing to do if the device is already unplugged. */
	if (!drm_dev_enter(pipe->crtc.dev, &idx))
		return;

	ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
				     fb->width, fb->height,
				     fb->format->cpp[0] * 8,
				     xen_drm_front_fb_to_cookie(fb));

	if (ret) {
		DRM_ERROR("Failed to enable display: %d\n", ret);
		pipeline->conn_connected = false;
	}

	drm_dev_exit(idx);
}
130
/*
 * Simple-pipe .disable hook: tell the backend to drop the current mode
 * (all-zero mode set with a NULL FB cookie) and release any page flip
 * event still waiting for a frame-done notification.
 */
static void display_disable(struct drm_simple_display_pipe *pipe)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	int ret = 0, idx;

	if (drm_dev_enter(pipe->crtc.dev, &idx)) {
		ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
					     xen_drm_front_fb_to_cookie(NULL));
		drm_dev_exit(idx);
	}
	if (ret)
		DRM_ERROR("Failed to disable display: %d\n", ret);

	/* Make sure we can restart with enabled connector next time */
	pipeline->conn_connected = true;

	/* release stalled event if any */
	send_pending_event(pipeline);
}
151
/*
 * Frame-done event handler, called by the front-end core when the backend
 * reports a completed page flip: cancel the timeout worker and deliver the
 * pending vblank event to user-space.
 *
 * NOTE(review): fb_cookie is currently unused here — presumably kept for
 * protocol symmetry; confirm against the event dispatch in the core.
 */
void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
				     u64 fb_cookie)
{
	/*
	 * This runs in interrupt context, e.g. under
	 * drm_info->front_info->io_lock, so we cannot call _sync version
	 * to cancel the work
	 */
	cancel_delayed_work(&pipeline->pflip_to_worker);

	send_pending_event(pipeline);
}
164
165 static void pflip_to_worker(struct work_struct *work)
166 {
167         struct delayed_work *delayed_work = to_delayed_work(work);
168         struct xen_drm_front_drm_pipeline *pipeline =
169                         container_of(delayed_work,
170                                      struct xen_drm_front_drm_pipeline,
171                                      pflip_to_worker);
172
173         DRM_ERROR("Frame done timed-out, releasing");
174         send_pending_event(pipeline);
175 }
176
/*
 * Issue a page flip request to the backend for an FB-to-FB transition.
 *
 * Returns true if the flip was sent and the pending event will be
 * delivered later on the frame-done notification; false if the caller
 * must deliver the pending event itself (enable/disable transition or
 * request failure).
 */
static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
				   struct drm_plane_state *old_plane_state)
{
	struct drm_plane_state *plane_state =
			drm_atomic_get_new_plane_state(old_plane_state->state,
						       &pipe->plane);

	/*
	 * If old_plane_state->fb is NULL and plane_state->fb is not,
	 * then this is an atomic commit which will enable display.
	 * If old_plane_state->fb is not NULL and plane_state->fb is,
	 * then this is an atomic commit which will disable display.
	 * Ignore these and do not send page flip as this framebuffer will be
	 * sent to the backend as a part of display_set_config call.
	 */
	if (old_plane_state->fb && plane_state->fb) {
		struct xen_drm_front_drm_pipeline *pipeline =
				to_xen_drm_pipeline(pipe);
		struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
		int ret;

		/*
		 * Arm the timeout worker before sending the request, so a
		 * lost frame-done event cannot leave user-space blocked.
		 */
		schedule_delayed_work(&pipeline->pflip_to_worker,
				      msecs_to_jiffies(FRAME_DONE_TO_MS));

		ret = xen_drm_front_page_flip(drm_info->front_info,
					      pipeline->index,
					      xen_drm_front_fb_to_cookie(plane_state->fb));
		if (ret) {
			DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);

			pipeline->conn_connected = false;
			/*
			 * Report the flip not handled, so pending event is
			 * sent, unblocking user-space.
			 */
			return false;
		}
		/*
		 * Signal that page flip was handled, pending event will be sent
		 * on frame done event from the backend.
		 */
		return true;
	}

	return false;
}
223
/*
 * Simple-pipe .check hook: always succeeds; only used to claim vblank
 * handling so the DRM helpers do not fake VBLANK events for us.
 */
static int display_check(struct drm_simple_display_pipe *pipe,
			 struct drm_plane_state *plane_state,
			 struct drm_crtc_state *crtc_state)
{
	/*
	 * Xen doesn't initialize vblanking via drm_vblank_init(), so
	 * DRM helpers assume that it doesn't handle vblanking and start
	 * sending out fake VBLANK events automatically.
	 *
	 * As xen contains it's own logic for sending out VBLANK events
	 * in send_pending_event(), disable no_vblank (i.e., the xen
	 * driver has vblanking support).
	 */
	crtc_state->no_vblank = false;

	return 0;
}
241
/*
 * Simple-pipe .update hook: cache the CRTC's completion event under
 * dev->event_lock, then either hand delivery off to the frame-done path
 * (page flip sent to the backend) or deliver it immediately.
 */
static void display_update(struct drm_simple_display_pipe *pipe,
			   struct drm_plane_state *old_plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_pending_vblank_event *event;
	int idx;

	event = crtc->state->event;
	if (event) {
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		/* A previous flip must have completed by now. */
		WARN_ON(pipeline->pending_event);

		spin_lock_irqsave(&dev->event_lock, flags);
		crtc->state->event = NULL;

		pipeline->pending_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* Device unplugged: nothing to send, just release the event. */
	if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
		send_pending_event(pipeline);
		return;
	}

	/*
	 * Send page flip request to the backend *after* we have event cached
	 * above, so on page flip done event from the backend we can
	 * deliver it and there is no race condition between this code and
	 * event from the backend.
	 * If this is not a page flip, e.g. no flip done event from the backend
	 * is expected, then send now.
	 */
	if (!display_send_page_flip(pipe, old_plane_state))
		send_pending_event(pipeline);

	drm_dev_exit(idx);
}
283
284 static enum drm_mode_status
285 display_mode_valid(struct drm_simple_display_pipe *pipe,
286                    const struct drm_display_mode *mode)
287 {
288         struct xen_drm_front_drm_pipeline *pipeline =
289                         container_of(pipe, struct xen_drm_front_drm_pipeline,
290                                      pipe);
291
292         if (mode->hdisplay != pipeline->width)
293                 return MODE_ERROR;
294
295         if (mode->vdisplay != pipeline->height)
296                 return MODE_ERROR;
297
298         return MODE_OK;
299 }
300
/* Simple display pipe ops wired to the backend-driven implementations above. */
static const struct drm_simple_display_pipe_funcs display_funcs = {
	.mode_valid = display_mode_valid,
	.enable = display_enable,
	.disable = display_disable,
	.check = display_check,
	.update = display_update,
};
308
/*
 * Initialize one display pipeline: record the backend-provided geometry,
 * set up the page flip timeout worker, create the connector and register
 * the simple display pipe with the supported formats.
 *
 * Returns 0 on success or a negative error code.
 */
static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
			     int index, struct xen_drm_front_cfg_connector *cfg,
			     struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_device *dev = drm_info->drm_dev;
	const u32 *formats;
	int format_count;
	int ret;

	pipeline->drm_info = drm_info;
	pipeline->index = index;
	pipeline->height = cfg->height;
	pipeline->width = cfg->width;

	INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);

	ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
	if (ret)
		return ret;

	formats = xen_drm_front_conn_get_formats(&format_count);

	return drm_simple_display_pipe_init(dev, &pipeline->pipe,
					    &display_funcs, formats,
					    format_count, NULL,
					    &pipeline->conn);
}
336
/*
 * Initialize KMS for the front-end: set up mode config limits and one
 * display pipeline per connector described in the backend configuration.
 *
 * Returns 0 on success or a negative error code (mode config is cleaned
 * up on failure).
 */
int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
{
	struct drm_device *dev = drm_info->drm_dev;
	int i, ret;

	drm_mode_config_init(dev);

	/* Maximum framebuffer dimensions accepted from user-space. */
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &mode_config_funcs;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_cfg_connector *cfg =
				&drm_info->front_info->cfg.connectors[i];
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		ret = display_pipe_init(drm_info, i, cfg, pipeline);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	drm_mode_config_reset(dev);
	drm_kms_helper_poll_init(dev);
	return 0;
}
367
/*
 * Tear down KMS state: stop every pipeline's page flip timeout worker
 * (synchronously, as this is process context) and flush any still-pending
 * vblank event so user-space is not left waiting.
 */
void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
{
	int i;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		cancel_delayed_work_sync(&pipeline->pflip_to_worker);

		send_pending_event(pipeline);
	}
}