/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT             (1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;

        struct list_head        job_queue;
        spinlock_t              job_spinlock;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
                                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
        struct v4l2_m2m_buffer *b, *tmp;
        struct vb2_v4l2_buffer *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
                if (b->vb.vb2_buf.index == idx) {
                        list_del(&b->list);
                        q_ctx->num_rdy--;
                        ret = &b->vb;
                        break;
                }
        }
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
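
/*
 * Illustrative sketch, not part of this file's code: a driver's device_run()
 * callback usually peeks at the next ready buffers through the
 * v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() wrappers from
 * <media/v4l2-mem2mem.h>, which resolve to v4l2_m2m_next_buf() above.
 * The my_device_run()/struct my_ctx/my_hw_start() names are hypothetical.
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *		my_hw_start(ctx, src, dst);
 *	}
 *
 * my_hw_start() stands in for the driver-specific step that programs the
 * hardware with the source/destination buffers and kicks off the job.
 */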

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (NULL != m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                   struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags_job, flags_out, flags_cap;

        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

        /* If the context is aborted then don't schedule it */
        if (m2m_ctx->job_flags & TRANS_ABORT) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Aborted context\n");
                return;
        }

        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("On job queue already\n");
                return;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
            && !m2m_ctx->out_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
            && !m2m_ctx->cap_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
                                        flags_cap);
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Driver not ready\n");
                return;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

        m2m_ctx->job_flags |= TRANS_ABORT;
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished,
                                !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        v4l2_m2m_try_schedule(m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
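
/*
 * Illustrative sketch, not part of this file's code: when the transaction
 * completes (typically in the driver's interrupt handler), the finished
 * buffers are taken off the ready lists with v4l2_m2m_src_buf_remove()/
 * v4l2_m2m_dst_buf_remove(), marked done with v4l2_m2m_buf_done(), and
 * v4l2_m2m_job_finish() is then called so the next queued job can run.
 * The my_irq()/struct my_dev/struct my_ctx names are hypothetical.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */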

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        ret = vb2_reqbufs(vq, reqbufs);
        /* If count == 0, then the owner has released all buffers and is
           no longer the owner of the queue. Otherwise we have an owner. */
        if (ret == 0)
                vq->owner = reqbufs->count ? file->private_data : NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
                                       struct v4l2_buffer *buf)
{
        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        unsigned int i;

                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }
}

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);
        if (ret)
                return ret;

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        v4l2_m2m_adjust_mem_offset(vq, buf);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_qbuf(vq, buf);
        if (ret)
                return ret;

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        v4l2_m2m_adjust_mem_offset(vq, buf);

        v4l2_m2m_try_schedule(m2m_ctx);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
        if (ret)
                return ret;

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        v4l2_m2m_adjust_mem_offset(vq, buf);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_prepare_buf(vq, buf);
        if (ret)
                return ret;

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        v4l2_m2m_adjust_mem_offset(vq, buf);

        v4l2_m2m_try_schedule(m2m_ctx);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop queue, since streamoff returns device to the same state as after
         * calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                           struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        unsigned long req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        unsigned int rc = 0;
        unsigned long flags;

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                if (v4l2_event_pending(fh))
                        rc = POLLPRI;
                else if (req_events & POLLPRI)
                        poll_wait(file, &fh->wait, wait);
                if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
                        return rc;
        }

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        /*
         * There has to be at least one buffer queued on each queued_list, which
         * means either in driver already or waiting for driver to claim it
         * and start processing.
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
                rc |= POLLERR;
                goto end;
        }

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (list_empty(&src_q->done_list))
                poll_wait(file, &src_q->done_wq, wait);
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (list_empty(&dst_q->done_list)) {
                /*
                 * If the last buffer was dequeued from the capture queue,
                 * return immediately. DQBUF will return -EPIPE.
                 */
                if (dst_q->last_buffer_dequeued) {
                        spin_unlock_irqrestore(&dst_q->done_lock, flags);
                        return rc | POLLIN | POLLRDNORM;
                }

                poll_wait(file, &dst_q->done_wq, wait);
        }
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                        || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLOUT | POLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                        || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLIN | POLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
                        WARN_ON(!m2m_ops->job_abort))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

        if (ret)
                goto err;
        /*
         * If both queues use the same mutex, assign it as the common buffer
         * queue lock to the m2m context. This lock is used in the
         * v4l2_m2m_ioctl_* helpers.
         */
        if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
                m2m_ctx->q_lock = out_q_ctx->q.lock;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
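
/*
 * Illustrative sketch, not part of this file's code: v4l2_m2m_ctx_init() is
 * normally called from the driver's open() file operation with a queue_init
 * callback that fills in and registers both vb2 queues. The my_queue_init()
 * name and the my_ctx layout are hypothetical.
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		// fill in type, io_modes, ops, mem_ops, lock, drv_priv, ...
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 * and then, in open():
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */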

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b = container_of(vbuf,
                                struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
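
/*
 * Illustrative sketch, not part of this file's code: drivers typically
 * forward their vb2 .buf_queue operation here so that queued buffers land
 * on the per-queue ready list. my_buf_queue() and struct my_ctx are
 * hypothetical.
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */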

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                                struct v4l2_requestbuffers *rb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
                                struct v4l2_create_buffers *create)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
                               struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
                                struct v4l2_exportbuffer *eb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
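
/*
 * Illustrative sketch, not part of this file's code: a driver that keeps its
 * m2m context in fh->m2m_ctx can point its v4l2_ioctl_ops straight at the
 * helpers above; my_ioctl_ops is a hypothetical name and the list is not
 * exhaustive.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */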

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        unsigned int ret;

        if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        ret = v4l2_m2m_poll(file, m2m_ctx, wait);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
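
/*
 * Illustrative sketch, not part of this file's code: the matching
 * v4l2_file_operations entries can be wired to the helpers above in the
 * same way, provided both queues share one lock as noted; my_fops is a
 * hypothetical name and the list is not exhaustive.
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		...
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */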