/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf2.
 *
 * Helper functions for devices that use videobuf2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
MODULE_DESCRIPTION("Mem to mem device framework for videobuf2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)
/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
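
/*
 * For illustration, a hypothetical userspace sketch (not part of this
 * file): the offset returned by VIDIOC_QUERYBUF for a CAPTURE buffer
 * already carries the DST_QUEUE_OFF_BASE bias, so userspace simply
 * passes it straight through to mmap() on the same file descriptor,
 * and v4l2_m2m_mmap() below strips the bias again:
 *
 *	struct v4l2_buffer buf = {
 *		.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory	= V4L2_MEMORY_MMAP,
 *		.index	= 0,
 *	};
 *	ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *	p = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		 fd, buf.m.offset);
 */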
/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;

	return &m2m_ctx->cap_q_ctx;
}
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
/*
 * Scheduling handlers
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context:
 * 1] If the context is currently running, then job_abort() will be called
 *    and we will wait for the job to finish.
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
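
/*
 * Typical job lifecycle in a driver (a sketch; the "mydrv" names and
 * fields are hypothetical): v4l2_m2m_try_run() invokes the driver's
 * device_run(), which programs the hardware, and the driver's interrupt
 * handler later returns the buffers and calls v4l2_m2m_job_finish():
 *
 *	static irqreturn_t mydrv_irq(int irq, void *priv)
 *	{
 *		struct mydrv_dev *dev = priv;
 *		struct v4l2_m2m_ctx *m2m_ctx = dev->curr_ctx;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(m2m_ctx);
 *		vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */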
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and it
	   is no longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
				       struct v4l2_buffer *buf)
{
	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			unsigned int i;

			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}
}
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | POLLIN | POLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
	    WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof(*m2m_dev), GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
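
/*
 * Example device setup (a sketch; the "mydrv" names are hypothetical).
 * A driver fills in a struct v4l2_m2m_ops, where device_run and
 * job_abort are mandatory (checked above) and job_ready is optional,
 * and calls v4l2_m2m_init() once per device, typically from probe():
 *
 *	static const struct v4l2_m2m_ops mydrv_m2m_ops = {
 *		.device_run	= mydrv_device_run,
 *		.job_ready	= mydrv_job_ready,
 *		.job_abort	= mydrv_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&mydrv_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */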
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof(*m2m_ctx), GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * If both queues use the same mutex, assign it as the common buffer
	 * queues lock to the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
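
/*
 * Example usage from a driver's open() (a sketch; the "mydrv" names are
 * hypothetical). The queue_init callback initializes both vb2 queues;
 * pointing both at one mutex makes the q_lock shortcut above apply:
 *
 *	static int mydrv_queue_init(void *priv, struct vb2_queue *src_vq,
 *				    struct vb2_queue *dst_vq)
 *	{
 *		...set up src_vq and dst_vq here, with
 *		   src_vq->lock == dst_vq->lock...
 *		return 0;
 *	}
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *					    mydrv_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */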
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
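
/*
 * Drivers call v4l2_m2m_buf_queue() from their vb2 .buf_queue operation
 * so that queued buffers land on the rdy_queue that the scheduler above
 * checks. A minimal sketch (hypothetical "mydrv" naming):
 *
 *	static void mydrv_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct mydrv_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */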
/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
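
/*
 * The helpers above let a driver's ioctl table delegate all buffer
 * handling to this framework. A sketch (hypothetical "mydrv" naming;
 * the format ioctls remain driver-specific and are omitted):
 *
 *	static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */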
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	unsigned int ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
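
/*
 * Likewise, a driver can plug the fop helpers straight into its
 * v4l2_file_operations (a sketch; open/release are driver-specific and
 * the "mydrv" names are hypothetical):
 *
 *	static const struct v4l2_file_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= mydrv_open,
 *		.release	= mydrv_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */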