/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
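/*
 * Illustration only (not part of this framework): because of the offset
 * split above, userspace can mmap() buffers from both queues through the
 * same file descriptor. The offsets below are hypothetical; real values
 * come from VIDIOC_QUERYBUF, which already rebases CAPTURE offsets.
 *
 *	src = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, 0x0);          // OUTPUT (source) buffer
 *	dst = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, 0x40000000);   // CAPTURE buffer, offset >= 1 << 30
 */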
enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};
/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			that controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
/**
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job, flags_out, flags_cap;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
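/*
 * A minimal sketch (illustrative only, not part of this framework) of how
 * a driver's interrupt handler typically returns the processed buffers and
 * completes the job so the next one can be scheduled. The foo_* names are
 * hypothetical placeholders.
 *
 *	static irqreturn_t foo_irq(int irq, void *priv)
 *	{
 *		struct foo_dev *foo = priv;
 *		struct v4l2_m2m_ctx *m2m_ctx = foo->curr_ctx->fh.m2m_ctx;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(m2m_ctx);
 *		vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(foo->m2m_dev, m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */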
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is
	   no longer owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
				       struct v4l2_buffer *buf)
{
	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			unsigned int i;

			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}
}
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	__poll_t rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = EPOLLPRI;
		else if (req_events & EPOLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= EPOLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | EPOLLIN | EPOLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;
	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/* A memory-to-memory device consists of two DMA engine entities
	 * and one video processing entity.
	 * The DMA engine entities are linked to a V4L interface
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
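/*
 * A minimal usage sketch (illustrative only): a driver fills in a
 * struct v4l2_m2m_ops and creates the per-device context at probe time.
 * The foo_* callbacks and fields are hypothetical.
 *
 *	static const struct v4l2_m2m_ops foo_m2m_ops = {
 *		.device_run	= foo_device_run,
 *		.job_ready	= foo_job_ready,
 *		.job_abort	= foo_job_abort,
 *	};
 *
 *	foo->m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
 *	if (IS_ERR(foo->m2m_dev))
 *		return PTR_ERR(foo->m2m_dev);
 */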
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * If both queues use same mutex assign it as the common buffer
	 * queues lock to the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
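/*
 * A minimal sketch (illustrative only) of the queue_init callback and a
 * driver open() creating the per-file m2m context. The foo_* names are
 * hypothetical and other vb2_queue fields (drv_priv, buf_struct_size,
 * lock, timestamp_flags, ...) are omitted here for brevity.
 *
 *	static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
 *				  struct vb2_queue *dst_vq)
 *	{
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->ops = &foo_vb2_ops;
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
 *		dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		dst_vq->ops = &foo_vb2_ops;
 *		return vb2_queue_init(src_vq) ?: vb2_queue_init(dst_vq);
 *	}
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(foo->m2m_dev, ctx, foo_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */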
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
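/*
 * Typical wiring (illustrative only): drivers whose file handles embed a
 * struct v4l2_fh can point their ioctl ops straight at the helpers above.
 * foo_ioctl_ops and the format handlers are hypothetical; other entries
 * are omitted.
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */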
/*
 * v4l2_file_operations helpers. It is assumed here same lock is used
 * for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
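/*
 * Completing the picture (illustrative only): the fop helpers above slot
 * directly into a driver's v4l2_file_operations, provided both vb2 queues
 * share one lock (see v4l2_m2m_ctx_init()). foo_open/foo_release are
 * hypothetical driver callbacks.
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */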