2 * generic helper functions for handling video4linux capture buffers
4 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
6 * Highly based on video-buf written originally by:
7 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
8 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
9 * (c) 2006 Ted Walther and John Sokol
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/interrupt.h>
24 #include <media/videobuf-core.h>
/* Magic cookie stamped into every videobuf_buffer; the value encodes a date
 * (2007-07-28). MAGIC_CHECK() BUG()s on mismatch to catch use of
 * uninitialized or corrupted buffers early.
 * NOTE(review): the printk line and closing of this macro are elided from
 * this excerpt. */
26 #define MAGIC_BUFFER 0x20070728
27 #define MAGIC_CHECK(is, should) \
29 if (unlikely((is) != (should))) { \
31 "magic mismatch: %x (expected %x)\n", \
/* Module metadata plus the 'debug' verbosity knob (runtime-writable, 0644). */
38 module_param(debug, int, 0644);
40 MODULE_DESCRIPTION("helper module to manage video4linux buffers");
41 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
42 MODULE_LICENSE("GPL");
/* Emit a "vbuf:"-prefixed KERN_DEBUG message; gated on the module's 'debug'
 * level (the level comparison line is elided from this excerpt). */
44 #define dprintk(level, fmt, arg...) \
47 printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
50 /* --------------------------------------------------------------------- */
/* Invoke an optional backend callback from q->int_ops, falling back to
 * 0 (CALL) or NULL (CALLPTR) when the backend leaves it unimplemented. */
52 #define CALL(q, f, arg...) \
53 ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
54 #define CALLPTR(q, f, arg...) \
55 ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
/* Allocate one buffer via the backend's alloc_vb() hook and initialise the
 * generic parts (done waitqueue, magic). q->msize must be at least
 * sizeof(struct videobuf_buffer) since backends embed it in a larger struct.
 * NOTE(review): the error-return and final return paths are elided from this
 * excerpt; presumably returns NULL when int_ops/alloc_vb are missing. */
57 struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
59 struct videobuf_buffer *vb;
61 BUG_ON(q->msize < sizeof(*vb));
63 if (!q->int_ops || !q->int_ops->alloc_vb) {
64 printk(KERN_ERR "No specific ops defined!\n");
68 vb = q->int_ops->alloc_vb(q->msize);
70 init_waitqueue_head(&vb->done);
71 vb->magic = MAGIC_BUFFER;
76 EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
/* Return true when the buffer is no longer owned by the driver, i.e. its
 * state is neither ACTIVE nor QUEUED. The state is read under q->irqlock
 * so it is coherent with the driver's interrupt-time updates. */
78 static int state_neither_active_nor_queued(struct videobuf_queue *q,
79 struct videobuf_buffer *vb)
84 spin_lock_irqsave(q->irqlock, flags);
85 rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
86 spin_unlock_irqrestore(q->irqlock, flags);
/* Wait until @vb leaves the ACTIVE/QUEUED states (i.e. the hardware is done
 * with it). @intr selects interruptible vs. uninterruptible sleep; the
 * @non_blocking early-return path is elided from this excerpt.
 * If the caller holds q->ext_lock (the driver's serialization mutex), it is
 * dropped around the sleep so other device access is not blocked, then
 * re-taken before returning. */
90 int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
91 int non_blocking, int intr)
96 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
99 if (state_neither_active_nor_queued(q, vb))
104 is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
106 /* Release vdev lock to prevent this wait from blocking outside access to
109 mutex_unlock(q->ext_lock);
111 ret = wait_event_interruptible(vb->done,
112 state_neither_active_nor_queued(q, vb));
114 wait_event(vb->done, state_neither_active_nor_queued(q, vb));
117 mutex_lock(q->ext_lock);
121 EXPORT_SYMBOL_GPL(videobuf_waiton);
/* Lock down the buffer's memory for I/O by delegating to the backend's
 * iolock() hook (no-op returning 0 if the backend lacks one). @fbuf is
 * only meaningful for overlay buffers. */
123 int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
124 struct v4l2_framebuffer *fbuf)
126 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
127 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
129 return CALL(q, iolock, q, vb, fbuf);
131 EXPORT_SYMBOL_GPL(videobuf_iolock);
/* Return a kernel virtual address for @buf via the backend's vaddr() hook.
 * NOTE(review): the fallback return (presumably NULL) is elided from this
 * excerpt. */
133 void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
134 struct videobuf_buffer *buf)
136 if (q->int_ops->vaddr)
137 return q->int_ops->vaddr(buf);
140 EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
142 /* --------------------------------------------------------------------- */
/* Initialise a videobuf queue: zero it, record the caller-supplied lock,
 * ops and backend int_ops, and set up the internal mutex, waitqueue and
 * stream list. BUG()s if any of the four mandatory buf_* driver callbacks
 * is missing. Several field assignments (ops, type, field, msize, ...) are
 * elided from this excerpt. */
145 void videobuf_queue_core_init(struct videobuf_queue *q,
146 const struct videobuf_queue_ops *ops,
149 enum v4l2_buf_type type,
150 enum v4l2_field field,
153 struct videobuf_qtype_ops *int_ops,
154 struct mutex *ext_lock)
157 memset(q, 0, sizeof(*q));
158 q->irqlock = irqlock;
159 q->ext_lock = ext_lock;
166 q->int_ops = int_ops;
168 /* All buffer operations are mandatory */
169 BUG_ON(!q->ops->buf_setup);
170 BUG_ON(!q->ops->buf_prepare);
171 BUG_ON(!q->ops->buf_queue);
172 BUG_ON(!q->ops->buf_release);
174 /* Lock is mandatory for queue_cancel to work */
177 /* Having implementations for abstract methods are mandatory */
180 mutex_init(&q->vb_lock);
181 init_waitqueue_head(&q->wait);
182 INIT_LIST_HEAD(&q->stream);
184 EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
/* Return non-zero when the queue has work in flight: streaming or a read is
 * active, or any buffer is mmapped, QUEUED or ACTIVE.
 * Locking: Only usage in bttv unsafe find way to remove */
187 int videobuf_queue_is_busy(struct videobuf_queue *q)
191 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
194 dprintk(1, "busy: streaming active\n");
198 dprintk(1, "busy: pending read #1\n");
202 dprintk(1, "busy: pending read #2\n");
205 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
206 if (NULL == q->bufs[i])
208 if (q->bufs[i]->map) {
209 dprintk(1, "busy: buffer #%d mapped\n", i);
212 if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
213 dprintk(1, "busy: buffer #%d queued\n", i);
216 if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
/* fix: debug message previously read "avtive" */
217 dprintk(1, "busy: buffer #%d active\n", i);
223 EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
226 * __videobuf_free() - free all the buffers and their control structures
228 * This function can only be called if streaming/reading is off, i.e. no buffers
229 * are under control of the driver.
/* Locking: Caller holds q->vb_lock.
 * Refuses (error paths elided here) when streaming/reading is active or any
 * buffer is still mmapped; otherwise releases every allocated buffer via the
 * driver's buf_release(). */
232 static int __videobuf_free(struct videobuf_queue *q)
236 dprintk(1, "%s\n", __func__);
240 if (q->streaming || q->reading) {
241 dprintk(1, "Cannot free buffers when streaming or reading\n");
245 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
247 for (i = 0; i < VIDEO_MAX_FRAME; i++)
248 if (q->bufs[i] && q->bufs[i]->map) {
249 dprintk(1, "Cannot free mmapped buffers\n");
253 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
254 if (NULL == q->bufs[i])
256 q->ops->buf_release(q, q->bufs[i]);
264 /* Locking: Caller holds q->vb_lock */
/* Abort all in-flight I/O: wake sleepers on q->wait, then under q->irqlock
 * unlink every QUEUED buffer, mark it VIDEOBUF_ERROR and wake its waiters;
 * finally release all buffers via buf_release() and reset the stream list.
 * (The streaming/reading flag clears are elided from this excerpt.) */
265 void videobuf_queue_cancel(struct videobuf_queue *q)
267 unsigned long flags = 0;
272 wake_up_interruptible_sync(&q->wait);
274 /* remove queued buffers from list */
275 spin_lock_irqsave(q->irqlock, flags);
276 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
277 if (NULL == q->bufs[i])
279 if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
280 list_del(&q->bufs[i]->queue);
281 q->bufs[i]->state = VIDEOBUF_ERROR;
282 wake_up_all(&q->bufs[i]->done);
285 spin_unlock_irqrestore(q->irqlock, flags);
287 /* free all buffers + clear queue */
288 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
289 if (NULL == q->bufs[i])
291 q->ops->buf_release(q, q->bufs[i]);
293 INIT_LIST_HEAD(&q->stream);
295 EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
297 /* --------------------------------------------------------------------- */
299 /* Locking: Caller holds q->vb_lock */
/* Pick the field for the next capture. For V4L2_FIELD_ALTERNATE the queue
 * alternates TOP/BOTTOM, tracking the last field in q->last; any other fixed
 * field is returned unchanged. FIELD_ANY must have been resolved earlier. */
300 enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
302 enum v4l2_field field = q->field;
304 BUG_ON(V4L2_FIELD_ANY == field);
306 if (V4L2_FIELD_ALTERNATE == field) {
307 if (V4L2_FIELD_TOP == q->last) {
308 field = V4L2_FIELD_BOTTOM;
309 q->last = V4L2_FIELD_BOTTOM;
311 field = V4L2_FIELD_TOP;
312 q->last = V4L2_FIELD_TOP;
317 EXPORT_SYMBOL_GPL(videobuf_next_field);
319 /* Locking: Caller holds q->vb_lock */
/* Fill a userspace v4l2_buffer @b from the internal state of @vb: memory
 * type, offset/userptr/length per memory model, flags derived from the
 * buffer state, field, timestamp, bytesused and sequence (field_count/2).
 * NOTE(review): the switch headers and break statements are elided from
 * this excerpt; case labels below belong to switches on b->memory and
 * vb->state respectively. */
320 static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
321 struct videobuf_buffer *vb, enum v4l2_buf_type type)
323 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
324 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
329 b->memory = vb->memory;
331 case V4L2_MEMORY_MMAP:
332 b->m.offset = vb->boff;
333 b->length = vb->bsize;
335 case V4L2_MEMORY_USERPTR:
336 b->m.userptr = vb->baddr;
337 b->length = vb->bsize;
339 case V4L2_MEMORY_OVERLAY:
340 b->m.offset = vb->boff;
342 case V4L2_MEMORY_DMABUF:
343 /* DMABUF is not handled in videobuf framework */
347 b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
349 b->flags |= V4L2_BUF_FLAG_MAPPED;
352 case VIDEOBUF_PREPARED:
353 case VIDEOBUF_QUEUED:
354 case VIDEOBUF_ACTIVE:
355 b->flags |= V4L2_BUF_FLAG_QUEUED;
358 b->flags |= V4L2_BUF_FLAG_ERROR;
361 b->flags |= V4L2_BUF_FLAG_DONE;
363 case VIDEOBUF_NEEDS_INIT:
369 b->field = vb->field;
370 b->timestamp = vb->ts;
371 b->bytesused = vb->size;
372 b->sequence = vb->field_count >> 1;
/* Public wrapper: free all buffers under the queue lock. */
375 int videobuf_mmap_free(struct videobuf_queue *q)
378 videobuf_queue_lock(q);
379 ret = __videobuf_free(q);
380 videobuf_queue_unlock(q);
383 EXPORT_SYMBOL_GPL(videobuf_mmap_free);
385 /* Locking: Caller holds q->vb_lock */
/* Drop any existing buffers, then allocate up to @bcount buffers of @bsize
 * bytes for the given @memory model. For MMAP buffers the fake file offset
 * used as mmap cookie is PAGE_ALIGN(bsize) * index. Returns the number of
 * buffers actually allocated (return statement elided from this excerpt);
 * stops early if an allocation fails. */
386 int __videobuf_mmap_setup(struct videobuf_queue *q,
387 unsigned int bcount, unsigned int bsize,
388 enum v4l2_memory memory)
393 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
395 err = __videobuf_free(q);
399 /* Allocate and initialize buffers */
400 for (i = 0; i < bcount; i++) {
401 q->bufs[i] = videobuf_alloc_vb(q);
403 if (NULL == q->bufs[i])
407 q->bufs[i]->memory = memory;
408 q->bufs[i]->bsize = bsize;
410 case V4L2_MEMORY_MMAP:
411 q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
413 case V4L2_MEMORY_USERPTR:
414 case V4L2_MEMORY_OVERLAY:
415 case V4L2_MEMORY_DMABUF:
424 dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
428 EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
/* Public wrapper: run __videobuf_mmap_setup() under the queue lock. */
430 int videobuf_mmap_setup(struct videobuf_queue *q,
431 unsigned int bcount, unsigned int bsize,
432 enum v4l2_memory memory)
435 videobuf_queue_lock(q);
436 ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
437 videobuf_queue_unlock(q);
440 EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
/* VIDIOC_REQBUFS handler: validate memory model and queue type, refuse while
 * streaming or while buffers sit on the stream list, treat count==0 as
 * "free everything", clamp the request to VIDEO_MAX_FRAME, let the driver
 * adjust count/size via buf_setup(), then allocate via
 * __videobuf_mmap_setup(). Error-path jumps/returns are elided from this
 * excerpt. DMABUF is deliberately not accepted. */
442 int videobuf_reqbufs(struct videobuf_queue *q,
443 struct v4l2_requestbuffers *req)
445 unsigned int size, count;
448 if (req->memory != V4L2_MEMORY_MMAP &&
449 req->memory != V4L2_MEMORY_USERPTR &&
450 req->memory != V4L2_MEMORY_OVERLAY) {
451 dprintk(1, "reqbufs: memory type invalid\n");
455 videobuf_queue_lock(q);
456 if (req->type != q->type) {
457 dprintk(1, "reqbufs: queue type invalid\n");
463 dprintk(1, "reqbufs: streaming already exists\n");
467 if (!list_empty(&q->stream)) {
468 dprintk(1, "reqbufs: stream running\n");
473 if (req->count == 0) {
474 dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
475 retval = __videobuf_free(q);
480 if (count > VIDEO_MAX_FRAME)
481 count = VIDEO_MAX_FRAME;
483 q->ops->buf_setup(q, &count, &size);
484 dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
486 (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
488 retval = __videobuf_mmap_setup(q, count, size, req->memory);
490 dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
498 videobuf_queue_unlock(q);
501 EXPORT_SYMBOL_GPL(videobuf_reqbufs);
/* VIDIOC_QUERYBUF handler: after type/index/existence checks, report the
 * buffer's current status into @b via videobuf_status(). Error-path jumps
 * are elided from this excerpt. */
503 int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
507 videobuf_queue_lock(q);
508 if (unlikely(b->type != q->type)) {
509 dprintk(1, "querybuf: Wrong type.\n");
512 if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
513 dprintk(1, "querybuf: index out of range.\n");
516 if (unlikely(NULL == q->bufs[b->index])) {
517 dprintk(1, "querybuf: buffer is null.\n");
521 videobuf_status(q, b, q->bufs[b->index], q->type);
525 videobuf_queue_unlock(q);
528 EXPORT_SYMBOL_GPL(videobuf_querybuf);
/* VIDIOC_QBUF handler: validate the request against the queue, copy
 * per-memory-model parameters into the internal buffer, run the driver's
 * buf_prepare(), then append the buffer to the stream list and hand it to
 * buf_queue() under q->irqlock; finally wake poll()/read() sleepers.
 * For MMAP buffers, mm->mmap_sem is held for reading around the whole
 * operation (taken before q->vb_lock to keep lock ordering with the mmap
 * path). Error-path jumps and the switch headers are elided from this
 * excerpt.
 * fix: "&current" had been mis-encoded as the HTML entity residue
 * "¤t" at both down_read() and up_read() call sites. */
530 int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
532 struct videobuf_buffer *buf;
533 enum v4l2_field field;
534 unsigned long flags = 0;
537 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
539 if (b->memory == V4L2_MEMORY_MMAP)
540 down_read(&current->mm->mmap_sem);
542 videobuf_queue_lock(q);
545 dprintk(1, "qbuf: Reading running...\n");
549 if (b->type != q->type) {
550 dprintk(1, "qbuf: Wrong type.\n");
553 if (b->index >= VIDEO_MAX_FRAME) {
554 dprintk(1, "qbuf: index out of range.\n");
557 buf = q->bufs[b->index];
559 dprintk(1, "qbuf: buffer is null.\n");
562 MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
563 if (buf->memory != b->memory) {
564 dprintk(1, "qbuf: memory type is wrong.\n");
567 if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
568 dprintk(1, "qbuf: buffer is already queued or active.\n");
573 case V4L2_MEMORY_MMAP:
574 if (0 == buf->baddr) {
575 dprintk(1, "qbuf: mmap requested "
576 "but buffer addr is zero!\n");
579 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
580 || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
581 || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
582 || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
583 buf->size = b->bytesused;
584 buf->field = b->field;
585 buf->ts = b->timestamp;
588 case V4L2_MEMORY_USERPTR:
589 if (b->length < buf->bsize) {
590 dprintk(1, "qbuf: buffer length is not enough\n");
593 if (VIDEOBUF_NEEDS_INIT != buf->state &&
594 buf->baddr != b->m.userptr)
595 q->ops->buf_release(q, buf);
596 buf->baddr = b->m.userptr;
598 case V4L2_MEMORY_OVERLAY:
599 buf->boff = b->m.offset;
602 dprintk(1, "qbuf: wrong memory type\n");
606 dprintk(1, "qbuf: requesting next field\n");
607 field = videobuf_next_field(q);
608 retval = q->ops->buf_prepare(q, buf, field);
610 dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
614 list_add_tail(&buf->stream, &q->stream);
616 spin_lock_irqsave(q->irqlock, flags);
617 q->ops->buf_queue(q, buf);
618 spin_unlock_irqrestore(q->irqlock, flags);
620 dprintk(1, "qbuf: succeeded\n");
622 wake_up_interruptible_sync(&q->wait);
625 videobuf_queue_unlock(q);
627 if (b->memory == V4L2_MEMORY_MMAP)
628 up_read(&current->mm->mmap_sem);
632 EXPORT_SYMBOL_GPL(videobuf_qbuf);
634 /* Locking: Caller holds q->vb_lock */
/* Wait (unless @noblock) until the stream list has a buffer to dequeue or
 * streaming stops. The queue lock is dropped around the sleep to avoid
 * deadlocking against qbuf; conditions are re-validated after re-locking
 * (the re-check loop is elided from this excerpt). */
635 static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
641 dprintk(1, "next_buffer: Not streaming\n");
646 if (list_empty(&q->stream)) {
649 dprintk(2, "next_buffer: no buffers to dequeue\n");
652 dprintk(2, "next_buffer: waiting on buffer\n");
654 /* Drop lock to avoid deadlock with qbuf */
655 videobuf_queue_unlock(q);
657 /* Checking list_empty and streaming is safe without
658 * locks because we goto checks to validate while
659 * holding locks before proceeding */
660 retval = wait_event_interruptible(q->wait,
661 !list_empty(&q->stream) || !q->streaming);
662 videobuf_queue_lock(q);
677 /* Locking: Caller holds q->vb_lock */
/* Fetch the next buffer from the head of the stream list and wait until the
 * driver is done with it; on success *vb points at the buffer (the final
 * assignment/return lines are elided from this excerpt). */
678 static int stream_next_buffer(struct videobuf_queue *q,
679 struct videobuf_buffer **vb, int nonblocking)
682 struct videobuf_buffer *buf = NULL;
684 retval = stream_next_buffer_check_queue(q, nonblocking);
688 buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
689 retval = videobuf_waiton(q, buf, nonblocking, 1);
/* VIDIOC_DQBUF handler: get the next completed buffer, sync it via the
 * backend, copy its status to userspace (@b is zeroed first), unlink it
 * from the stream list and mark it IDLE again. DONE/ERROR state labels and
 * error-path jumps are elided from this excerpt; ERROR presumably still
 * dequeues with -EIO semantics — confirm against the full source. */
698 int videobuf_dqbuf(struct videobuf_queue *q,
699 struct v4l2_buffer *b, int nonblocking)
701 struct videobuf_buffer *buf = NULL;
704 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
706 memset(b, 0, sizeof(*b));
707 videobuf_queue_lock(q);
709 retval = stream_next_buffer(q, &buf, nonblocking);
711 dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
715 switch (buf->state) {
717 dprintk(1, "dqbuf: state is error\n");
720 dprintk(1, "dqbuf: state is done\n");
723 dprintk(1, "dqbuf: state invalid\n");
727 CALL(q, sync, q, buf);
728 videobuf_status(q, b, buf, q->type);
729 list_del(&buf->stream);
730 buf->state = VIDEOBUF_IDLE;
731 b->flags &= ~V4L2_BUF_FLAG_DONE;
733 videobuf_queue_unlock(q);
736 EXPORT_SYMBOL_GPL(videobuf_dqbuf);
/* VIDIOC_STREAMON handler: (busy/state checks elided from this excerpt)
 * hand every already-PREPARED buffer on the stream list to the driver's
 * buf_queue() under q->irqlock, then wake sleepers on q->wait. */
738 int videobuf_streamon(struct videobuf_queue *q)
740 struct videobuf_buffer *buf;
741 unsigned long flags = 0;
744 videobuf_queue_lock(q);
752 spin_lock_irqsave(q->irqlock, flags);
753 list_for_each_entry(buf, &q->stream, stream)
754 if (buf->state == VIDEOBUF_PREPARED)
755 q->ops->buf_queue(q, buf);
756 spin_unlock_irqrestore(q->irqlock, flags);
758 wake_up_interruptible_sync(&q->wait);
760 videobuf_queue_unlock(q);
763 EXPORT_SYMBOL_GPL(videobuf_streamon);
765 /* Locking: Caller holds q->vb_lock */
/* Stop streaming by cancelling all in-flight buffers (the not-streaming
 * early-exit and return are elided from this excerpt). */
766 static int __videobuf_streamoff(struct videobuf_queue *q)
771 videobuf_queue_cancel(q);
/* VIDIOC_STREAMOFF handler: run __videobuf_streamoff() under the queue lock. */
776 int videobuf_streamoff(struct videobuf_queue *q)
780 videobuf_queue_lock(q);
781 retval = __videobuf_streamoff(q);
782 videobuf_queue_unlock(q);
786 EXPORT_SYMBOL_GPL(videobuf_streamoff);
788 /* Locking: Caller holds q->vb_lock */
/* read() fast path: capture a single frame directly into the user buffer by
 * treating it as a USERPTR buffer — allocate q->read_buf, prepare, queue and
 * wait uninterruptibly, then return the captured size (or an error on
 * VIDEOBUF_ERROR; some cleanup/return lines elided from this excerpt).
 * The temporary read_buf is released before returning. */
789 static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
791 size_t count, loff_t *ppos)
793 enum v4l2_field field;
794 unsigned long flags = 0;
797 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
800 q->read_buf = videobuf_alloc_vb(q);
801 if (NULL == q->read_buf)
804 q->read_buf->memory = V4L2_MEMORY_USERPTR;
805 q->read_buf->baddr = (unsigned long)data;
806 q->read_buf->bsize = count;
808 field = videobuf_next_field(q);
809 retval = q->ops->buf_prepare(q, q->read_buf, field);
813 /* start capture & wait */
814 spin_lock_irqsave(q->irqlock, flags);
815 q->ops->buf_queue(q, q->read_buf);
816 spin_unlock_irqrestore(q->irqlock, flags);
817 retval = videobuf_waiton(q, q->read_buf, 0, 0);
819 CALL(q, sync, q, q->read_buf);
820 if (VIDEOBUF_ERROR == q->read_buf->state)
823 retval = q->read_buf->size;
828 q->ops->buf_release(q, q->read_buf);
/* Copy up to @count bytes of @buf's payload to userspace, starting at the
 * queue's running read offset q->read_off. Uses the backend's vaddr() to
 * reach the data. Returns the byte count or -EFAULT (return lines elided
 * from this excerpt). */
834 static int __videobuf_copy_to_user(struct videobuf_queue *q,
835 struct videobuf_buffer *buf,
836 char __user *data, size_t count,
839 void *vaddr = CALLPTR(q, vaddr, buf);
841 /* copy to userspace */
842 if (count > buf->size - q->read_off)
843 count = buf->size - q->read_off;
845 if (copy_to_user(data, vaddr + q->read_off, count))
/* Streaming-read copy helper. When @vbihack is set, the frame counter is
 * smuggled into the last 4 bytes of the VBI data block (legacy compat —
 * see the in-code comment), then the data is copied with
 * __videobuf_copy_to_user(). -EFAULT at pos==0 is handled specially
 * (handling line elided from this excerpt). */
851 static int __videobuf_copy_stream(struct videobuf_queue *q,
852 struct videobuf_buffer *buf,
853 char __user *data, size_t count, size_t pos,
854 int vbihack, int nonblocking)
856 unsigned int *fc = CALLPTR(q, vaddr, buf);
859 /* dirty, undocumented hack -- pass the frame counter
860 * within the last four bytes of each vbi data block.
861 * We need that one to maintain backward compatibility
862 * to all vbi decoding software out there ... */
863 fc += (buf->size >> 2) - 1;
864 *fc = buf->field_count >> 1;
865 dprintk(1, "vbihack: %d\n", *fc);
868 /* copy stuff using the common method */
869 count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
871 if ((count == -EFAULT) && (pos == 0))
/* Single-frame read(): try the zerocopy path first (when there is no
 * pending read_buf and the size fits — condition partially elided from this
 * excerpt), otherwise capture into a kernel bounce buffer: allocate/prepare/
 * queue q->read_buf, wait for completion, sync, then copy to userspace,
 * releasing the buffer once fully consumed. Error-path jumps are elided. */
877 ssize_t videobuf_read_one(struct videobuf_queue *q,
878 char __user *data, size_t count, loff_t *ppos,
881 enum v4l2_field field;
882 unsigned long flags = 0;
883 unsigned size = 0, nbufs = 1;
886 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
888 videobuf_queue_lock(q);
890 q->ops->buf_setup(q, &nbufs, &size);
892 if (NULL == q->read_buf &&
895 retval = videobuf_read_zerocopy(q, data, count, ppos);
896 if (retval >= 0 || retval == -EIO)
899 /* fallback to kernel bounce buffer on failures */
902 if (NULL == q->read_buf) {
903 /* need to capture a new frame */
905 q->read_buf = videobuf_alloc_vb(q);
907 dprintk(1, "video alloc=0x%p\n", q->read_buf);
908 if (NULL == q->read_buf)
910 q->read_buf->memory = V4L2_MEMORY_USERPTR;
911 q->read_buf->bsize = count; /* preferred size */
912 field = videobuf_next_field(q);
913 retval = q->ops->buf_prepare(q, q->read_buf, field);
921 spin_lock_irqsave(q->irqlock, flags);
922 q->ops->buf_queue(q, q->read_buf);
923 spin_unlock_irqrestore(q->irqlock, flags);
928 /* wait until capture is done */
929 retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
933 CALL(q, sync, q, q->read_buf);
935 if (VIDEOBUF_ERROR == q->read_buf->state) {
936 /* catch I/O errors */
937 q->ops->buf_release(q, q->read_buf);
944 /* Copy to userspace */
945 retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
949 q->read_off += retval;
950 if (q->read_off == q->read_buf->size) {
951 /* all data copied, cleanup */
952 q->ops->buf_release(q, q->read_buf);
958 videobuf_queue_unlock(q);
961 EXPORT_SYMBOL_GPL(videobuf_read_one);
963 /* Locking: Caller holds q->vb_lock */
/* Start read()-driven capture: ask the driver how many buffers of what size
 * it wants, clamp to VIDEO_MAX_FRAME, allocate them as USERPTR bounce
 * buffers, prepare each one onto the stream list, then queue them all to
 * the driver under q->irqlock. (q->reading flag set and returns elided
 * from this excerpt.) */
964 static int __videobuf_read_start(struct videobuf_queue *q)
966 enum v4l2_field field;
967 unsigned long flags = 0;
968 unsigned int count = 0, size = 0;
971 q->ops->buf_setup(q, &count, &size);
974 if (count > VIDEO_MAX_FRAME)
975 count = VIDEO_MAX_FRAME;
976 size = PAGE_ALIGN(size);
978 err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
984 for (i = 0; i < count; i++) {
985 field = videobuf_next_field(q);
986 err = q->ops->buf_prepare(q, q->bufs[i], field);
989 list_add_tail(&q->bufs[i]->stream, &q->stream);
991 spin_lock_irqsave(q->irqlock, flags);
992 for (i = 0; i < count; i++)
993 q->ops->buf_queue(q, q->bufs[i]);
994 spin_unlock_irqrestore(q->irqlock, flags);
/* Stop read()-driven capture: cancel all buffers and reset the stream list;
 * per-buffer cleanup inside the loop is elided from this excerpt. */
999 static void __videobuf_read_stop(struct videobuf_queue *q)
1003 videobuf_queue_cancel(q);
1005 INIT_LIST_HEAD(&q->stream);
1006 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1007 if (NULL == q->bufs[i])
/* Public wrapper: start read()-driven capture under the queue lock. */
1015 int videobuf_read_start(struct videobuf_queue *q)
1019 videobuf_queue_lock(q);
1020 rc = __videobuf_read_start(q);
1021 videobuf_queue_unlock(q);
1025 EXPORT_SYMBOL_GPL(videobuf_read_start);
/* Public wrapper: stop read()-driven capture under the queue lock. */
1027 void videobuf_read_stop(struct videobuf_queue *q)
1029 videobuf_queue_lock(q);
1030 __videobuf_read_stop(q);
1031 videobuf_queue_unlock(q);
1033 EXPORT_SYMBOL_GPL(videobuf_read_stop);
/* Stop all queue activity: streamoff if streaming, read_stop if reading
 * (the q->streaming / q->reading condition lines are elided from this
 * excerpt). */
1035 void videobuf_stop(struct videobuf_queue *q)
1037 videobuf_queue_lock(q);
1040 __videobuf_streamoff(q);
1043 __videobuf_read_stop(q);
1045 videobuf_queue_unlock(q);
1047 EXPORT_SYMBOL_GPL(videobuf_stop);
/* Multi-buffer (streaming) read(): start read capture on first use, then
 * loop (loop construct partially elided from this excerpt): take the next
 * completed buffer from the stream list into q->read_buf, wait for it, copy
 * its data out with __videobuf_copy_stream(), and re-queue it to the driver
 * once fully consumed. @vbihack enables the VBI frame-counter compat hack. */
1049 ssize_t videobuf_read_stream(struct videobuf_queue *q,
1050 char __user *data, size_t count, loff_t *ppos,
1051 int vbihack, int nonblocking)
1054 unsigned long flags = 0;
1056 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
1058 dprintk(2, "%s\n", __func__);
1059 videobuf_queue_lock(q);
1064 retval = __videobuf_read_start(q);
1071 /* get / wait for data */
1072 if (NULL == q->read_buf) {
1073 q->read_buf = list_entry(q->stream.next,
1074 struct videobuf_buffer,
1076 list_del(&q->read_buf->stream);
1079 rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
1086 if (q->read_buf->state == VIDEOBUF_DONE) {
1087 rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
1088 retval, vbihack, nonblocking);
1098 q->read_off = q->read_buf->size;
1103 /* requeue buffer when done with copying */
1104 if (q->read_off == q->read_buf->size) {
1105 list_add_tail(&q->read_buf->stream,
1107 spin_lock_irqsave(q->irqlock, flags);
1108 q->ops->buf_queue(q, q->read_buf);
1109 spin_unlock_irqrestore(q->irqlock, flags);
1117 videobuf_queue_unlock(q);
1120 EXPORT_SYMBOL_GPL(videobuf_read_stream);
/* poll() implementation: pick the buffer to wait on — head of the stream
 * list when streaming, otherwise (for POLLIN requests) start read capture
 * and use q->read_buf. Registers the buffer's done waitqueue with
 * poll_wait(); when the buffer is DONE or ERROR, reports POLLOUT|POLLWRNORM
 * for output queue types, POLLIN|POLLRDNORM otherwise. Several branch
 * headers (q->streaming test, switch on q->type) are elided from this
 * excerpt. */
1122 unsigned int videobuf_poll_stream(struct file *file,
1123 struct videobuf_queue *q,
1126 unsigned long req_events = poll_requested_events(wait);
1127 struct videobuf_buffer *buf = NULL;
1128 unsigned int rc = 0;
1130 videobuf_queue_lock(q);
1132 if (!list_empty(&q->stream))
1133 buf = list_entry(q->stream.next,
1134 struct videobuf_buffer, stream);
1135 } else if (req_events & (POLLIN | POLLRDNORM)) {
1137 __videobuf_read_start(q);
1140 } else if (NULL == q->read_buf) {
1141 q->read_buf = list_entry(q->stream.next,
1142 struct videobuf_buffer,
1144 list_del(&q->read_buf->stream);
1153 poll_wait(file, &buf->done, wait);
1154 if (buf->state == VIDEOBUF_DONE ||
1155 buf->state == VIDEOBUF_ERROR) {
1157 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1158 case V4L2_BUF_TYPE_VBI_OUTPUT:
1159 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
1160 case V4L2_BUF_TYPE_SDR_OUTPUT:
1161 rc = POLLOUT | POLLWRNORM;
1164 rc = POLLIN | POLLRDNORM;
1169 videobuf_queue_unlock(q);
1172 EXPORT_SYMBOL_GPL(videobuf_poll_stream);
/* mmap() implementation: require PROT_WRITE + MAP_SHARED, then locate the
 * MMAP buffer whose boff cookie matches the requested file offset and
 * delegate the mapping to the backend's mmap_mapper() hook. Returns the
 * hook's result or (presumably) an error when no buffer matches — the
 * default-return line is elided from this excerpt. */
1174 int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
1179 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
1181 if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
1182 dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
1186 videobuf_queue_lock(q);
1187 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1188 struct videobuf_buffer *buf = q->bufs[i];
1190 if (buf && buf->memory == V4L2_MEMORY_MMAP &&
1191 buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
1192 rc = CALL(q, mmap_mapper, q, buf, vma);
1196 videobuf_queue_unlock(q);
1200 EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);