1 // SPDX-License-Identifier: GPL-2.0-only
3 * generic helper functions for handling video4linux capture buffers
5 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
7 * Highly based on video-buf written originally by:
8 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
9 * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
10 * (c) 2006 Ted Walther and John Sokol
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
21 #include <media/videobuf-core.h>
23 #define MAGIC_BUFFER 0x20070728
24 #define MAGIC_CHECK(is, should) \
26 if (unlikely((is) != (should))) { \
28 "magic mismatch: %x (expected %x)\n", \
35 module_param(debug, int, 0644);
37 MODULE_DESCRIPTION("helper module to manage video4linux buffers");
38 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
39 MODULE_LICENSE("GPL");
41 #define dprintk(level, fmt, arg...) \
44 printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
47 /* --------------------------------------------------------------------- */
49 #define CALL(q, f, arg...) \
50 ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
51 #define CALLPTR(q, f, arg...) \
52 ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
/*
 * Allocate one videobuf_buffer via the queue's int_ops->alloc_vb() hook
 * and initialise its completion waitqueue and magic marker.
 * NOTE(review): this extract has elided lines — the error-return after
 * the missing-ops check, the NULL check on vb, and the final return are
 * not visible here.
 */
54 struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
56 struct videobuf_buffer *vb;
58 BUG_ON(q->msize < sizeof(*vb));
60 if (!q->int_ops || !q->int_ops->alloc_vb) {
61 printk(KERN_ERR "No specific ops defined!\n");
/* Backend (vmalloc/dma-sg/dma-contig) allocates q->msize bytes. */
65 vb = q->int_ops->alloc_vb(q->msize);
67 init_waitqueue_head(&vb->done);
68 vb->magic = MAGIC_BUFFER;
73 EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
/*
 * Return nonzero, sampled under q->irqlock, when @vb is neither ACTIVE
 * nor QUEUED — i.e. the driver/hardware is finished with the buffer.
 */
75 static int state_neither_active_nor_queued(struct videobuf_queue *q,
76 struct videobuf_buffer *vb)
81 spin_lock_irqsave(q->irqlock, flags);
82 rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
83 spin_unlock_irqrestore(q->irqlock, flags);
/*
 * Wait until @vb leaves the ACTIVE/QUEUED states (capture/DMA done).
 * @intr selects an interruptible wait; q->ext_lock, if currently held,
 * is dropped across the sleep so other device fops are not blocked.
 * NOTE(review): lines are elided — the non_blocking early-return path
 * and the is_ext_locked guards around unlock/relock are not visible.
 */
87 int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
88 int non_blocking, int intr)
93 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
/* Already idle: nothing to wait for. */
96 if (state_neither_active_nor_queued(q, vb))
101 is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
103 /* Release vdev lock to prevent this wait from blocking outside access to
106 mutex_unlock(q->ext_lock);
108 ret = wait_event_interruptible(vb->done,
109 state_neither_active_nor_queued(q, vb));
/* Non-interruptible variant when @intr is not set. */
111 wait_event(vb->done, state_neither_active_nor_queued(q, vb));
114 mutex_lock(q->ext_lock);
118 EXPORT_SYMBOL_GPL(videobuf_waiton);
/*
 * Delegate per-buffer memory locking/pinning to the memory backend's
 * iolock hook; returns 0 when the backend provides none (see CALL()).
 */
120 int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
121 struct v4l2_framebuffer *fbuf)
123 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
124 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
126 return CALL(q, iolock, q, vb, fbuf);
128 EXPORT_SYMBOL_GPL(videobuf_iolock);
/*
 * Return the kernel virtual address of @buf via the backend's vaddr
 * hook.  NOTE(review): the no-hook fallback return is elided here.
 */
130 void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
131 struct videobuf_buffer *buf)
133 if (q->int_ops->vaddr)
134 return q->int_ops->vaddr(buf);
137 EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
139 /* --------------------------------------------------------------------- */
/*
 * Initialise a videobuf queue: zero the structure, record locks, ops
 * and buffer parameters, sanity-check the mandatory callbacks, then set
 * up the internal mutex, waitqueue and stream list.
 * NOTE(review): several parameter lines and the assignments for ops,
 * type, field, msize etc. are elided in this extract.
 */
142 void videobuf_queue_core_init(struct videobuf_queue *q,
143 const struct videobuf_queue_ops *ops,
146 enum v4l2_buf_type type,
147 enum v4l2_field field,
150 struct videobuf_qtype_ops *int_ops,
151 struct mutex *ext_lock)
154 memset(q, 0, sizeof(*q));
155 q->irqlock = irqlock;
156 q->ext_lock = ext_lock;
163 q->int_ops = int_ops;
165 /* All buffer operations are mandatory */
166 BUG_ON(!q->ops->buf_setup);
167 BUG_ON(!q->ops->buf_prepare);
168 BUG_ON(!q->ops->buf_queue);
169 BUG_ON(!q->ops->buf_release);
171 /* Lock is mandatory for queue_cancel to work */
174 /* Having implementations for abstract methods are mandatory */
177 mutex_init(&q->vb_lock);
178 init_waitqueue_head(&q->wait);
179 INIT_LIST_HEAD(&q->stream);
181 EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
183 /* Locking: only used (unsafely, without vb_lock) by bttv — find a way to remove */
/*
 * Report whether the queue is in use: streaming, a pending read, or any
 * buffer that is mapped, queued or active.  Returns nonzero when busy.
 * NOTE(review): the streaming/reading checks and the per-case return
 * statements are elided in this extract.
 */
184 int videobuf_queue_is_busy(struct videobuf_queue *q)
188 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
191 dprintk(1, "busy: streaming active\n");
195 dprintk(1, "busy: pending read #1\n");
199 dprintk(1, "busy: pending read #2\n");
202 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
203 if (NULL == q->bufs[i])
205 if (q->bufs[i]->map) {
206 dprintk(1, "busy: buffer #%d mapped\n", i);
209 if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
210 dprintk(1, "busy: buffer #%d queued\n", i);
213 if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
214 dprintk(1, "busy: buffer #%d active\n", i);
220 EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
223 * __videobuf_free() - free all the buffers and their control structures
225 * This function can only be called if streaming/reading is off, i.e. no buffers
226 * are under control of the driver.
228 /* Locking: Caller holds q->vb_lock */
/*
 * Refuses to free while streaming/reading or while any buffer is still
 * mmapped; otherwise releases every buffer through buf_release().
 * NOTE(review): the error returns (-EBUSY presumably) and the per-slot
 * kfree/NULL-ing after buf_release are elided in this extract.
 */
229 static int __videobuf_free(struct videobuf_queue *q)
233 dprintk(1, "%s\n", __func__);
237 if (q->streaming || q->reading) {
238 dprintk(1, "Cannot free buffers when streaming or reading\n");
242 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
244 for (i = 0; i < VIDEO_MAX_FRAME; i++)
245 if (q->bufs[i] && q->bufs[i]->map) {
246 dprintk(1, "Cannot free mmapped buffers\n");
250 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
251 if (NULL == q->bufs[i])
253 q->ops->buf_release(q, q->bufs[i]);
261 /* Locking: Caller holds q->vb_lock */
/*
 * Abort streaming/reading: wake sleepers, pull QUEUED buffers off the
 * driver's list under irqlock (marking them VIDEOBUF_ERROR and waking
 * their waiters), then release every buffer and reset the stream list.
 */
262 void videobuf_queue_cancel(struct videobuf_queue *q)
264 unsigned long flags = 0;
269 wake_up_interruptible_sync(&q->wait);
271 /* remove queued buffers from list */
272 spin_lock_irqsave(q->irqlock, flags);
273 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
274 if (NULL == q->bufs[i])
276 if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
277 list_del(&q->bufs[i]->queue);
278 q->bufs[i]->state = VIDEOBUF_ERROR;
/* wake_up_all: readers may be blocked in videobuf_waiton() */
279 wake_up_all(&q->bufs[i]->done);
282 spin_unlock_irqrestore(q->irqlock, flags);
284 /* free all buffers + clear queue */
285 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
286 if (NULL == q->bufs[i])
288 q->ops->buf_release(q, q->bufs[i]);
290 INIT_LIST_HEAD(&q->stream);
292 EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
294 /* --------------------------------------------------------------------- */
296 /* Locking: Caller holds q->vb_lock */
/*
 * Return the field setting for the next buffer.  For ALTERNATE mode,
 * toggle between TOP and BOTTOM based on the last field delivered;
 * V4L2_FIELD_ANY must have been resolved by the caller beforehand.
 */
297 enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
299 enum v4l2_field field = q->field;
301 BUG_ON(V4L2_FIELD_ANY == field);
303 if (V4L2_FIELD_ALTERNATE == field) {
304 if (V4L2_FIELD_TOP == q->last) {
305 field = V4L2_FIELD_BOTTOM;
306 q->last = V4L2_FIELD_BOTTOM;
308 field = V4L2_FIELD_TOP;
309 q->last = V4L2_FIELD_TOP;
314 EXPORT_SYMBOL_GPL(videobuf_next_field);
316 /* Locking: Caller holds q->vb_lock */
/*
 * Fill a userspace v4l2_buffer @b from the internal state of @vb:
 * memory-type-specific offset/userptr/length, flags derived from the
 * mapping and buffer state, and field/timestamp/bytesused/sequence.
 * NOTE(review): switch headers, break statements and some flag checks
 * are elided in this extract.
 */
317 static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
318 struct videobuf_buffer *vb, enum v4l2_buf_type type)
320 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
321 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
326 b->memory = vb->memory;
328 case V4L2_MEMORY_MMAP:
329 b->m.offset = vb->boff;
330 b->length = vb->bsize;
332 case V4L2_MEMORY_USERPTR:
333 b->m.userptr = vb->baddr;
334 b->length = vb->bsize;
336 case V4L2_MEMORY_OVERLAY:
337 b->m.offset = vb->boff;
339 case V4L2_MEMORY_DMABUF:
340 /* DMABUF is not handled in videobuf framework */
344 b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
346 b->flags |= V4L2_BUF_FLAG_MAPPED;
349 case VIDEOBUF_PREPARED:
350 case VIDEOBUF_QUEUED:
351 case VIDEOBUF_ACTIVE:
352 b->flags |= V4L2_BUF_FLAG_QUEUED;
355 b->flags |= V4L2_BUF_FLAG_ERROR;
358 b->flags |= V4L2_BUF_FLAG_DONE;
360 case VIDEOBUF_NEEDS_INIT:
/* NEEDS_INIT/IDLE: no state flag set */
366 b->field = vb->field;
367 b->timestamp = ns_to_timeval(vb->ts);
368 b->bytesused = vb->size;
/* field_count counts fields; sequence is expressed in frames */
369 b->sequence = vb->field_count >> 1;
/* Locked wrapper around __videobuf_free(). */
372 int videobuf_mmap_free(struct videobuf_queue *q)
375 videobuf_queue_lock(q);
376 ret = __videobuf_free(q);
377 videobuf_queue_unlock(q);
380 EXPORT_SYMBOL_GPL(videobuf_mmap_free);
382 /* Locking: Caller holds q->vb_lock */
/*
 * Drop any existing buffers, then allocate @bcount buffers of @bsize
 * bytes for the given memory type.  For MMAP each buffer gets a unique
 * page-aligned fake offset (boff) used as the mmap cookie.
 * NOTE(review): the error return after __videobuf_free(), the break on
 * allocation failure and the final return of the buffer count are
 * elided in this extract.
 */
383 int __videobuf_mmap_setup(struct videobuf_queue *q,
384 unsigned int bcount, unsigned int bsize,
385 enum v4l2_memory memory)
390 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
392 err = __videobuf_free(q);
396 /* Allocate and initialize buffers */
397 for (i = 0; i < bcount; i++) {
398 q->bufs[i] = videobuf_alloc_vb(q);
400 if (NULL == q->bufs[i])
404 q->bufs[i]->memory = memory;
405 q->bufs[i]->bsize = bsize;
407 case V4L2_MEMORY_MMAP:
/* Fake offset identifying buffer i to videobuf_mmap_mapper() */
408 q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
410 case V4L2_MEMORY_USERPTR:
411 case V4L2_MEMORY_OVERLAY:
412 case V4L2_MEMORY_DMABUF:
421 dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
425 EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
/* Locked wrapper around __videobuf_mmap_setup(). */
427 int videobuf_mmap_setup(struct videobuf_queue *q,
428 unsigned int bcount, unsigned int bsize,
429 enum v4l2_memory memory)
432 videobuf_queue_lock(q);
433 ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
434 videobuf_queue_unlock(q);
437 EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
/*
 * VIDIOC_REQBUFS implementation: validate memory type and queue type,
 * refuse while streaming or with a non-empty stream list, let the
 * driver's buf_setup() adjust count/size, then allocate via
 * __videobuf_mmap_setup().  count==0 frees all buffers per the V4L2
 * spec.  NOTE(review): error-return statements, the q->streaming
 * check condition, and the req->count clamp/write-back are elided.
 */
439 int videobuf_reqbufs(struct videobuf_queue *q,
440 struct v4l2_requestbuffers *req)
442 unsigned int size, count;
445 if (req->memory != V4L2_MEMORY_MMAP &&
446 req->memory != V4L2_MEMORY_USERPTR &&
447 req->memory != V4L2_MEMORY_OVERLAY) {
448 dprintk(1, "reqbufs: memory type invalid\n");
452 videobuf_queue_lock(q);
453 if (req->type != q->type) {
454 dprintk(1, "reqbufs: queue type invalid\n");
460 dprintk(1, "reqbufs: streaming already exists\n");
464 if (!list_empty(&q->stream)) {
465 dprintk(1, "reqbufs: stream running\n");
470 if (req->count == 0) {
471 dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
/* count == 0 means "release all buffers" (V4L2 spec) */
472 retval = __videobuf_free(q);
477 if (count > VIDEO_MAX_FRAME)
478 count = VIDEO_MAX_FRAME;
/* Driver may adjust both the buffer count and the per-buffer size */
480 q->ops->buf_setup(q, &count, &size);
481 dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
483 (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
485 retval = __videobuf_mmap_setup(q, count, size, req->memory);
487 dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
495 videobuf_queue_unlock(q);
498 EXPORT_SYMBOL_GPL(videobuf_reqbufs);
/*
 * VIDIOC_QUERYBUF: validate type and index, then report the buffer's
 * status via videobuf_status().  NOTE(review): the error-return
 * statements after each check are elided in this extract.
 */
500 int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
504 videobuf_queue_lock(q);
505 if (unlikely(b->type != q->type)) {
506 dprintk(1, "querybuf: Wrong type.\n");
509 if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
510 dprintk(1, "querybuf: index out of range.\n");
513 if (unlikely(NULL == q->bufs[b->index])) {
514 dprintk(1, "querybuf: buffer is null.\n");
518 videobuf_status(q, b, q->bufs[b->index], q->type);
522 videobuf_queue_unlock(q);
525 EXPORT_SYMBOL_GPL(videobuf_querybuf);
/*
 * VIDIOC_QBUF: validate the request against the queue and the target
 * buffer, apply memory-type-specific setup (MMAP output metadata,
 * USERPTR rebinding with release-on-address-change, OVERLAY offset),
 * then buf_prepare() the buffer, append it to the stream list and hand
 * it to the driver under irqlock.  mmap_sem is taken for MMAP buffers
 * because buf_prepare may fault in pages.
 * NOTE(review): "¤t" on the mmap_sem lines appears to be an
 * HTML-entity mangling of "&current" — confirm against the upstream
 * file.  Error-return statements and the goto labels are elided.
 */
527 int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
529 struct videobuf_buffer *buf;
530 enum v4l2_field field;
531 unsigned long flags = 0;
534 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
536 if (b->memory == V4L2_MEMORY_MMAP)
537 down_read(¤t->mm->mmap_sem);
539 videobuf_queue_lock(q);
542 dprintk(1, "qbuf: Reading running...\n");
546 if (b->type != q->type) {
547 dprintk(1, "qbuf: Wrong type.\n");
550 if (b->index >= VIDEO_MAX_FRAME) {
551 dprintk(1, "qbuf: index out of range.\n");
554 buf = q->bufs[b->index];
556 dprintk(1, "qbuf: buffer is null.\n");
559 MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
560 if (buf->memory != b->memory) {
561 dprintk(1, "qbuf: memory type is wrong.\n");
564 if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
565 dprintk(1, "qbuf: buffer is already queued or active.\n");
570 case V4L2_MEMORY_MMAP:
571 if (0 == buf->baddr) {
572 dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
/* For output types, userspace supplies size/field/timestamp */
575 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
576 || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
577 || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
578 || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
579 buf->size = b->bytesused;
580 buf->field = b->field;
581 buf->ts = v4l2_timeval_to_ns(&b->timestamp);
584 case V4L2_MEMORY_USERPTR:
585 if (b->length < buf->bsize) {
586 dprintk(1, "qbuf: buffer length is not enough\n");
/* Userspace moved the buffer: release old mapping before rebinding */
589 if (VIDEOBUF_NEEDS_INIT != buf->state &&
590 buf->baddr != b->m.userptr)
591 q->ops->buf_release(q, buf);
592 buf->baddr = b->m.userptr;
594 case V4L2_MEMORY_OVERLAY:
595 buf->boff = b->m.offset;
598 dprintk(1, "qbuf: wrong memory type\n");
602 dprintk(1, "qbuf: requesting next field\n");
603 field = videobuf_next_field(q);
604 retval = q->ops->buf_prepare(q, buf, field);
606 dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
610 list_add_tail(&buf->stream, &q->stream);
612 spin_lock_irqsave(q->irqlock, flags);
613 q->ops->buf_queue(q, buf);
614 spin_unlock_irqrestore(q->irqlock, flags);
616 dprintk(1, "qbuf: succeeded\n");
/* Wake any reader blocked waiting for a buffer to appear */
618 wake_up_interruptible_sync(&q->wait);
621 videobuf_queue_unlock(q);
623 if (b->memory == V4L2_MEMORY_MMAP)
624 up_read(¤t->mm->mmap_sem);
628 EXPORT_SYMBOL_GPL(videobuf_qbuf);
630 /* Locking: Caller holds q->vb_lock */
/*
 * Block (unless @noblock) until the stream list is non-empty or
 * streaming stops.  The queue lock is dropped around the sleep to
 * avoid deadlocking against qbuf; conditions are re-validated after
 * reacquiring it.  NOTE(review): the surrounding retry loop, the
 * -EAGAIN path for noblock and the error returns are elided here.
 */
631 static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
637 dprintk(1, "next_buffer: Not streaming\n");
642 if (list_empty(&q->stream)) {
645 dprintk(2, "next_buffer: no buffers to dequeue\n");
648 dprintk(2, "next_buffer: waiting on buffer\n");
650 /* Drop lock to avoid deadlock with qbuf */
651 videobuf_queue_unlock(q);
653 /* Checking list_empty and streaming is safe without
654 * locks because we goto checks to validate while
655 * holding locks before proceeding */
656 retval = wait_event_interruptible(q->wait,
657 !list_empty(&q->stream) || !q->streaming);
658 videobuf_queue_lock(q);
673 /* Locking: Caller holds q->vb_lock */
/*
 * Fetch the head of the stream list (after ensuring one exists) and
 * wait for its capture to complete; on success *vb points at it.
 * NOTE(review): the error returns and the *vb assignment are elided.
 */
674 static int stream_next_buffer(struct videobuf_queue *q,
675 struct videobuf_buffer **vb, int nonblocking)
678 struct videobuf_buffer *buf = NULL;
680 retval = stream_next_buffer_check_queue(q, nonblocking);
684 buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
685 retval = videobuf_waiton(q, buf, nonblocking, 1);
/*
 * VIDIOC_DQBUF: wait for the next completed buffer, sync it through the
 * backend, report its status to userspace, remove it from the stream
 * list and return it to IDLE.  The DONE flag is cleared because the
 * buffer is no longer owned by the driver once dequeued.
 * NOTE(review): the switch's case labels for VIDEOBUF_ERROR/DONE and
 * the -EINVAL default path are elided in this extract.
 */
694 int videobuf_dqbuf(struct videobuf_queue *q,
695 struct v4l2_buffer *b, int nonblocking)
697 struct videobuf_buffer *buf = NULL;
700 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
702 memset(b, 0, sizeof(*b));
703 videobuf_queue_lock(q);
705 retval = stream_next_buffer(q, &buf, nonblocking);
707 dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
711 switch (buf->state) {
713 dprintk(1, "dqbuf: state is error\n");
716 dprintk(1, "dqbuf: state is done\n");
719 dprintk(1, "dqbuf: state invalid\n");
723 CALL(q, sync, q, buf);
724 videobuf_status(q, b, buf, q->type);
725 list_del(&buf->stream);
726 buf->state = VIDEOBUF_IDLE;
727 b->flags &= ~V4L2_BUF_FLAG_DONE;
729 videobuf_queue_unlock(q);
732 EXPORT_SYMBOL_GPL(videobuf_dqbuf);
/*
 * VIDIOC_STREAMON: start streaming and hand every already-PREPARED
 * buffer on the stream list to the driver under irqlock, then wake
 * waiters.  NOTE(review): the busy/reading checks and the setting of
 * q->streaming are elided in this extract.
 */
734 int videobuf_streamon(struct videobuf_queue *q)
736 struct videobuf_buffer *buf;
737 unsigned long flags = 0;
740 videobuf_queue_lock(q);
748 spin_lock_irqsave(q->irqlock, flags);
749 list_for_each_entry(buf, &q->stream, stream)
750 if (buf->state == VIDEOBUF_PREPARED)
751 q->ops->buf_queue(q, buf);
752 spin_unlock_irqrestore(q->irqlock, flags);
754 wake_up_interruptible_sync(&q->wait);
756 videobuf_queue_unlock(q);
759 EXPORT_SYMBOL_GPL(videobuf_streamon);
761 /* Locking: Caller holds q->vb_lock */
/* Cancel the queue; streaming-state checks/clearing are elided here. */
762 static int __videobuf_streamoff(struct videobuf_queue *q)
767 videobuf_queue_cancel(q);
/* Locked wrapper around __videobuf_streamoff(). */
772 int videobuf_streamoff(struct videobuf_queue *q)
776 videobuf_queue_lock(q);
777 retval = __videobuf_streamoff(q);
778 videobuf_queue_unlock(q);
782 EXPORT_SYMBOL_GPL(videobuf_streamoff);
784 /* Locking: Caller holds q->vb_lock */
/*
 * read() fast path: wrap the user buffer directly as a USERPTR
 * videobuf, queue it, wait (uninterruptibly) for capture, and return
 * the captured size.  Used when the request is large enough for a
 * whole frame; the allocated read_buf is always released at the end.
 * NOTE(review): the -ENOMEM return, the -EIO error branch and the
 * read_buf = NULL reset are elided in this extract.
 */
785 static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
787 size_t count, loff_t *ppos)
789 enum v4l2_field field;
790 unsigned long flags = 0;
793 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
796 q->read_buf = videobuf_alloc_vb(q);
797 if (NULL == q->read_buf)
800 q->read_buf->memory = V4L2_MEMORY_USERPTR;
801 q->read_buf->baddr = (unsigned long)data;
802 q->read_buf->bsize = count;
804 field = videobuf_next_field(q);
805 retval = q->ops->buf_prepare(q, q->read_buf, field);
809 /* start capture & wait */
810 spin_lock_irqsave(q->irqlock, flags);
811 q->ops->buf_queue(q, q->read_buf);
812 spin_unlock_irqrestore(q->irqlock, flags);
813 retval = videobuf_waiton(q, q->read_buf, 0, 0);
815 CALL(q, sync, q, q->read_buf);
816 if (VIDEOBUF_ERROR == q->read_buf->state)
819 retval = q->read_buf->size;
/* Common exit: free the temporary wrapper buffer */
824 q->ops->buf_release(q, q->read_buf);
/*
 * Copy up to @count bytes from the buffer's vaddr, starting at the
 * current read offset, into userspace.  NOTE(review): the -EFAULT
 * return on copy failure and the final return of the copied count are
 * elided in this extract.
 */
830 static int __videobuf_copy_to_user(struct videobuf_queue *q,
831 struct videobuf_buffer *buf,
832 char __user *data, size_t count,
835 void *vaddr = CALLPTR(q, vaddr, buf);
837 /* copy to userspace */
838 if (count > buf->size - q->read_off)
839 count = buf->size - q->read_off;
841 if (copy_to_user(data, vaddr + q->read_off, count))
/*
 * Stream-read copy helper: optionally patch the frame counter into the
 * last 4 bytes of a VBI block (legacy compatibility hack), then copy
 * the payload via __videobuf_copy_to_user().  A first-chunk -EFAULT
 * (pos == 0) is handled specially — the elided line presumably maps it
 * to -EFAULT/0; confirm against the upstream file.
 */
847 static int __videobuf_copy_stream(struct videobuf_queue *q,
848 struct videobuf_buffer *buf,
849 char __user *data, size_t count, size_t pos,
850 int vbihack, int nonblocking)
852 unsigned int *fc = CALLPTR(q, vaddr, buf);
855 /* dirty, undocumented hack -- pass the frame counter
856 * within the last four bytes of each vbi data block.
857 * We need that one to maintain backward compatibility
858 * to all vbi decoding software out there ... */
859 fc += (buf->size >> 2) - 1;
860 *fc = buf->field_count >> 1;
861 dprintk(1, "vbihack: %d\n", *fc);
864 /* copy stuff using the common method */
865 count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
867 if ((count == -EFAULT) && (pos == 0))
/*
 * Single-buffer read() implementation.  Tries the zerocopy path first
 * (when the request covers a full frame, per the elided size check);
 * otherwise captures into a kernel-managed read_buf, waits for
 * completion, and copies out — releasing the buffer once all its data
 * has been consumed so a partial read can resume at read_off.
 * NOTE(review): the zerocopy size condition, several error/cleanup
 * paths and the q->read_buf = NULL resets are elided in this extract.
 */
873 ssize_t videobuf_read_one(struct videobuf_queue *q,
874 char __user *data, size_t count, loff_t *ppos,
877 enum v4l2_field field;
878 unsigned long flags = 0;
879 unsigned size = 0, nbufs = 1;
882 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
884 videobuf_queue_lock(q);
886 q->ops->buf_setup(q, &nbufs, &size);
888 if (NULL == q->read_buf &&
891 retval = videobuf_read_zerocopy(q, data, count, ppos);
892 if (retval >= 0 || retval == -EIO)
895 /* fallback to kernel bounce buffer on failures */
898 if (NULL == q->read_buf) {
899 /* need to capture a new frame */
901 q->read_buf = videobuf_alloc_vb(q);
903 dprintk(1, "video alloc=0x%p\n", q->read_buf);
904 if (NULL == q->read_buf)
906 q->read_buf->memory = V4L2_MEMORY_USERPTR;
907 q->read_buf->bsize = count; /* preferred size */
908 field = videobuf_next_field(q);
909 retval = q->ops->buf_prepare(q, q->read_buf, field);
917 spin_lock_irqsave(q->irqlock, flags);
918 q->ops->buf_queue(q, q->read_buf);
919 spin_unlock_irqrestore(q->irqlock, flags);
924 /* wait until capture is done */
925 retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
929 CALL(q, sync, q, q->read_buf);
931 if (VIDEOBUF_ERROR == q->read_buf->state) {
932 /* catch I/O errors */
933 q->ops->buf_release(q, q->read_buf);
940 /* Copy to userspace */
941 retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
945 q->read_off += retval;
946 if (q->read_off == q->read_buf->size) {
947 /* all data copied, cleanup */
948 q->ops->buf_release(q, q->read_buf);
954 videobuf_queue_unlock(q);
957 EXPORT_SYMBOL_GPL(videobuf_read_one);
959 /* Locking: Caller holds q->vb_lock */
/*
 * Set up streaming-style read(): allocate USERPTR buffers sized by the
 * driver's buf_setup(), prepare each for its next field, put them all
 * on the stream list, then queue them to the driver under irqlock.
 * NOTE(review): error returns, the q->reading flag set and the final
 * return are elided in this extract.
 */
960 static int __videobuf_read_start(struct videobuf_queue *q)
962 enum v4l2_field field;
963 unsigned long flags = 0;
964 unsigned int count = 0, size = 0;
967 q->ops->buf_setup(q, &count, &size);
970 if (count > VIDEO_MAX_FRAME)
971 count = VIDEO_MAX_FRAME;
972 size = PAGE_ALIGN(size);
974 err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
980 for (i = 0; i < count; i++) {
981 field = videobuf_next_field(q);
982 err = q->ops->buf_prepare(q, q->bufs[i], field);
985 list_add_tail(&q->bufs[i]->stream, &q->stream);
987 spin_lock_irqsave(q->irqlock, flags);
988 for (i = 0; i < count; i++)
989 q->ops->buf_queue(q, q->bufs[i]);
990 spin_unlock_irqrestore(q->irqlock, flags);
/*
 * Tear down streaming-style read(): cancel the queue and reset the
 * stream list.  NOTE(review): the per-buffer cleanup inside the loop
 * and the q->reading flag clear are elided in this extract.
 */
995 static void __videobuf_read_stop(struct videobuf_queue *q)
999 videobuf_queue_cancel(q);
1001 INIT_LIST_HEAD(&q->stream);
1002 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1003 if (NULL == q->bufs[i])
/* Locked wrapper around __videobuf_read_start(). */
1011 int videobuf_read_start(struct videobuf_queue *q)
1015 videobuf_queue_lock(q);
1016 rc = __videobuf_read_start(q);
1017 videobuf_queue_unlock(q);
1021 EXPORT_SYMBOL_GPL(videobuf_read_start);
/* Locked wrapper around __videobuf_read_stop(). */
1023 void videobuf_read_stop(struct videobuf_queue *q)
1025 videobuf_queue_lock(q);
1026 __videobuf_read_stop(q);
1027 videobuf_queue_unlock(q);
1029 EXPORT_SYMBOL_GPL(videobuf_read_stop);
/*
 * Stop all queue activity: streamoff if streaming, read-stop if
 * reading (the guarding conditionals are elided in this extract).
 */
1031 void videobuf_stop(struct videobuf_queue *q)
1033 videobuf_queue_lock(q);
1036 __videobuf_streamoff(q);
1039 __videobuf_read_stop(q);
1041 videobuf_queue_unlock(q);
1043 EXPORT_SYMBOL_GPL(videobuf_stop);
/*
 * Streaming read(): lazily start read-streaming, then loop (loop
 * header elided) pulling completed buffers off the stream list,
 * copying their contents out via __videobuf_copy_stream(), and
 * re-queueing each buffer to the driver once fully consumed.
 * @vbihack enables the legacy frame-counter patching.
 * NOTE(review): the enclosing loop, retval accumulation, error
 * handling and the q->read_buf = NULL reset are elided here.
 */
1045 ssize_t videobuf_read_stream(struct videobuf_queue *q,
1046 char __user *data, size_t count, loff_t *ppos,
1047 int vbihack, int nonblocking)
1050 unsigned long flags = 0;
1052 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
1054 dprintk(2, "%s\n", __func__);
1055 videobuf_queue_lock(q);
1060 retval = __videobuf_read_start(q);
1067 /* get / wait for data */
1068 if (NULL == q->read_buf) {
1069 q->read_buf = list_entry(q->stream.next,
1070 struct videobuf_buffer,
1072 list_del(&q->read_buf->stream);
1075 rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
1082 if (q->read_buf->state == VIDEOBUF_DONE) {
1083 rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
1084 retval, vbihack, nonblocking);
/* Error state: skip the payload, mark buffer fully consumed */
1094 q->read_off = q->read_buf->size;
1099 /* requeue buffer when done with copying */
1100 if (q->read_off == q->read_buf->size) {
1101 list_add_tail(&q->read_buf->stream,
1103 spin_lock_irqsave(q->irqlock, flags);
1104 q->ops->buf_queue(q, q->read_buf);
1105 spin_unlock_irqrestore(q->irqlock, flags);
1113 videobuf_queue_unlock(q);
1116 EXPORT_SYMBOL_GPL(videobuf_read_stream);
/*
 * poll() implementation: pick the buffer to wait on — head of the
 * stream list when streaming, else (for EPOLLIN requests) the current
 * or next read buffer after lazily starting read mode — register it
 * with poll_wait(), and report EPOLLOUT/EPOLLIN readiness based on its
 * DONE/ERROR state and the queue type.
 * NOTE(review): the streaming check, the buf = q->read_buf assignment,
 * the POLLERR path and the switch header/return are elided here.
 */
1118 __poll_t videobuf_poll_stream(struct file *file,
1119 struct videobuf_queue *q,
1122 __poll_t req_events = poll_requested_events(wait);
1123 struct videobuf_buffer *buf = NULL;
1126 videobuf_queue_lock(q);
1128 if (!list_empty(&q->stream))
1129 buf = list_entry(q->stream.next,
1130 struct videobuf_buffer, stream);
1131 } else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
/* Lazily enter read mode so poll works before the first read() */
1133 __videobuf_read_start(q);
1136 } else if (NULL == q->read_buf) {
1137 q->read_buf = list_entry(q->stream.next,
1138 struct videobuf_buffer,
1140 list_del(&q->read_buf->stream);
1146 poll_wait(file, &buf->done, wait);
1151 if (buf->state == VIDEOBUF_DONE ||
1152 buf->state == VIDEOBUF_ERROR) {
1154 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1155 case V4L2_BUF_TYPE_VBI_OUTPUT:
1156 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
1157 case V4L2_BUF_TYPE_SDR_OUTPUT:
1158 rc = EPOLLOUT | EPOLLWRNORM;
1161 rc = EPOLLIN | EPOLLRDNORM;
1166 videobuf_queue_unlock(q);
1169 EXPORT_SYMBOL_GPL(videobuf_poll_stream);
/*
 * mmap() implementation: require PROT_WRITE + MAP_SHARED, then find
 * the MMAP buffer whose fake offset (boff) matches vma->vm_pgoff and
 * hand the actual mapping to the backend's mmap_mapper hook.
 * NOTE(review): the -EINVAL return, rc initialisation and the break
 * after a successful match are elided in this extract.
 */
1171 int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
1176 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
1178 if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
1179 dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
1183 videobuf_queue_lock(q);
1184 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1185 struct videobuf_buffer *buf = q->bufs[i];
1187 if (buf && buf->memory == V4L2_MEMORY_MMAP &&
1188 buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
1189 rc = CALL(q, mmap_mapper, q, buf, vma);
1193 videobuf_queue_unlock(q);
1197 EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);