GNU Linux-libre 6.8.7-gnu
[releases.git] / drivers / usb / gadget / function / uvc_video.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  *      uvc_video.c  --  USB Video Class Gadget driver
4  *
5  *      Copyright (C) 2009-2010
6  *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7  */
8
9 #include <linux/kernel.h>
10 #include <linux/device.h>
11 #include <linux/errno.h>
12 #include <linux/usb/ch9.h>
13 #include <linux/usb/gadget.h>
14 #include <linux/usb/video.h>
15 #include <asm/unaligned.h>
16
17 #include <media/v4l2-dev.h>
18
19 #include "uvc.h"
20 #include "uvc_queue.h"
21 #include "uvc_video.h"
22
23 /* --------------------------------------------------------------------------
24  * Video codecs
25  */
26
/*
 * Build a UVC payload header at the start of @data.
 *
 * Fills bHeaderLength (data[0]) and bmHeaderInfo (data[1]), optionally
 * followed by a 4-byte PTS field and a 6-byte SCR (STC + SOF) field, and
 * returns the total header length in bytes.  @len is the space available
 * in the current USB request and is only used to decide the EOF bit.
 */
static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp);
	int pos = 2;

	/* End-of-header is always set; FID toggles on every frame boundary. */
	data[1] = UVC_STREAM_EOH | video->fid;

	/* Flag the payload as erroneous when the buffer is being dropped. */
	if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
		data[1] |= UVC_STREAM_ERR;

	/*
	 * Add a PTS on the first payload of a frame (buf_used == 0), provided
	 * the vb2 buffer carries a non-zero timestamp.
	 */
	if (video->queue.buf_used == 0 && ts.tv_sec) {
		/* dwClockFrequency is 48 MHz */
		u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_PTS;
		put_unaligned_le32(pts, &data[pos]);
		pos += 4;
	}

	/*
	 * Add an SCR when the controller can report the current frame number.
	 * STC uses the same 48 MHz clock as the PTS; SOF is the bus frame
	 * number sampled from the gadget controller.
	 */
	if (cdev->gadget->ops->get_frame) {
		u32 sof, stc;

		sof = usb_gadget_frame_number(cdev->gadget);
		ktime_get_ts64(&ts);
		stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_SCR;
		put_unaligned_le32(stc, &data[pos]);
		put_unaligned_le16(sof, &data[pos+4]);
		pos += 6;
	}

	data[0] = pos;

	/* Set EOF when the rest of the buffer fits in this request's payload. */
	if (buf->bytesused - video->queue.buf_used <= len - pos)
		data[1] |= UVC_STREAM_EOF;

	return pos;
}
70
71 static int
72 uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
73                 u8 *data, int len)
74 {
75         struct uvc_video_queue *queue = &video->queue;
76         unsigned int nbytes;
77         void *mem;
78
79         /* Copy video data to the USB buffer. */
80         mem = buf->mem + queue->buf_used;
81         nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
82
83         memcpy(data, mem, nbytes);
84         queue->buf_used += nbytes;
85
86         return nbytes;
87 }
88
/*
 * Fill a USB request for a bulk video endpoint.
 *
 * Bulk payloads may span several requests: a header is only emitted at the
 * start of a new payload (payload_size == 0), and payload_size tracks how
 * far into the current payload (bounded by max_payload_size) we are.
 */
static void
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = video->req_size;
	int ret;

	/* Add a header at the beginning of the payload. */
	if (video->payload_size == 0) {
		ret = uvc_video_encode_header(video, buf, mem, len);
		video->payload_size += ret;
		mem += ret;
		len -= ret;
	}

	/* Process video data, limited by the remaining payload budget. */
	len = min((int)(video->max_payload_size - video->payload_size), len);
	ret = uvc_video_encode_data(video, buf, mem, len);

	video->payload_size += ret;
	len -= ret;

	req->length = video->req_size - len;
	/* Send a ZLP when the payload ends exactly on a request boundary. */
	req->zero = video->payload_size == video->max_payload_size;

	/* Buffer fully consumed: complete it and toggle the frame ID. */
	if (buf->bytesused == video->queue.buf_used) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;

		video->payload_size = 0;
	}

	/*
	 * Start a new payload on the next request when this payload is full
	 * or the buffer is being dropped.
	 * NOTE(review): for a non-empty buffer the buf_used comparison below
	 * is redundant — the end-of-buffer case already reset payload_size
	 * in the branch above (which also zeroed buf_used).
	 */
	if (video->payload_size == video->max_payload_size ||
	    video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE ||
	    buf->bytesused == video->queue.buf_used)
		video->payload_size = 0;
}
131
/*
 * Fill a USB request for an isochronous endpoint using scatter-gather.
 *
 * The UVC header goes into a dedicated bounce buffer (ureq->header) as the
 * first sg entry; the video payload is mapped zero-copy by pointing the
 * remaining sg entries at the pages of the vb2 buffer's own scatterlist,
 * tracked across calls via buf->sg / buf->offset.
 */
static void
uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	unsigned int pending = buf->bytesused - video->queue.buf_used;
	struct uvc_request *ureq = req->context;
	struct scatterlist *sg, *iter;
	unsigned int len = video->req_size;
	unsigned int sg_left, part = 0;
	unsigned int i;
	int header_len;

	sg = ureq->sgt.sgl;
	sg_init_table(sg, ureq->sgt.nents);

	/* Init the header. */
	header_len = uvc_video_encode_header(video, buf, ureq->header,
				      video->req_size);
	sg_set_buf(sg, ureq->header, header_len);
	len -= header_len;

	if (pending <= len)
		len = pending;

	/* Last payload of the frame carries header + data; others are full. */
	req->length = (len == pending) ?
		len + header_len : video->req_size;

	/* Init the pending sgs with payload */
	sg = sg_next(sg);

	for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
		if (!len || !buf->sg || !buf->sg->length)
			break;

		/* Take as much of the current source sg entry as fits. */
		sg_left = buf->sg->length - buf->offset;
		part = min_t(unsigned int, len, sg_left);

		sg_set_page(iter, sg_page(buf->sg), part, buf->offset);

		if (part == sg_left) {
			/* Source entry exhausted: advance to the next one. */
			buf->offset = 0;
			buf->sg = sg_next(buf->sg);
		} else {
			buf->offset += part;
		}
		len -= part;
	}

	/* Assign the video data with header. */
	req->buf = NULL;
	req->sg = ureq->sgt.sgl;
	req->num_sgs = i + 1;

	/* Trim the request by whatever could not be mapped. */
	req->length -= len;
	video->queue.buf_used += req->length - header_len;

	if (buf->bytesused == video->queue.buf_used || !buf->sg ||
			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		/* Frame complete (or dropped): finish the buffer. */
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		buf->offset = 0;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}
198
/*
 * Fill a USB request for an isochronous endpoint by copying into the
 * request's pre-allocated buffer.  Every request carries its own header,
 * followed by as much of the video buffer as fits.
 */
static void
uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = video->req_size;
	int ret;

	/* Add the header. */
	ret = uvc_video_encode_header(video, buf, mem, len);
	mem += ret;
	len -= ret;

	/* Process video data. */
	ret = uvc_video_encode_data(video, buf, mem, len);
	len -= ret;

	req->length = video->req_size - len;

	/* Frame complete (or dropped): finish the buffer and toggle FID. */
	if (buf->bytesused == video->queue.buf_used ||
			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}
228
229 /* --------------------------------------------------------------------------
230  * Request handling
231  */
232
233 /*
234  * Callers must take care to hold req_lock when this function may be called
235  * from multiple threads. For example, when frames are streaming to the host.
236  */
237 static void
238 uvc_video_free_request(struct uvc_request *ureq, struct usb_ep *ep)
239 {
240         sg_free_table(&ureq->sgt);
241         if (ureq->req && ep) {
242                 usb_ep_free_request(ep, ureq->req);
243                 ureq->req = NULL;
244         }
245
246         kfree(ureq->req_buffer);
247         ureq->req_buffer = NULL;
248
249         if (!list_empty(&ureq->list))
250                 list_del_init(&ureq->list);
251
252         kfree(ureq);
253 }
254
255 static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
256 {
257         int ret;
258
259         ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
260         if (ret < 0) {
261                 uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
262                          ret);
263
264                 /* If the endpoint is disabled the descriptor may be NULL. */
265                 if (video->ep->desc) {
266                         /* Isochronous endpoints can't be halted. */
267                         if (usb_endpoint_xfer_bulk(video->ep->desc))
268                                 usb_ep_set_halt(video->ep);
269                 }
270         }
271
272         return ret;
273 }
274
/* This function must be called with video->req_lock held. */
static int uvcg_video_usb_req_queue(struct uvc_video *video,
	struct usb_request *req, bool queue_to_ep)
{
	/* max_payload_size is only non-zero for bulk endpoints. */
	bool is_bulk = video->max_payload_size;
	struct list_head *list = NULL;

	if (!video->is_enabled)
		return -ENODEV;

	if (queue_to_ep) {
		struct uvc_request *ureq = req->context;
		/*
		 * With USB3 handling more requests at a higher speed, we can't
		 * afford to generate an interrupt for every request. Decide to
		 * interrupt:
		 *
		 * - When no more requests are available in the free queue, as
		 *   this may be our last chance to refill the endpoint's
		 *   request queue.
		 *
		 * - When this request is the last request for the video
		 *   buffer, as we want to start sending the next video buffer
		 *   ASAP in case it doesn't get started already in the next
		 *   iteration of this loop.
		 *
		 * - Four times over the length of the requests queue (as
		 *   indicated by video->uvc_num_requests), as a trade-off
		 *   between latency and interrupt load.
		 */
		if (list_empty(&video->req_free) || ureq->last_buf ||
			!(video->req_int_count %
			DIV_ROUND_UP(video->uvc_num_requests, 4))) {
			video->req_int_count = 0;
			req->no_interrupt = 0;
		} else {
			req->no_interrupt = 1;
		}
		video->req_int_count++;
		return uvcg_video_ep_queue(video, req);
	}
	/*
	 * If we're not queuing to the ep, for isoc we're queuing
	 * to the req_ready list, otherwise req_free.
	 */
	list = is_bulk ? &video->req_free : &video->req_ready;
	list_add_tail(&req->list, list);
	return 0;
}
324
325 /*
326  * Must only be called from uvcg_video_enable - since after that we only want to
327  * queue requests to the endpoint from the uvc_video_complete complete handler.
328  * This function is needed in order to 'kick start' the flow of requests from
329  * gadget driver to the usb controller.
330  */
static void uvc_video_ep_queue_initial_requests(struct uvc_video *video)
{
	struct usb_request *req = NULL;
	unsigned long flags = 0;
	unsigned int count = 0;
	int ret = 0;

	/*
	 * We only queue half of the free list since we still want to have
	 * some free usb_requests in the free list for the video_pump async_wq
	 * thread to encode uvc buffers into. Otherwise we could get into a
	 * situation where the free list does not have any usb requests to
	 * encode into - we always end up queueing 0 length requests to the
	 * end point.
	 */
	unsigned int half_list_size = video->uvc_num_requests / 2;

	spin_lock_irqsave(&video->req_lock, flags);
	/*
	 * Take these requests off the free list and queue them all to the
	 * endpoint. Since we queue 0 length requests with the req_lock held,
	 * there isn't any 'data' race involved here with the complete handler.
	 */
	while (count < half_list_size) {
		/* All requests start on req_free, so the list cannot run dry here. */
		req = list_first_entry(&video->req_free, struct usb_request,
					list);
		list_del(&req->list);
		req->length = 0;
		ret = uvcg_video_ep_queue(video, req);
		if (ret < 0) {
			uvcg_queue_cancel(&video->queue, 0);
			break;
		}
		count++;
	}
	spin_unlock_irqrestore(&video->req_lock, flags);
}
368
/*
 * Completion handler for video USB requests.
 *
 * Returns the completed request to the flow: for isoc endpoints it requeues
 * a request (from req_ready if available, otherwise a 0-length one) to the
 * endpoint directly; for bulk endpoints it parks the request on a list for
 * the pump worker.  Also completes the vb2 buffer attached via last_buf.
 */
static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_request *ureq = req->context;
	struct uvc_video *video = ureq->video;
	struct uvc_video_queue *queue = &video->queue;
	struct uvc_buffer *last_buf;
	unsigned long flags;
	bool is_bulk = video->max_payload_size;
	int ret = 0;

	spin_lock_irqsave(&video->req_lock, flags);
	if (!video->is_enabled) {
		/*
		 * When is_enabled is false, uvcg_video_disable() ensures
		 * that in-flight uvc_buffers are returned, so we can
		 * safely call free_request without worrying about
		 * last_buf.
		 */
		uvc_video_free_request(ureq, ep);
		spin_unlock_irqrestore(&video->req_lock, flags);
		return;
	}

	/* Claim last_buf under the lock so disable() can't also complete it. */
	last_buf = ureq->last_buf;
	ureq->last_buf = NULL;
	spin_unlock_irqrestore(&video->req_lock, flags);

	switch (req->status) {
	case 0:
		break;

	case -EXDEV:
		/* Missed isoc transfer: drop the rest of the current frame. */
		uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
		queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
		break;

	case -ESHUTDOWN:	/* disconnect from host. */
		uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
		uvcg_queue_cancel(queue, 1);
		break;

	default:
		uvcg_warn(&video->uvc->func,
			  "VS request completed with status %d.\n",
			  req->status);
		uvcg_queue_cancel(queue, 0);
	}

	if (last_buf) {
		spin_lock_irqsave(&queue->irqlock, flags);
		uvcg_complete_buffer(queue, last_buf);
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	spin_lock_irqsave(&video->req_lock, flags);
	/*
	 * Video stream might have been disabled while we were
	 * processing the current usb_request. So make sure
	 * we're still streaming before queueing the usb_request
	 * back to req_free
	 */
	if (video->is_enabled) {
		/*
		 * Here we check whether any request is available in the ready
		 * list. If it is, queue it to the ep and add the current
		 * usb_request to the req_free list - for video_pump to fill in.
		 * Otherwise, just use the current usb_request to queue a 0
		 * length request to the ep. Since we always add to the req_free
		 * list if we dequeue from the ready list, there will never
		 * be a situation where the req_free list is completely out of
		 * requests and cannot recover.
		 */
		struct usb_request *to_queue = req;

		to_queue->length = 0;
		if (!list_empty(&video->req_ready)) {
			to_queue = list_first_entry(&video->req_ready,
				struct usb_request, list);
			list_del(&to_queue->list);
			list_add_tail(&req->list, &video->req_free);
			/*
			 * Queue work to the wq as well since it is possible that a
			 * buffer may not have been completely encoded with the set of
			 * in-flight usb requests for which the complete callbacks are
			 * firing.
			 * In that case, if we do not queue work to the worker thread,
			 * the buffer will never be marked as complete - and therefore
			 * not be returned to userspace. As a result,
			 * dequeue -> queue -> dequeue flow of uvc buffers will not
			 * happen.
			 */
			queue_work(video->async_wq, &video->pump);
		}
		/*
		 * Queue to the endpoint. The actual queueing to ep will
		 * only happen on one thread - the async_wq for bulk endpoints
		 * and this thread for isoc endpoints.
		 */
		ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
		if (ret < 0) {
			/*
			 * Endpoint error, but the stream is still enabled.
			 * Put request back in req_free for it to be cleaned
			 * up later.
			 */
			list_add_tail(&to_queue->list, &video->req_free);
		}
	} else {
		uvc_video_free_request(ureq, ep);
		ret = 0;
	}
	spin_unlock_irqrestore(&video->req_lock, flags);
	if (ret < 0)
		uvcg_queue_cancel(queue, 0);
}
485
486 static int
487 uvc_video_free_requests(struct uvc_video *video)
488 {
489         struct uvc_request *ureq, *temp;
490
491         list_for_each_entry_safe(ureq, temp, &video->ureqs, list)
492                 uvc_video_free_request(ureq, video->ep);
493
494         INIT_LIST_HEAD(&video->ureqs);
495         INIT_LIST_HEAD(&video->req_free);
496         INIT_LIST_HEAD(&video->req_ready);
497         video->req_size = 0;
498         return 0;
499 }
500
501 static int
502 uvc_video_alloc_requests(struct uvc_video *video)
503 {
504         struct uvc_request *ureq;
505         unsigned int req_size;
506         unsigned int i;
507         int ret = -ENOMEM;
508
509         BUG_ON(video->req_size);
510
511         req_size = video->ep->maxpacket
512                  * max_t(unsigned int, video->ep->maxburst, 1)
513                  * (video->ep->mult);
514
515         for (i = 0; i < video->uvc_num_requests; i++) {
516                 ureq = kzalloc(sizeof(struct uvc_request), GFP_KERNEL);
517                 if (ureq == NULL)
518                         goto error;
519
520                 INIT_LIST_HEAD(&ureq->list);
521
522                 list_add_tail(&ureq->list, &video->ureqs);
523
524                 ureq->req_buffer = kmalloc(req_size, GFP_KERNEL);
525                 if (ureq->req_buffer == NULL)
526                         goto error;
527
528                 ureq->req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
529                 if (ureq->req == NULL)
530                         goto error;
531
532                 ureq->req->buf = ureq->req_buffer;
533                 ureq->req->length = 0;
534                 ureq->req->complete = uvc_video_complete;
535                 ureq->req->context = ureq;
536                 ureq->video = video;
537                 ureq->last_buf = NULL;
538
539                 list_add_tail(&ureq->req->list, &video->req_free);
540                 /* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
541                 sg_alloc_table(&ureq->sgt,
542                                DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
543                                             PAGE_SIZE) + 2, GFP_KERNEL);
544         }
545
546         video->req_size = req_size;
547
548         return 0;
549
550 error:
551         uvc_video_free_requests(video);
552         return ret;
553 }
554
555 /* --------------------------------------------------------------------------
556  * Video streaming
557  */
558
559 /*
560  * uvcg_video_pump - Pump video data into the USB requests
561  *
562  * This function fills the available USB requests (listed in req_free) with
563  * video data from the queued buffers.
564  */
static void uvcg_video_pump(struct work_struct *work)
{
	struct uvc_video *video = container_of(work, struct uvc_video, pump);
	struct uvc_video_queue *queue = &video->queue;
	/* video->max_payload_size is only set when using bulk transfer */
	bool is_bulk = video->max_payload_size;
	struct usb_request *req = NULL;
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret = 0;

	while (true) {
		if (!video->ep->enabled)
			return;

		/*
		 * Check is_enabled and retrieve the first available USB
		 * request, protected by the request lock.
		 */
		spin_lock_irqsave(&video->req_lock, flags);
		if (!video->is_enabled || list_empty(&video->req_free)) {
			spin_unlock_irqrestore(&video->req_lock, flags);
			return;
		}
		req = list_first_entry(&video->req_free, struct usb_request,
					list);
		list_del(&req->list);
		spin_unlock_irqrestore(&video->req_lock, flags);

		/*
		 * Retrieve the first available video buffer and fill the
		 * request, protected by the video queue irqlock.
		 */
		spin_lock_irqsave(&queue->irqlock, flags);
		buf = uvcg_queue_head(queue);
		if (!buf) {
			/*
			 * Either the queue has been disconnected or no video buffer
			 * available for bulk transfer. Either way, stop processing
			 * further.
			 */
			spin_unlock_irqrestore(&queue->irqlock, flags);
			break;
		}

		/* encode points at the bulk/isoc/isoc-sg encoder set at enable. */
		video->encode(req, video, buf);

		spin_unlock_irqrestore(&queue->irqlock, flags);

		spin_lock_irqsave(&video->req_lock, flags);
		/* For bulk end points we queue from the worker thread
		 * since we would preferably not want to wait on requests
		 * to be ready, in the uvcg_video_complete() handler.
		 * For isoc endpoints we add the request to the ready list
		 * and only queue it to the endpoint from the complete handler.
		 */
		ret = uvcg_video_usb_req_queue(video, req, is_bulk);
		spin_unlock_irqrestore(&video->req_lock, flags);

		if (ret < 0) {
			uvcg_queue_cancel(queue, 0);
			break;
		}

		/* The request is owned by the endpoint / ready list. */
		req = NULL;
	}

	if (!req)
		return;

	/* Return the unqueued request to req_free, or free it if disabled. */
	spin_lock_irqsave(&video->req_lock, flags);
	if (video->is_enabled)
		list_add_tail(&req->list, &video->req_free);
	else
		uvc_video_free_request(req->context, video->ep);
	spin_unlock_irqrestore(&video->req_lock, flags);
}
643
644 /*
645  * Disable the video stream
646  */
int
uvcg_video_disable(struct uvc_video *video)
{
	unsigned long flags;
	struct list_head inflight_bufs;
	struct usb_request *req, *temp;
	struct uvc_buffer *buf, *btemp;
	struct uvc_request *ureq, *utemp;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video disable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	INIT_LIST_HEAD(&inflight_bufs);
	spin_lock_irqsave(&video->req_lock, flags);
	/* From here on, complete handlers free their requests themselves. */
	video->is_enabled = false;

	/*
	 * Remove any in-flight buffers from the uvc_requests
	 * because we want to return them before cancelling the
	 * queue. This ensures that we aren't stuck waiting for
	 * all complete callbacks to come through before disabling
	 * vb2 queue.
	 */
	list_for_each_entry(ureq, &video->ureqs, list) {
		if (ureq->last_buf) {
			list_add_tail(&ureq->last_buf->queue, &inflight_bufs);
			ureq->last_buf = NULL;
		}
	}
	spin_unlock_irqrestore(&video->req_lock, flags);

	/* Stop the pump worker before tearing the queue down. */
	cancel_work_sync(&video->pump);
	uvcg_queue_cancel(&video->queue, 0);

	spin_lock_irqsave(&video->req_lock, flags);
	/*
	 * Remove all uvc_requests from ureqs with list_del_init
	 * This lets uvc_video_free_request correctly identify
	 * if the uvc_request is attached to a list or not when freeing
	 * memory.
	 */
	list_for_each_entry_safe(ureq, utemp, &video->ureqs, list)
		list_del_init(&ureq->list);

	list_for_each_entry_safe(req, temp, &video->req_free, list) {
		list_del(&req->list);
		uvc_video_free_request(req->context, video->ep);
	}

	list_for_each_entry_safe(req, temp, &video->req_ready, list) {
		list_del(&req->list);
		uvc_video_free_request(req->context, video->ep);
	}

	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	video->req_size = 0;
	spin_unlock_irqrestore(&video->req_lock, flags);

	/*
	 * Return all the video buffers before disabling the queue.
	 */
	spin_lock_irqsave(&video->queue.irqlock, flags);
	list_for_each_entry_safe(buf, btemp, &inflight_bufs, queue) {
		list_del(&buf->queue);
		uvcg_complete_buffer(&video->queue, buf);
	}
	spin_unlock_irqrestore(&video->queue.irqlock, flags);

	uvcg_queue_enable(&video->queue, 0);
	return 0;
}
723
724 /*
725  * Enable the video stream.
726  */
727 int uvcg_video_enable(struct uvc_video *video)
728 {
729         int ret;
730
731         if (video->ep == NULL) {
732                 uvcg_info(&video->uvc->func,
733                           "Video enable failed, device is uninitialized.\n");
734                 return -ENODEV;
735         }
736
737         /*
738          * Safe to access request related fields without req_lock because
739          * this is the only thread currently active, and no other
740          * request handling thread will become active until this function
741          * returns.
742          */
743         video->is_enabled = true;
744
745         if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
746                 return ret;
747
748         if ((ret = uvc_video_alloc_requests(video)) < 0)
749                 return ret;
750
751         if (video->max_payload_size) {
752                 video->encode = uvc_video_encode_bulk;
753                 video->payload_size = 0;
754         } else
755                 video->encode = video->queue.use_sg ?
756                         uvc_video_encode_isoc_sg : uvc_video_encode_isoc;
757
758         video->req_int_count = 0;
759
760         uvc_video_ep_queue_initial_requests(video);
761
762         return ret;
763 }
764
765 /*
766  * Initialize the UVC video stream.
767  */
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
	video->is_enabled = false;
	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	spin_lock_init(&video->req_lock);
	INIT_WORK(&video->pump, uvcg_video_pump);

	/* Allocate a work queue for asynchronous video pump handler. */
	/* NOTE(review): returns -EINVAL on allocation failure; -ENOMEM would
	 * be more conventional — confirm no caller depends on this value. */
	video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!video->async_wq)
		return -EINVAL;

	/* Default format: 320x240 YUYV (16 bpp). */
	video->uvc = uvc;
	video->fcc = V4L2_PIX_FMT_YUYV;
	video->bpp = 16;
	video->width = 320;
	video->height = 240;
	video->imagesize = 320 * 240 * 2;

	/* Initialize the video buffers queue. */
	uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
			V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
	return 0;
}