// SPDX-License-Identifier: GPL-2.0-or-later
/*
    buffer queues.
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"

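/* Copy at most 'copybytes' bytes from userspace into 'buf', clamped to the
   space still free in the buffer. Returns the number of bytes actually
   copied, or -EFAULT if the copy from userspace failed. */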
int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes)
{
	if (s->buf_size - buf->bytesused < copybytes)
		copybytes = s->buf_size - buf->bytesused;
	if (copy_from_user(buf->buf + buf->bytesused, src, copybytes)) {
		return -EFAULT;
	}
	buf->bytesused += copybytes;
	return copybytes;
}

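/* Swap the byte order of every 32-bit word in the used part of the buffer. */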
void ivtv_buf_swap(struct ivtv_buffer *buf)
{
	int i;

	for (i = 0; i < buf->bytesused; i += 4)
		swab32s((u32 *)(buf->buf + i));
}

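/* Reset a queue to the empty state. */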
void ivtv_queue_init(struct ivtv_queue *q)
{
	INIT_LIST_HEAD(&q->list);
	q->buffers = 0;
	q->length = 0;
	q->bytesused = 0;
}

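/* Add a buffer to the tail of queue 'q' and update the queue accounting.
   A buffer going to the free queue is reset first. */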
void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
{
	unsigned long flags;

	/* clear the buffer if it is going to be enqueued to the free queue */
	if (q == &s->q_free) {
		buf->bytesused = 0;
		buf->readpos = 0;
		buf->b_flags = 0;
		buf->dma_xfer_cnt = 0;
	}
	spin_lock_irqsave(&s->qlock, flags);
	list_add_tail(&buf->list, &q->list);
	q->buffers++;
	q->length += s->buf_size;
	q->bytesused += buf->bytesused - buf->readpos;
	spin_unlock_irqrestore(&s->qlock, flags);
}

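/* Remove and return the buffer at the head of queue 'q', or NULL if the
   queue is empty. */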
struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
{
	struct ivtv_buffer *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&s->qlock, flags);
	if (!list_empty(&q->list)) {
		buf = list_entry(q->list.next, struct ivtv_buffer, list);
		list_del_init(q->list.next);
		q->buffers--;
		q->length -= s->buf_size;
		q->bytesused -= buf->bytesused - buf->readpos;
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return buf;
}

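/* Move the buffer at the head of 'from' to the tail of 'to' and update the
   accounting of both queues. If 'clear' is set the buffer is reset first,
   as required for the free queue. Called with s->qlock held. */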
static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
		struct ivtv_queue *to, int clear)
{
	struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);

	list_move_tail(from->list.next, &to->list);
	from->buffers--;
	from->length -= s->buf_size;
	from->bytesused -= buf->bytesused - buf->readpos;
	/* special handling for q_free */
	if (clear)
		buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
	to->buffers++;
	to->length += s->buf_size;
	to->bytesused += buf->bytesused - buf->readpos;
}

/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
   If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
   If 'steal' != NULL, then buffers may also be taken from that queue if
   needed, but only if 'from' is the free queue.

   The buffer is automatically cleared if it goes to the free queue. It is
   also cleared if buffers need to be taken from the 'steal' queue and
   the 'from' queue is the free queue.

   When 'from' is q_free, needed_bytes is compared to the total available
   buffer length, otherwise needed_bytes is compared to the bytesused
   value. For the 'steal' queue the total available buffer length is
   always used.

   Returns -ENOMEM if the buffers could not be obtained, 0 if all buffers
   were obtained from the 'from' list, and otherwise the number of buffers
   stolen from the 'steal' queue. */
int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
		    struct ivtv_queue *to, int needed_bytes)
{
	unsigned long flags;
	int rc = 0;
	int from_free = from == &s->q_free;
	int to_free = to == &s->q_free;
	int bytes_available, bytes_steal;

	spin_lock_irqsave(&s->qlock, flags);
	if (needed_bytes == 0) {
		from_free = 1;
		needed_bytes = from->length;
	}

	bytes_available = from_free ? from->length : from->bytesused;
	bytes_steal = (from_free && steal) ? steal->length : 0;

	if (bytes_available + bytes_steal < needed_bytes) {
		spin_unlock_irqrestore(&s->qlock, flags);
		return -ENOMEM;
	}
	while (steal && bytes_available < needed_bytes) {
		struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		u16 dma_xfer_cnt = buf->dma_xfer_cnt;

		/* Move buffers from the tail of the 'steal' queue to the tail of the
		   'from' queue. Always move all the buffers with the same dma_xfer_cnt
		   value; this ensures that you do not end up with partial frame data
		   if one frame is stored in multiple buffers. */
		while (dma_xfer_cnt == buf->dma_xfer_cnt) {
			list_move_tail(steal->list.prev, &from->list);
			rc++;
			steal->buffers--;
			steal->length -= s->buf_size;
			steal->bytesused -= buf->bytesused - buf->readpos;
			buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
			from->buffers++;
			from->length += s->buf_size;
			bytes_available += s->buf_size;
			if (list_empty(&steal->list))
				break;
			buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		}
	}
	if (from_free) {
		u32 old_length = to->length;

		while (to->length - old_length < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, 1);
		}
	} else {
		u32 old_bytesused = to->bytesused;

		while (to->bytesused - old_bytesused < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, to_free);
		}
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return rc;
}

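/*
 * Usage sketch (illustrative only, not a call site from this file): a caller
 * wanting to hand 'bytes_needed' bytes of captured data to the DMA engine
 * might do something like
 *
 *	if (ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, bytes_needed) >= 0)
 *		start the DMA transfer from s->q_predma;
 *
 * where 'bytes_needed' is a hypothetical amount chosen by the caller. A
 * return of -ENOMEM means nothing was moved. ivtv_flush_queues() below is
 * the simplest real caller: needed_bytes == 0 moves every buffer back to
 * q_free.
 */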
void ivtv_flush_queues(struct ivtv_stream *s)
{
	ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
}

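/* Allocate the scatter-gather arrays and the stream buffers for stream 's'.
   All buffers start out on the free queue. Returns 0 on success or -ENOMEM
   if an allocation fails, in which case anything allocated so far is freed
   again. */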
int ivtv_stream_alloc(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	int SGsize = sizeof(struct ivtv_sg_host_element) * s->buffers;
	int i;

	if (s->buffers == 0)
		return 0;

	IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
		s->dma != DMA_NONE ? "DMA " : "",
		s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);

	s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_pending == NULL) {
		IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
		return -ENOMEM;
	}
	s->sg_pending_size = 0;

	s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_processing == NULL) {
		IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		return -ENOMEM;
	}
	s->sg_processing_size = 0;

	s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
					GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_dma == NULL) {
		IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		kfree(s->sg_processing);
		s->sg_processing = NULL;
		return -ENOMEM;
	}
	if (ivtv_might_use_dma(s)) {
		s->sg_handle = dma_map_single(&itv->pdev->dev, s->sg_dma,
					      sizeof(struct ivtv_sg_element),
					      DMA_TO_DEVICE);
		ivtv_stream_sync_for_cpu(s);
	}

	/* allocate stream buffers. Initially all buffers are in q_free. */
	for (i = 0; i < s->buffers; i++) {
		struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
						GFP_KERNEL|__GFP_NOWARN);

		if (buf == NULL)
			break;
		buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
		if (buf->buf == NULL) {
			kfree(buf);
			break;
		}
		INIT_LIST_HEAD(&buf->list);
		if (ivtv_might_use_dma(s)) {
			buf->dma_handle = dma_map_single(&s->itv->pdev->dev,
				buf->buf, s->buf_size + 256, s->dma);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_enqueue(s, buf, &s->q_free);
	}
	if (i == s->buffers)
		return 0;
	IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	ivtv_stream_free(s);
	return -ENOMEM;
}

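/* Free all stream buffers and the scatter-gather arrays, undoing
   ivtv_stream_alloc(). All queues are flushed back to q_free first. */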
void ivtv_stream_free(struct ivtv_stream *s)
{
	struct ivtv_buffer *buf;

	/* move all buffers to q_free */
	ivtv_flush_queues(s);

	/* empty q_free */
	while ((buf = ivtv_dequeue(s, &s->q_free))) {
		if (ivtv_might_use_dma(s))
			dma_unmap_single(&s->itv->pdev->dev, buf->dma_handle,
					 s->buf_size + 256, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}

	/* Free SG Array/Lists */
	if (s->sg_dma != NULL) {
		if (s->sg_handle != IVTV_DMA_UNMAPPED) {
			dma_unmap_single(&s->itv->pdev->dev, s->sg_handle,
					 sizeof(struct ivtv_sg_element),
					 DMA_TO_DEVICE);
			s->sg_handle = IVTV_DMA_UNMAPPED;
		}
		kfree(s->sg_pending);
		kfree(s->sg_processing);
		kfree(s->sg_dma);
		s->sg_pending = NULL;
		s->sg_processing = NULL;
		s->sg_dma = NULL;
		s->sg_pending_size = 0;
		s->sg_processing_size = 0;
	}
}
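
/*
 * Lifecycle sketch (illustrative only; the real callers live elsewhere in
 * the driver): a stream is typically set up and torn down roughly as
 *
 *	if (ivtv_stream_alloc(s))	// buffers land on s->q_free
 *		return -ENOMEM;
 *	...while streaming, shuffle buffers between s->q_free, s->q_full,
 *	   s->q_predma, etc. with ivtv_queue_move()...
 *	ivtv_stream_free(s);		// flushes the queues, then unmaps
 *					// and frees everything
 */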